| source | python |
|---|---|
check.py
|
# -*- coding:utf-8 -*-
from PythonMiddleware.notify import Notify
from PythonMiddleware.graphene import Graphene
from PythonMiddlewarebase.operationids import operations
import sys
import pymongo
import datetime
from time import sleep
from collections import deque
from threading import Thread, Lock
from prometheus_client import CollectorRegistry, Gauge, pushadd_to_gateway
from config import *
from utils import Logging
from handle_block import logger, parse_operations, handle_operations, init_gauges
#logger = Logging().getLogger()
block_info_q = deque()
pending_block_num_q = deque()
op_d = deque()
#thread lock
block_info_deque_lock = Lock()
pending_block_num_deque_lock = Lock()
op_deque_lock = Lock()
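# Data flow between the worker threads below:
#   check_block()    re-fetches blocks missing from MongoDB and appends their
#                    summaries to block_info_q
#   block2db()       drains block_info_q into the 'block' collection
#   analysis_block() pops block numbers from pending_block_num_q and parses
#                    their operations into op_d
#   data2db()        drains op_d via handle_operations()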
def check_block(args):
def one_block_check(block_num):
logger.info('recv block number: {}'.format(block_num))
try:
block = gph.rpc.get_block(block_num)
#witness_id = block['witness']
block_witness = gph.rpc.get_object(gph.rpc.get_object(block['witness'])['witness_account'])['name']
except Exception as e:
            logger.error('get_object exception. block {}, error {}'.format(block_num, repr(e)))
            return
block_time = block['timestamp']
transactions = block["transactions"]
witness_sign = block['witness_signature']
trx_total = 0
ops_total = 0
transactions_id = []
if transactions:
trx_total = len(transactions)
for trx in transactions:
transactions_id.append(trx[0])
ops_total += len(trx[1]["operations"])
block_data = {
"block_num": block_num,
"time": block_time,
"witness": block_witness,
"witness_sign": witness_sign,
"transactions_total": trx_total,
"transactions_id": transactions_id,
"operations_total": ops_total
}
block_info_deque_lock.acquire()
block_info_q.append(block_data)
block_info_deque_lock.release()
start = args[0]
end = args[1]
gph = Graphene(node=nodeaddress)
info = gph.info()
last_block_num = info['head_block_number']
#logger.info('last_block_num: {}, block start: {}, end: {}, info: {}'.format(last_block_num, start, end, info))
logger.info('last_block_num: {}, block start: {}, end: {}'.format(last_block_num, start, end))
if start > last_block_num:
logger.error("start:{} < end:{}".format(start, end))
return
if end > last_block_num:
end = last_block_num
conn = pymongo.MongoClient(mongodb_params['host'], mongodb_params['port'])
conn_db = conn[mongodb_params['db_name']]
for index in range(start, end+1):
result = conn_db.block.find({'block_num':index})
if result.count() == 0:
logger.info('check block number: {}'.format(index))
one_block_check(index)
else:
logger.info('block({}) already exists in mongodb'.format(index))
sleep(0.1)
conn.close()
# Parse blocks and collect their operations
def analysis_block():
gph = Graphene(node=nodeaddress)
from PythonMiddleware.instance import set_shared_graphene_instance
set_shared_graphene_instance(gph)
while 1:
if pending_block_num_q:
try:
pending_block_num_deque_lock.acquire()
block_num = pending_block_num_q.popleft()
pending_block_num_deque_lock.release()
logger.debug('pop block number: {}'.format(block_num))
try:
block_info = gph.rpc.get_block(block_num)
time = block_info["timestamp"]
transactions = block_info["transactions"]
operations_list = parse_operations(gph, block_num, time, transactions)
#logger.debug('block: {}, trx_list: {}'.format(block_num, operations_list))
except Exception as e:
                    logger.error('parse block exception. block {}, error {}'.format(block_num, repr(e)))
                    operations_list = None
if operations_list:
op_deque_lock.acquire()
op_d.append(operations_list)
op_deque_lock.release()
except Exception as e:
logger.error("pending_block_num_q: {}, except: '{}'".format(pending_block_num_q, repr(e)))
sleep(0.7)
# Write block summaries into the 'block' collection in MongoDB
def block2db():
while 1:
if block_info_q:
try:
#global block_info_deque_lock
block_info_deque_lock.acquire()
block = block_info_q.popleft()
block_info_deque_lock.release()
#update mongodb
conn = pymongo.MongoClient(mongodb_params['host'], mongodb_params['port'])
conn_db = conn[mongodb_params['db_name']]
try:
conn_db.block.insert_one({
'block_num': block["block_num"],
'time': block["time"],
'witness': block["witness"],
'witness_sign': block["witness_sign"],
'transactions_id': str(block["transactions_id"]),
'transactions_total': block["transactions_total"],
'operations_total': block["operations_total"]
})
except Exception as e:
logger.error("block: {}, except: '{}'".format(block["block_num"], repr(e)))
finally:
conn.close()
logger.info('block num: {} done.'.format(block["block_num"]))
except Exception as e:
logger.error("except: '{}'".format(repr(e)))
sleep(0.7)
# Write parsed operation data into the 'op' and 'transaction' collections
def data2db():
while 1:
if op_d:
try:
op_deque_lock.acquire()
operations_list = op_d.popleft()
op_deque_lock.release()
handle_operations(operations_list)
# status = handle_operations(operations_list)
# if not status:
# op_deque_lock.acquire()
# block_trx_ops = op_d.appendleft(operations_list)
# op_deque_lock.release()
# logger.warn('consume status {}, trx list: {}'.format(status, operations_list))
except Exception as e:
logger.error("except: '{}'".format(repr(e)))
sleep(0.5)
if __name__ == '__main__':
logger.info('args: {}'.format(sys.argv))
if len(sys.argv) < 3:
        logger.error('Usage: python3 check.py block_number_start block_number_end')
sys.exit(1)
start = int(sys.argv[1])
end = int(sys.argv[2])
if start > end or start <= 0 or end <= 0:
logger.error('block_number_start: {} > block_number_end: {} or start <= 0 or end <= 0'.format(start, end))
sys.exit(1)
args = [start, end]
init_gauges()
t1 = Thread(target=check_block, args=(args,))
t1.start()
t2 = Thread(target=block2db)
t2.start()
t3 = Thread(target=analysis_block)
t3.start()
t4 = Thread(target=data2db)
t4.start()
|
multiprocessing_wall_process_fytype.py
|
from multiprocessing import Process
from fython.fytypes import Real
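# Quick experiment: check whether an in-place update to a fython Real value
# made inside a child process is still visible in the parent after join().
# The prints show the value before, inside, and after the child run.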
x = Real(value=1)
print(x[:])
def f():
print(1)
print(x[:])
x[:] *= 10
print(x[:])
print(2)
if __name__ == '__main__':
p = Process(target=f)
p.start()
p.join()
print(3, x[:], p.exitcode)
|
transform.py
|
import os
import json
import multiprocessing
import random
input_data_path = "/data/disk2/private/zhx/scm/data/all/split"
output_data_path = "/data/disk2/private/zhx/scm/data/data/pretrain"
os.makedirs(output_data_path, exist_ok=True)
q = multiprocessing.Queue()
bq = multiprocessing.Queue()
file_list = []
cnt = 0
for filename in os.listdir(os.path.join(input_data_path)):
file_list.append(os.path.join(input_data_path, filename))
cnt += 1
print(cnt)
random.shuffle(file_list)
per_file = 1
cx = 0
for a in range(0, len(file_list), per_file):
cx += 1
arr = []
for b in range(a, min(a + per_file, len(file_list))):
arr.append(file_list[b])
q.put((cx, arr))
print(cx)
num_process = 20
split_list = ["。"]
word_to_dic = json.load(open("/data/disk2/private/zhx/scm/data/all/word2id.json", "r"))
def load(c):
if not (c in word_to_dic.keys()):
c = "[UNK]"
return word_to_dic[c]
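# Output format: each line written by transform() encodes one document as
# space-separated integers -- first the number of sentences, then, for every
# sentence, its length followed by the ids of its characters (documents are
# split into sentences on the characters in split_list).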
def transform(id_, file_list):
f = open(os.path.join(output_data_path, str(id_)), "w")
for file_name in file_list:
inf = open(file_name, "r")
for line in inf:
x = json.loads(line)
arr = [[]]
s = x["QW"]
s = s.replace(" ", "").replace("\t", "")
for a in range(0, len(s)):
if s[a] in split_list:
if len(arr[-1]) == 0:
continue
arr.append([])
else:
arr[-1].append(load(s[a]))
while len(arr) > 0 and len(arr[-1]) == 0:
arr = arr[:-1]
if len(arr) == 0:
continue
print(len(arr), end=' ', file=f)
for a in range(0, len(arr)):
print(len(arr[a]), end=' ', file=f)
for b in range(0, len(arr[a])):
print(arr[a][b], end=' ', file=f)
print("", file=f)
f.close()
def work():
while True:
id_, file_list = q.get(timeout=5)
transform(id_, file_list)
bq.put(len(file_list))
if __name__ == "__main__":
process_list = []
for a in range(0, num_process):
process = multiprocessing.Process(target=work)
process_list.append(process)
process_list[-1].start()
done = 0
while done < cnt:
_ = bq.get()
done += _
print("%d/%d\r" % (done, cnt), end="")
|
mainUI.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainUI.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
import os
import sys
import threading
import time
import addFont
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import yolo
from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QLabel, QLineEdit
from PyQt5 import QtCore, QtGui,QtWidgets
from PyQt5.QtGui import QPalette, QPixmap, QImage
from PyQt5.QtCore import Qt, QThread, QObject, pyqtSignal, QDateTime, QTimer
def cv2ImgAddText(img, text, left, top):  # draw Chinese (or any UTF-8) text onto a video frame
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img)
fillColor = (255, 0, 0)
fontStyle = ImageFont.truetype("font/simsun.ttc", 20, encoding='utf-8')
draw.text((left, top - 20), text, font=fontStyle, fill=fillColor)
return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
# Main window
class Ui_MainWindow(object):  # runs in the main (GUI) thread
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1000, 700)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(90, 110, 111, 81))
self.pushButton.setObjectName("pushButton")
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(90, 360, 111, 81))
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(90, 230, 111, 81))
self.pushButton_3.setObjectName("pushButton_3")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(110, 90, 72, 15))
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(110, 210, 72, 15))
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(110, 340, 72, 15))
self.label_3.setObjectName("label_3")
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
        # "remove picture" button
self.removePicTure = QtWidgets.QPushButton("移除图片",self.centralwidget)
self.removePicTure.setGeometry(QtCore.QRect(210, 110, 80, 81))
self.removePicTure.setObjectName("pushButton")
self.removePicTure.setObjectName("移除图片")
self.removePicTure.clicked.connect(self.RemovePicture)
        # image display area
self.Photelabel=QLabel("图片",self)
self.Photelabel.setGeometry(QtCore.QRect(300,120,60,15))
self.Photelabel.setPixmap(QPixmap("huajuan.jpg"))
self.Photelabel.resize(640,480)
self.Photelabel.setScaledContents(True)
        # text box
self.input = QLineEdit(self)
self.input.resize(400,100)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.pushButton.setText(_translate("MainWindow", "识别图片"))
self.pushButton_2.setText(_translate("MainWindow", "实时识别"))
self.pushButton_3.setText(_translate("MainWindow", "识别视频"))
        # connect signals to their slot functions
self.pushButton.clicked.connect(self.ProcessPicture)
self.pushButton_2.clicked.connect(self.getFrame)
# self.pushButton_3.clicked.connect(self.ProcessVedio)
        self.pushButton_3.clicked.connect(self.getFrame)  # video button currently routed to the real-time thread as well
    # slot function definitions
def ProcessPicture(self):
print("按下了处理图片按钮")
        fd, _ = QFileDialog.getOpenFileName(self, "打开图片", "", "*.jpg;;*.png;;All Files(*)")
        jpg = QtGui.QPixmap(fd).scaled(self.Photelabel.width(), self.Photelabel.height())  # scale the image to fit the label
self.Photelabel.setPixmap(jpg)
def ProcessVedio(self):
print("按下了处理视频按钮")
fd= QFileDialog.getOpenFileName(self, '选择一个视频文件', './', 'ALL(*.*);;Videos(*.mp4)')
        os.chdir(r'D:\Python\kears-yolov3-dev\kears-yolo-test\keras-yolo3-master')  # change into the yolo script directory
os.system("python yolo_video.py " + fd[0])
    def RemovePicture(self):
        if getattr(self, 'cap', None) is not None:  # guard: a capture may never have been attached
            self.cap.release()
        self.Photelabel.setText(" ")
    def getTime(self):  # button handler: start the time-update thread
self.backend = BackendThread()
        self.backend.update_time.connect(self.updateTimeUI)  # bind the thread's signal to the main-thread UI update slot
self.backend.start()
    def getFrame(self):  # button handler: start the real-time video thread
        th = threading.Thread(target=self.RealTimeThread)  # create the video thread
th.start()
    def RealTimeThread(self):  # real-time detection worker: continuously updates video frames on the QLabel
# Load Yolo
net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
classes = []
with open("coco.names", "r") as f:
classes = [line.strip() for line in f.readlines()]
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))
# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()
        cap = cv2.VideoCapture(0)  # open the webcam
        # ---- real-time detection loop: detect, annotate, and display each frame ----
while True:
# # Start timer (for calculating frame rate)
# t1 = cv2.getTickCount()
ret, frame = cap.read()
height, width, channels = frame.shape
# Detecting objects
blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)
            # Show detection information on the frame
class_ids = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > 0.4:
# Object detected
center_x = int(detection[0] * width)
center_y = int(detection[1] * height)
w = int(detection[2] * width)
h = int(detection[3] * height)
# Rectangle coordinates
x = int(center_x - w / 2)
y = int(center_y - h / 2)
boxes.append([x, y, w, h])
confidences.append(float(confidence))
class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
price = 0
font = cv2.FONT_HERSHEY_SIMPLEX
for i in range(len(boxes)):
if i in indexes:
x, y, w, h = boxes[i]
label = str(classes[class_ids[i]])
color = colors[i]
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 1)
frame = cv2ImgAddText(frame, label, x, y)
# price = price + sumPrice(label)
print('total price is ' + str(price))
frame = cv2ImgAddText(frame, '总价为: ' + str(price), 15, 20)
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            img = QImage(frame.data, width, height, frame.strides[0], QImage.Format_RGB888)
self.Photelabel.setPixmap(QPixmap.fromImage(img))
self.Photelabel.setScaledContents(True)
    def updateTimeUI(self, data):  # update the time text field in the main UI
self.input.setText(data)
class BackendThread(QtCore.QThread):  # worker thread that emits the current time every second
    update_time = pyqtSignal(str)  # signal defined as a class attribute
    def run(self):  # thread body: emit the formatted current time once per second
while True:
data = QDateTime.currentDateTime()
currentTime = data.toString("yyyy-MM-dd hh:mm:ss")
self.update_time.emit(str(currentTime))
time.sleep(1)
class MyWindow(QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
super(MyWindow, self).__init__(parent)
self.setupUi(self)
if __name__ == '__main__':
app = QApplication(sys.argv)
myWin = MyWindow()
myWin.show()
sys.exit(app.exec_())
|
test_client.py
|
import os
import socket
import threading
import time
import msgpack
import pytest
from pynats import NATSClient
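# These tests expect a NATS server reachable at NATS_URL (default
# nats://127.0.0.1:4222); test_connect_timeout assumes nothing is listening on
# port 4223.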
@pytest.fixture
def nats_url():
return os.environ.get("NATS_URL", "nats://127.0.0.1:4222")
def test_connect_and_close(nats_url):
client = NATSClient(nats_url, socket_timeout=2)
client.connect()
client.ping()
client.close()
def test_connect_and_close_using_context_manager(nats_url):
with NATSClient(nats_url, socket_timeout=2) as client:
client.ping()
def test_connect_timeout():
client = NATSClient("nats://127.0.0.1:4223", socket_timeout=2)
with pytest.raises(socket.error):
client.connect()
def test_reconnect(nats_url):
client = NATSClient(nats_url, socket_timeout=2)
client.connect()
client.ping()
client.reconnect()
client.ping()
client.close()
def test_subscribe_unsubscribe(nats_url):
with NATSClient(nats_url, socket_timeout=2) as client:
sub = client.subscribe(
"test-subject", callback=lambda x: x, queue="test-queue", max_messages=2
)
client.unsubscribe(sub)
def test_subscribe_timeout(nats_url):
with NATSClient(nats_url, socket_timeout=2) as client:
sub = client.subscribe(
"test-subject", callback=lambda x: x, queue="test-queue", max_messages=1
)
with pytest.raises(socket.timeout):
client.wait(count=1)
client.unsubscribe(sub)
def test_publish(nats_url):
received = []
def worker():
with NATSClient(nats_url, socket_timeout=2) as client:
def callback(message):
received.append(message)
client.subscribe(
"test-subject", callback=callback, queue="test-queue", max_messages=2
)
client.wait(count=2)
t = threading.Thread(target=worker)
t.start()
time.sleep(1)
with NATSClient(nats_url, socket_timeout=2) as client:
# publish without payload
client.publish("test-subject")
# publish with payload
client.publish("test-subject", payload=b"test-payload")
t.join()
assert len(received) == 2
assert received[0].subject == "test-subject"
assert received[0].reply == ""
assert received[0].payload == b""
assert received[1].subject == "test-subject"
assert received[1].reply == ""
assert received[1].payload == b"test-payload"
def test_request(nats_url):
def worker():
with NATSClient(nats_url, socket_timeout=2) as client:
def callback(message):
client.publish(message.reply, payload=b"test-callback-payload")
client.subscribe(
"test-subject", callback=callback, queue="test-queue", max_messages=2
)
client.wait(count=2)
t = threading.Thread(target=worker)
t.start()
time.sleep(1)
with NATSClient(nats_url, socket_timeout=2) as client:
# request without payload
resp = client.request("test-subject")
assert resp.subject.startswith("_INBOX.")
assert resp.reply == ""
assert resp.payload == b"test-callback-payload"
# request with payload
resp = client.request("test-subject", payload=b"test-payload")
assert resp.subject.startswith("_INBOX.")
assert resp.reply == ""
assert resp.payload == b"test-callback-payload"
t.join()
def test_request_msgpack(nats_url):
def worker():
with NATSClient(nats_url, socket_timeout=2) as client:
def callback(message):
client.publish(
message.reply,
payload=msgpack.packb(
{b"v": 3338} if message.payload else {b"v": 32}
),
)
client.subscribe(
"test-subject", callback=callback, queue="test-queue", max_messages=2
)
client.wait(count=2)
t = threading.Thread(target=worker)
t.start()
time.sleep(1)
with NATSClient(nats_url, socket_timeout=2) as client:
# request without payload
resp = client.request("test-subject")
assert resp.subject.startswith("_INBOX.")
assert resp.reply == ""
assert msgpack.unpackb(resp.payload) == {b"v": 32}
# request with payload
resp = client.request("test-subject", payload=msgpack.packb("test-payload"))
assert resp.subject.startswith("_INBOX.")
assert resp.reply == ""
assert msgpack.unpackb(resp.payload) == {b"v": 3338}
t.join()
def test_request_timeout(nats_url):
with NATSClient(nats_url, socket_timeout=2) as client:
with pytest.raises(socket.timeout):
client.request("test-subject")
|
chatserver.py
|
# chat-server.py
import socket
import datetime
import threading
import random
PORT = 8500
BUFSIZE = 4096
#SERVERIP = 'localhost' # YOUR IP
SERVERIP = '159.65.135.242'
clist = [] # Client List
cdict = {}
pvroom = {}
pvdict = {}
allroomnumber = []
allroomnumber.append(10001) # create default room
pvdict[10001] = [] # create default room
def client_handler(client,addr):
while True:
try:
data = client.recv(BUFSIZE)
check = data.decode('utf-8').split('|')
# check = ['NAME','UNCLE']
# check = ['ROOM','NEW']
# check = ['JOIN','19999','UNCLE']
# check = ['MSG','19999','HELLO WORLD']
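            # Protocol summary: each message is a pipe-delimited UTF-8 string.
            #   NAME|<username>          register a display name for this client
            #   ROOM|NEW                 create a room; the server replies "newroom|<number>"
            #   JOIN|<room>|<username>   join a room and notify its members
            #   MSG|<room>|<text>        broadcast the text to everyone in the room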
if check[0] == 'NAME':
cdict[str(addr)] = check[1]
elif check[0] == 'ROOM':
rn = random.randint(10010,99999)
while rn in allroomnumber:
rn = random.randint(10010,99999)
allroomnumber.append(rn)
if rn not in pvdict:
pvdict[rn] = []
newroom = 'newroom|{}'.format(rn)
client.sendall(newroom.encode('utf-8'))
print('PVDICT:',pvdict)
elif check[0] == 'JOIN':
rnumber = int(check[1])
pvdict[rnumber].append(client)
cdict[str(addr)] = check[2]
pvroom[str(addr)] = rnumber
username = cdict[str(addr)]
for c in pvdict[rnumber]:
c.sendall('เพิ่ม {} เข้ากลุ่มแล้ว'.format(username).encode('utf-8'))
except Exception as e:
try:
try:
client.sendall('q'.encode('utf-8'))
except:
pass
print(e)
rnum = pvroom[str(addr)]
pvdict[rnum].remove(client)
username = cdict[str(addr)]
for c in pvdict[rnum]:
c.sendall('{} ได้ออกจากกลุ่มแล้ว'.format(username).encode('utf-8'))
print('REMAIN: ',len(pvdict[rnum]))
break
except Exception as e:
print(e)
break
cnlist = ['NAME','ROOM']
if check[0] not in cnlist:
if check[0] == 'MSG':
# check = ['MSG','170144','HELLO']
roomnumber = int(check[1])
try:
username = cdict[str(addr)]
msg = username + '>>> ' + check[2]
except:
msg = str(addr) + '>>> ' + check[2]
for c in pvdict[roomnumber]:
c.sendall(msg.encode('utf-8'))
client.close()
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
server.bind((SERVERIP,PORT))
server.listen(5)
while True:
client, addr = server.accept()
print(addr)
print('ALL CLIENT: ', len(pvroom))
task = threading.Thread(target=client_handler, args=(client, addr))
task.start()
|
ha_tool.py
|
# -*- coding: utf-8 -*-
# HA - Tool version 2.1
# Lukas Krämer
# MIT License
# 2021
import os
import re
from shutil import move as move
from sys import exit as sys_exit
import threading
from time import sleep as sleep
from datetime import datetime
import pandas as pd
import sqlalchemy
from dotenv import load_dotenv, dotenv_values
class HaTool:
_fred = threading.Thread() # thread management
_lock = threading.Lock() # thread lock
_engine = None # Database connection
_config = dotenv_values(".env") # Env vars
_raw_data_table = _config["table_raw"] # table raw uploaded
_overview_table = _config["table_overview"] # summary table
_log_table = _config["created_trips_table"] # log filename in db
_path = _config["PathToTripData"] # path to the target path for txt data
_threads = _config["process"] # number of processes
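    # Expected keys in .env (values below are illustrative placeholders only):
    #   table_raw=rawData
    #   table_overview=overview
    #   created_trips_table=created_trips
    #   PathToTripData=/home/pi/trips/
    #   process=4
    #   DB_USERNAME=..., DB_PASSWORD=..., DB_HOST=..., DB_SCHEMA=...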
def __init__(self):
"""check if System is ready configure"""
load_dotenv()
self._config = dotenv_values(".env")
if not (os.path.isdir(self._path)):
os.makedirs(self._path)
self._login_value()
self._todo_trips = []
self._task_list = None
def _login_value(self):
"""Connection to the Database and log """
db_user = self._config["DB_USERNAME"]
db_passwd = self._config["DB_PASSWORD"]
db_ip = self._config["DB_HOST"]
db_schema = self._config["DB_SCHEMA"]
# [driver]://[username][password]@[IP]/[Schema in DB]
db_uri = f'mysql+pymysql://{db_user}:{db_passwd}@{db_ip}:3306/{db_schema}'
self._engine = sqlalchemy.create_engine(db_uri) # connect to Database
try:
now = datetime.now()
data = {'username': [db_user], "time": [now.strftime("%d/%m/%Y, %H:%M:%S")], "Remote": [db_ip],
"OS": ["RPI"]}
pd.DataFrame(data).to_sql("python_log", con=self._engine, if_exists='append')
except Exception:
print("----------------\n\n Error while logging in Database\n\n----------------")
sys_exit()
def _get_last_trip(self, table, trip_id="trip_counter"):
"""return last trip on the Database"""
try:
return pd.read_sql_query(f'SELECT {trip_id} FROM {table} ORDER BY {trip_id} DESC limit 1;',
con=self._engine)
except Exception:
print(f'last trip Error \n{table} \n{trip_id}\n--------------------')
return pd.DataFrame({trip_id: [0]})
def _get_last_trip_number(self):
"""return the number of the last recorded Trip"""
try:
start_trip_number = int(self._get_last_trip(self._overview_table, "trip_number").at[0, 'trip_number'])
target_trip_number = self._get_last_trip(self._raw_data_table).at[0, 'trip_counter']
if target_trip_number == start_trip_number:
print("all uploaded")
return -1
else:
return start_trip_number
except Exception:
print("Error")
return 0
    def _get_missing_summary_trips(self):
ids = []
try:
values = pd.read_sql_query(f'''SELECT trip_counter
FROM {self._raw_data_table}
WHERE {self._raw_data_table}.trip_counter NOT IN
(SELECT {self._overview_table}.trip_number FROM {self._overview_table})
group by trip_counter''',
con=self._engine)
for index, row in values.iterrows():
ids.append(row['trip_counter'])
except Exception:
print("Summary not founded")
values = pd.read_sql_query(f'''SELECT trip_counter FROM rawData order by trip_counter
desc limit 1''', con=self._engine)
for i in range(values['trip_counter'][0], 0, -1):
ids.append(i)
finally:
return ids
def _trip_handler(self, number_of_processes):
"""manage the Summary Calculator"""
tasks = self._task_list
# value = self._get_last_trip_number()
for i in range(number_of_processes):
self._todo_trips.append(tasks.pop())
run = True
while run:
for i in range(number_of_processes):
if self._todo_trips[i] == "next":
self._todo_trips[i] = tasks.pop()
if len(tasks) == 0:
run = False
print("everything started")
sys_exit()
def _duplicate_check(self, filename):
"""check if file exist in Database"""
try:
trip_list = pd.read_sql_query(f'SELECT filename FROM {self._log_table};', con=self._engine)
# Check if filename is registered in database
for index, row in trip_list.iterrows():
if row['filename'] == str(filename):
print("found duplicate")
return True
return False
except Exception:
print("duplicate error")
return False
def _upload_trips_raw(self):
"""upload all txt files to DB"""
path = self._path
try: # normal
self._get_last_trip_number()
counter = pd.read_sql_query(
f"SELECT trip_counter FROM {self._raw_data_table} ORDER BY trip_counter DESC limit 1;",
con=self._engine) # get last trip number from Database
finished = int(counter.at[0, 'trip_counter']) # last trip number from Database
except Exception:
finished = 0
regex = re.compile("Trip_20[1-3][0-9]-[0-2][0-9]-[0-3][0-9]_[0-3][0-9]-[0-9][0-9]-[0-9][0-9].txt")
for file in os.listdir(path):
if regex.match(file):
values_of_file = pd.read_csv(path + file, sep='\t')
if not self._duplicate_check(file):
finished = finished + 1
else:
continue
values_of_file['trip_counter'] = pd.DataFrame(
{'trip_counter': [finished for _ in range(len(values_of_file.index))]})
values_of_file.to_sql(self._raw_data_table, con=self._engine, if_exists='append', index='counter')
if not (os.path.isdir(path + "archive/")):
os.makedirs(path + "archive/")
move(path + file, path + 'archive/') # move finished file to archive
trip_log = {'filename': [str(file)],
'Datum': [datetime.now().strftime("%d/%m/%Y, %H:%M:%S")]
}
pd.DataFrame(trip_log).to_sql(self._log_table, con=self._engine, if_exists='append')
del values_of_file
sys_exit()
@staticmethod
def _dataframe_difference(df1, df2):
"""Find rows which are different between two DataFrames."""
comparison_df = df1.merge(df2,
indicator=True,
how='outer')
return comparison_df[comparison_df['_merge'] != 'both']
def _calc_summary(self, process_id):
"""gen _calc_summary trip by trip"""
try:
if self._todo_trips[process_id] == "finished":
sys_exit()
timeout = 0
while self._todo_trips[process_id] == "next":
sleep(1)
if timeout >= 12:
sys_exit()
timeout += 1
query = f"""
SELECT * FROM {self._raw_data_table}
WHERE trip_counter = {self._todo_trips[process_id]} ORDER BY time asc; """
trip_values_database = pd.read_sql_query(query, self._engine)
number_lines = trip_values_database.shape[0]
if number_lines == 0:
self._todo_trips[process_id] = "finished"
exit()
elif number_lines <= 20:
self._todo_trips[process_id] = "next"
self._calc_summary(process_id)
return
df4 = pd.DataFrame(columns=['soc'])
for x in range(0, number_lines): # remove all 0 from the Dataset
if trip_values_database.at[x, 'soc'] != 0:
soc_val = float(trip_values_database.at[x, 'soc'])
df4 = df4.append({'soc': soc_val}, ignore_index=True)
last_row = int(number_lines - 1)
if df4.shape[0] != 0:
c_soc_start = df4.at[0, "soc"]
c_soc_end = trip_values_database['soc'][number_lines - 1]
else:
c_soc_start = 0
c_soc_end = 0
consumption_average = float(trip_values_database['tripfuel'][last_row]) / 10 / float(
trip_values_database['trip_dist'][last_row]) # Consumption km / h
ev_proportion = (float(trip_values_database['trip_ev_dist'][last_row]) * 100) / float(
trip_values_database['trip_dist'][last_row]) # proportion of the usage of the electric engine
driving_stop = float(trip_values_database['trip_nbs'][last_row]) - float(
trip_values_database['trip_mov_nbs'][last_row]) # time of standing
# dataset for Database
regex = r"[0-2][0-9]:[0-5][0-9]"
summary_value = {'trip_number': trip_values_database['trip_counter'][1],
'day': pd.to_datetime(trip_values_database['Date'][0]).date(),
'time_Begins': re.match(regex, trip_values_database['Time'][0].replace(" ", ""))[0],
'time_End': re.match(regex, trip_values_database['Time'][last_row].replace(" ", ""))[0],
'km_start': trip_values_database['odo'][0],
'km_end': trip_values_database['odo'][last_row],
'trip_length': round(trip_values_database['trip_dist'][last_row], 2),
'trip_length_ev': round(trip_values_database['trip_ev_dist'][last_row], 2),
'driving': round(trip_values_database['trip_nbs'][last_row] / 60, 2),
'driving_ev': round(trip_values_database['trip_ev_nbs'][last_row] / 60, 2),
'driving_move': round(trip_values_database['trip_mov_nbs'][last_row] / 60, 4),
'driving_stop': round(int(driving_stop) / 60, 4),
'fuel': round(float(trip_values_database['tripfuel'][last_row]), 0),
'outside_temp': round(float(trip_values_database['ambient_temp'].max()), 2),
'outside_temp_average': round(float(trip_values_database['ambient_temp'].mean()), 2),
'soc_average': round(float(trip_values_database['soc'].mean()), 2),
'soc_minimum': round(float(df4['soc'].min()), 2),
'soc_maximal': round(float(trip_values_database['soc'].max()), 2),
'soc_start': round(float(c_soc_start), 2),
'soc_end': round(float(c_soc_end), 2),
'consumption_average': round(float(consumption_average), 2),
'ev_proportion': [int(ev_proportion)],
'speed_average': int(trip_values_database['speed_obd'].mean()),
'speed_max': [trip_values_database['speed_obd'].max()],
'soc_change': round(int(c_soc_end) - int(c_soc_start), 2),
'rotation_speed_average': round(trip_values_database['ice_rpm'].mean(), 0),
'rotation_speed_max': [trip_values_database['ice_rpm'].max()],
'engine load_average': round(trip_values_database['ice_load'].mean(), 0),
'engine_load_max': [trip_values_database['ice_load'].max()],
'battery_temp_max': round(trip_values_database['battery_temp'].max(), 2),
'battery_temp_average': round(trip_values_database['battery_temp'].mean(), 2),
'battery_temp_min': round(trip_values_database['battery_temp'].min(), 2),
'engine_cooling_temperature_max': round(trip_values_database['ice_temp'].max(), 2),
'engine_cooling_temperature_average': round(trip_values_database['ice_temp'].mean(), 2),
'engine_cooling_temperature_min': round(trip_values_database['ice_temp'].min(), 2),
'electric_motor_temp_max': round(trip_values_database['mg_temp'].max(), 2),
'electric_motor_temp_average': round(trip_values_database['mg_temp'].mean(), 2),
'electric_motor_temp_min': round(trip_values_database['mg_temp'].min(), 2),
'inverter_motor_temp_max': round(trip_values_database['inverter_temp'].max(), 2),
'inverter_motor_temp_average': round(trip_values_database['inverter_temp'].mean(), 2),
'inverter_motor_temp_min': round(trip_values_database['inverter_temp'].min(), 2),
'indoor_temp_max': round(trip_values_database['inhaling_temp'].max(), 2),
'indoor_temp_average': round(trip_values_database['inhaling_temp'].mean(), 2),
'indoor_temp_min': round(trip_values_database['inhaling_temp'].min(), 2)
}
overview_frame = pd.DataFrame(data=summary_value)
del trip_values_database
del summary_value
self._lock.acquire()
overview_frame.to_sql(self._overview_table,
index= False,
con=self._engine,
if_exists='append')
self._lock.release()
del overview_frame
self._todo_trips[process_id] = "next"
self._calc_summary(process_id=process_id)
except ZeroDivisionError:
self._todo_trips[process_id] = "next"
self._calc_summary(process_id=process_id)
print("Overview finished")
def start(self, program):
"""run the start with all parameter"""
number_of_processes = self._threads
if program == "trips":
p1 = threading.Thread(target=self._upload_trips_raw)
p1.start()
p1.join(300)
elif program == "calc_summary":
            self._task_list = self._get_missing_summary_trips()
diff = len(self._task_list)
thread_count = 0
if diff == 0:
print("no new values")
sys_exit()
elif diff < int(number_of_processes):
print(f"less than {int(number_of_processes)} thread")
thread_count = int(diff)
else:
thread_count = int(number_of_processes)
threading.Thread(target=self._trip_handler, args=(thread_count,)).start()
timeout = 0
while timeout <= 15:
if len(self._todo_trips) == thread_count:
break
sleep(0.5)
for i in range(int(thread_count)):
threading.Thread(target=self._calc_summary, args=(i,)).start()
else:
print("unknown program")
if __name__ == "__main__":
ha = HaTool()
ha.start("trips")
ha.start("calc_summary")
|
component.py
|
# pylint: disable=unused-argument # W0613 Unused argument 'timeout' & 'input'
# pylint: disable=redefined-builtin # W0622 Redefining built-in 'input'
# pylint: disable=global-statement # W0603 global `_components`
import os
import sys
import copy
import time
import threading as mt
import radical.utils as ru
from .. import constants as rpc
from .. import states as rps
# ------------------------------------------------------------------------------
#
def out(msg):
sys.stdout.write('%s\n' % msg)
sys.stdout.flush()
# ------------------------------------------------------------------------------
#
_components = list()
def _atfork_child():
global _components
for c in _components:
c._subscribers = dict()
_components = list()
ru.atfork(ru.noop, ru.noop, _atfork_child)
# ------------------------------------------------------------------------------
#
class ComponentManager(object):
'''
RP spans a hierarchy of component instances: the application has a pmgr and
tmgr, and the tmgr has a staging component and a scheduling component, and
the pmgr has a launching component, and components also can have bridges,
etc. etc. This ComponentManager centralises the code needed to spawn,
    manage and terminate such components - any code which needs to create
    components should create a ComponentManager instance and pass the required
    component and bridge layout and configuration. Calling `stop()` on the cmgr
    will terminate the components and bridges.
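
    Example (hypothetical usage sketch; assumes `cfg` already describes the
    desired bridges and components):

        cmgr = ComponentManager(cfg)
        cmgr.start_bridges()
        cmgr.start_components()
        ...
        cmgr.close()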
'''
# --------------------------------------------------------------------------
#
def __init__(self, cfg):
_components.append(self)
self._cfg = ru.Config('radical.pilot.cmgr', cfg=cfg)
self._sid = self._cfg.sid
self._uid = ru.generate_id('cmgr', ns=self._sid)
        self._uids = [self._uid]    # uids to track heartbeats for (incl. own)
self._prof = ru.Profiler(self._uid, ns='radical.pilot',
path=self._cfg.path)
self._log = ru.Logger(self._uid, ns='radical.pilot',
path=self._cfg.path)
self._prof.prof('init2', uid=self._uid, msg=self._cfg.path)
# Every ComponentManager runs a HB pubsub bridge in a separate thread.
# That HB channel should be used by all components and bridges created
# under this CMGR.
bcfg = ru.Config(cfg={'channel' : 'heartbeat',
'type' : 'pubsub',
'uid' : self._uid + '.hb',
'stall_hwm' : 1,
'bulk_size' : 0,
'path' : self._cfg.path})
self._hb_bridge = ru.zmq.PubSub(bcfg)
self._hb_bridge.start()
self._cfg.heartbeat.addr_pub = str(self._hb_bridge.addr_pub)
self._cfg.heartbeat.addr_sub = str(self._hb_bridge.addr_sub)
# runs a HB monitor on that channel
self._hb = ru.Heartbeat(uid=self.uid,
timeout=self._cfg.heartbeat.timeout,
interval=self._cfg.heartbeat.interval,
beat_cb=self._hb_beat_cb, # on every heartbeat
term_cb=self._hb_term_cb, # on termination
log=self._log)
self._hb_pub = ru.zmq.Publisher('heartbeat',
self._cfg.heartbeat.addr_pub,
log=self._log, prof=self._prof)
self._hb_sub = ru.zmq.Subscriber('heartbeat',
self._cfg.heartbeat.addr_sub,
topic='heartbeat', cb=self._hb_sub_cb,
log=self._log, prof=self._prof)
# confirm the bridge being usable by listening to our own heartbeat
self._hb.start()
self._hb.wait_startup(self._uid, self._cfg.heartbeat.timeout)
self._log.info('heartbeat system up')
# --------------------------------------------------------------------------
#
def _hb_sub_cb(self, topic, msg):
'''
keep track of heartbeats for all bridges/components we know
'''
# self._log.debug('hb_sub %s: get %s check', self.uid, msg['uid'])
if msg['uid'] in self._uids:
# self._log.debug('hb_sub %s: get %s used', self.uid, msg['uid'])
self._hb.beat(uid=msg['uid'])
# --------------------------------------------------------------------------
#
def _hb_beat_cb(self):
'''
publish own heartbeat on the hb channel
'''
self._hb_pub.put('heartbeat', msg={'uid' : self.uid})
# self._log.debug('hb_cb %s: put %s', self.uid, self.uid)
# --------------------------------------------------------------------------
#
def _hb_term_cb(self, uid=None):
self._log.debug('hb_term %s: %s died', self.uid, uid)
self._prof.prof('term', uid=self._uid)
# FIXME: restart goes here
# NOTE: returning `False` indicates failure to recover. The HB will
# terminate and suicidally kill the very process it is living in.
# Make sure all required cleanup is done at this point!
return None
# --------------------------------------------------------------------------
#
@property
def uid(self):
return self._uid
# --------------------------------------------------------------------------
#
@property
def cfg(self):
return self._cfg
# --------------------------------------------------------------------------
#
def start_bridges(self, cfg=None):
'''
check if any bridges are defined under `cfg['bridges']` and start them
'''
self._prof.prof('start_bridges_start', uid=self._uid)
timeout = self._cfg.heartbeat.timeout
if cfg is None:
cfg = self._cfg
for bname, bcfg in cfg.get('bridges', {}).items():
bcfg.uid = bname
bcfg.channel = bname
bcfg.cmgr = self.uid
bcfg.sid = cfg.sid
bcfg.path = cfg.path
bcfg.heartbeat = cfg.heartbeat
fname = '%s/%s.json' % (cfg.path, bcfg.uid)
bcfg.write(fname)
self._log.info('create bridge %s [%s]', bname, bcfg.uid)
out, err, ret = ru.sh_callout('radical-pilot-bridge %s' % fname)
self._log.debug('bridge startup out: %s', out)
self._log.debug('bridge startup err: %s', err)
if ret:
raise RuntimeError('bridge startup failed')
self._uids.append(bcfg.uid)
self._log.info('created bridge %s [%s]', bname, bcfg.uid)
# all bridges should start now, for their heartbeats
# to appear.
# self._log.debug('wait for %s', self._uids)
failed = self._hb.wait_startup(self._uids, timeout=timeout)
# self._log.debug('waited for %s: %s', self._uids, failed)
if failed:
raise RuntimeError('could not start all bridges %s' % failed)
self._prof.prof('start_bridges_stop', uid=self._uid)
# --------------------------------------------------------------------------
#
def start_components(self, cfg=None):
'''
check if any components are defined under `cfg['components']`
and start them
'''
self._prof.prof('start_components_start', uid=self._uid)
timeout = self._cfg.heartbeat.timeout
if cfg is None:
cfg = self._cfg
# we pass a copy of the complete session config to all components, but
# merge it into the component specific config settings (no overwrite),
# and then remove the `bridges` and `components` sections
#
scfg = ru.Config(cfg=cfg)
if 'bridges' in scfg: del(scfg['bridges'])
if 'components' in scfg: del(scfg['components'])
for cname, ccfg in cfg.get('components', {}).items():
for _ in range(ccfg.get('count', 1)):
ccfg.uid = ru.generate_id(cname, ns=self._sid)
ccfg.cmgr = self.uid
ccfg.kind = cname
ccfg.sid = cfg.sid
ccfg.base = cfg.base
ccfg.path = cfg.path
ccfg.heartbeat = cfg.heartbeat
ccfg.merge(scfg, policy=ru.PRESERVE, log=self._log)
fname = '%s/%s.json' % (cfg.path, ccfg.uid)
ccfg.write(fname)
self._log.info('create component %s [%s]', cname, ccfg.uid)
out, err, ret = ru.sh_callout('radical-pilot-component %s' % fname)
self._log.debug('out: %s' , out)
self._log.debug('err: %s' , err)
if ret:
                    raise RuntimeError('component startup failed')
self._uids.append(ccfg.uid)
self._log.info('created component %s [%s]', cname, ccfg.uid)
# all components should start now, for their heartbeats
# to appear.
failed = self._hb.wait_startup(self._uids, timeout=timeout * 10)
if failed:
raise RuntimeError('could not start all components %s' % failed)
self._prof.prof('start_components_stop', uid=self._uid)
# --------------------------------------------------------------------------
#
def close(self):
self._prof.prof('close', uid=self._uid)
self._hb_bridge.stop()
self._hb.stop()
# ------------------------------------------------------------------------------
#
class Component(object):
'''
This class provides the basic structure for any RP component which operates
on stateful things. It provides means to:
- define input channels on which to receive new things in certain states
- define work methods which operate on the things to advance their state
- define output channels to which to send the things after working on them
- define notification channels over which messages with other components
can be exchanged (publish/subscriber channels)
All low level communication is handled by the base class -- deriving classes
will register the respective channels, valid state transitions, and work
methods. When a 'thing' is received, the component is assumed to have full
ownership over it, and that no other component will change the 'thing's
state during that time.
The main event loop of the component -- `work()` -- is executed on `run()`
and will not terminate on its own, unless it encounters a fatal error.
Components inheriting this class should attempt not to use shared
resources. That will ensure that multiple instances of the component can
coexist for higher overall system throughput. Should access to shared
resources be necessary, it will require some locking mechanism across
process boundaries.
This approach should ensure that
- 'thing's are always in a well defined state;
- components are simple and focus on the semantics of 'thing' state
progression;
- no state races can occur on 'thing' state progression;
- only valid state transitions can be enacted (given correct declaration
of the component's semantics);
- the overall system is performant and scalable.
Inheriting classes SHOULD overload the following methods:
- `initialize()`:
- set up the component state for operation
- register input/output/notification channels
- register work methods
- register callbacks to be invoked on state notification
- the component will terminate if this method raises an exception.
- `work()`
- called in the main loop of the component process, on all entities
arriving on input channels. The component will *not* terminate if
this method raises an exception. For termination, `terminate()` must
be called.
- `finalize()`
- tear down the component (close threads, unregister resources, etc).
Inheriting classes MUST call the constructor:
class StagingComponent(rpu.Component):
def __init__(self, cfg, session):
rpu.Component.__init__(self, cfg, session)
A component thus must be passed a configuration (either as a path pointing
to a file name to be opened as `ru.Config`, or as a pre-populated
`ru.Config` instance). That config MUST contain a session ID (`sid`) for
the session under which to run this component, and a uid for the component
itself which MUST be unique within the scope of the given session. It MUST
further contain information about the session's heartbeat ZMQ pubsub channel
(`hb_pub`, `hb_sub`) on which heartbeats are sent and received for lifetime
    management. All components and the session will continuously send
heartbeat messages on that channel - missing heartbeats will by default lead
to session termination.
The config MAY contain `bridges` and `component` sections. If those exist,
the component will start the communication bridges and the components
specified therein, and is then considered an owner of those components and
    bridges. As such, it must watch the HB channel for heartbeats from those
components, and must terminate itself if those go AWOL.
Further, the class must implement the registered work methods, with
a signature of:
work(self, things)
The method is expected to change the state of the 'thing's given. 'Thing's
will not be pushed to outgoing channels automatically -- to do so, the work
method has to call (see call documentation for other options):
self.advance(thing)
Until that method is called, the component is considered the sole owner of
the 'thing's. After that method is called, the 'thing's are considered
disowned by the component. If, however, components return from the work
methods without calling advance on the given 'thing's, then the component
keeps ownership of the 'thing's to advance it asynchronously at a later
point in time. That implies that a component can collect ownership over an
arbitrary number of 'thing's over time, and they can be advanced at the
component's discretion.
The component process is a stand-alone daemon process which runs outside of
Python's multiprocessing domain. As such, it can freely use Python's
multithreading (and it extensively does so by default) - but developers
should be aware that spawning additional *processes* in this component is
    discouraged, as Python's process management does not play well with its
multithreading implementation.
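
    Example (hypothetical sketch of a minimal component; the queue and state
    names used here are illustrative only, not actual RP configuration):

        class StagingComponent(rpu.Component):

            def __init__(self, cfg, session):
                rpu.Component.__init__(self, cfg, session)

            def initialize(self):
                self.register_input(rps.NEW, 'example_in_queue', self.work)
                self.register_output(rps.DONE, 'example_out_queue')

            def work(self, things):
                for thing in things:
                    thing['state'] = rps.DONE   # illustrative state change
                    self.advance(thing)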
'''
# --------------------------------------------------------------------------
#
def __init__(self, cfg, session):
'''
This constructor MUST be called by inheriting classes, as it specifies
the operation mode of the component: components can spawn a child
process, or not.
If a child will be spawned later, then the child process state can be
        initialized by overloading the `initialize()` method.
Note that this policy should be strictly followed by all derived
classes, as we will otherwise carry state over the process fork. That
can become nasty if the state included any form of locking (like, for
profiling or locking).
The symmetric teardown methods are called `finalize()`.
Constructors of inheriting components *may* call start() in their
constructor.
'''
# NOTE: a fork will not duplicate any threads of the parent process --
# but it will duplicate any locks which are shared between the
# parent process and its threads -- and those locks might be in
# any state at this point. As such, each child has to make
# sure to never, ever, use any of the inherited locks, but instead
        to create its own set of locks in self.initialize.
self._cfg = cfg
self._uid = cfg.uid
self._session = session
# we always need an UID
assert(self._uid), 'Component needs a uid (%s)' % type(self)
# state we carry over the fork
self._debug = cfg.get('debug')
self._owner = cfg.get('owner', self.uid)
self._ctype = "%s.%s" % (self.__class__.__module__,
self.__class__.__name__)
self._number = cfg.get('number', 0)
self._name = cfg.get('name.%s' % self._number,
'%s.%s' % (self._ctype, self._number))
self._bridges = list() # communication bridges
self._components = list() # sub-components
self._inputs = dict() # queues to get things from
self._outputs = dict() # queues to send things to
self._workers = dict() # methods to work on things
self._publishers = dict() # channels to send notifications to
self._threads = dict() # subscriber and idler threads
self._cb_lock = mt.RLock()
        # guard threaded callback invocations
        self._work_lock = mt.RLock()
        # guard threaded callback invocations
self._subscribers = dict() # ZMQ Subscriber classes
if self._owner == self.uid:
self._owner = 'root'
self._prof = self._session._get_profiler(name=self.uid)
self._rep = self._session._get_reporter(name=self.uid)
self._log = self._session._get_logger (name=self.uid,
level=self._debug)
# self._prof.register_timing(name='component_lifetime',
# scope='uid=%s' % self.uid,
# start='component_start',
# stop='component_stop')
# self._prof.register_timing(name='entity_runtime',
# scope='entity',
# start='get',
# stop=['put', 'drop'])
# self._prof.prof('init1', uid=self._uid, msg=self._prof.path)
self._q = None
self._in = None
self._out = None
self._poll = None
self._ctx = None
self._thread = None
self._term = mt.Event()
# --------------------------------------------------------------------------
#
def start(self):
sync = mt.Event()
self._thread = mt.Thread(target=self._worker_thread, args=[sync])
self._thread.daemon = True
self._thread.start()
while not sync.is_set():
if not self._thread.is_alive():
raise RuntimeError('worker thread died during initialization')
time.sleep(0.1)
assert(self._thread.is_alive())
# --------------------------------------------------------------------------
#
def _worker_thread(self, sync):
try:
self._initialize()
except Exception:
self._log.exception('worker thread initialization failed')
return
sync.set()
while not self._term.is_set():
try:
ret = self.work_cb()
if not ret:
break
except:
self._log.exception('work cb error [ignored]')
try:
self._finalize()
except Exception:
            self._log.exception('worker thread finalization failed')
# --------------------------------------------------------------------------
#
@staticmethod
def create(cfg, session):
# TODO: We keep this static typemap for component startup. The map
# should really be derived from rp module inspection via an
# `ru.PluginManager`.
#
from radical.pilot import worker as rpw
from radical.pilot import pmgr as rppm
from radical.pilot import tmgr as rptm
from radical.pilot import agent as rpa
from radical.pilot import raptor as rpt
# from radical.pilot import constants as rpc
comp = {
rpc.WORKER : rpt.Worker,
rpc.UPDATE_WORKER : rpw.Update,
rpc.STAGER_WORKER : rpw.Stager,
rpc.PMGR_LAUNCHING_COMPONENT : rppm.Launching,
rpc.TMGR_STAGING_INPUT_COMPONENT : rptm.Input,
rpc.TMGR_SCHEDULING_COMPONENT : rptm.Scheduler,
rpc.TMGR_STAGING_OUTPUT_COMPONENT : rptm.Output,
rpc.AGENT_STAGING_INPUT_COMPONENT : rpa.Input,
rpc.AGENT_SCHEDULING_COMPONENT : rpa.Scheduler,
rpc.AGENT_EXECUTING_COMPONENT : rpa.Executing,
rpc.AGENT_STAGING_OUTPUT_COMPONENT : rpa.Output
}
assert(cfg.kind in comp), '%s not in %s' % (cfg.kind, list(comp.keys()))
return comp[cfg.kind].create(cfg, session)
# --------------------------------------------------------------------------
#
def __str__(self):
return "%s <%s> [%s]" % (self.uid, self.__class__.__name__, self._owner)
# --------------------------------------------------------------------------
#
def _cancel_monitor_cb(self, topic, msg):
'''
We listen on the control channel for cancel requests, and append any
found UIDs to our cancel list.
'''
# FIXME: We do not check for types of things to cancel - the UIDs are
# supposed to be unique. That abstraction however breaks as we
# currently have no abstract 'cancel' command, but instead use
# 'cancel_tasks'.
self._log.debug('command incoming: %s', msg)
cmd = msg['cmd']
arg = msg['arg']
if cmd == 'cancel_tasks':
uids = arg['uids']
if not isinstance(uids, list):
uids = [uids]
self._log.debug('register for cancellation: %s', uids)
with self._cancel_lock:
self._cancel_list += uids
        elif cmd == 'terminate':
self._log.info('got termination command')
self.stop()
else:
self._log.debug('command ignored: %s', cmd)
return True
# --------------------------------------------------------------------------
#
@property
def cfg(self):
return copy.deepcopy(self._cfg)
@property
def session(self):
return self._session
@property
def uid(self):
return self._uid
@property
def ctype(self):
return self._ctype
# --------------------------------------------------------------------------
#
def _initialize(self):
'''
initialization of component base class goes here
'''
# components can always publish logs, state updates and control messages
# self.register_publisher(rpc.LOG_PUBSUB)
self.register_publisher(rpc.STATE_PUBSUB)
self.register_publisher(rpc.CONTROL_PUBSUB)
# set controller callback to handle cancellation requests
self._cancel_list = list()
self._cancel_lock = mt.RLock()
self.register_subscriber(rpc.CONTROL_PUBSUB, self._cancel_monitor_cb)
# call component level initialize
self.initialize()
self._prof.prof('component_init')
def initialize(self):
pass # can be overloaded
# --------------------------------------------------------------------------
#
def _finalize(self):
self._log.debug('_finalize()')
# call component level finalize, before we tear down channels
self.finalize()
for thread in self._threads.values():
thread.stop()
self._log.debug('%s close prof', self.uid)
try:
self._prof.prof('component_final')
self._prof.flush()
self._prof.close()
except Exception:
pass
def finalize(self):
pass # can be overloaded
# --------------------------------------------------------------------------
#
def stop(self, timeout=None): # noqa
'''
We need to terminate and join all threads, close all communication
channels, etc. But we trust on the correct invocation of the finalizers
to do all this, and thus here only forward the stop request to the base
class.
'''
# FIXME: implement timeout, or remove parameter
# (pylint W0613 should be removed if changes to timeout are applied)
self._log.info('stop %s (%s : %s) [%s]', self.uid, os.getpid(),
ru.get_thread_name(), ru.get_caller_name())
self._term.set()
# --------------------------------------------------------------------------
#
def register_input(self, states, input, worker=None):
'''
Using this method, the component can be connected to a queue on which
things are received to be worked upon. The given set of states (which
can be a single state or a list of states) will trigger an assert check
upon thing arrival.
This method will further associate a thing state with a specific worker.
Upon thing arrival, the thing state will be used to lookup the
respective worker, and the thing will be handed over. Workers should
call self.advance(thing), in order to push the thing toward the next
component. If, for some reason, that is not possible before the worker
returns, the component will retain ownership of the thing, and should
call advance() asynchronously at a later point in time.
Worker invocation is synchronous, ie. the main event loop will only
check for the next thing once the worker method returns.
'''
states = ru.as_list(states)
if not states:
states = [None] # worker handles stateless entities
name = '%s.%s.%s' % (self.uid, worker.__name__,
'_'.join([str(s) for s in states]))
if name in self._inputs:
raise ValueError('input %s already registered' % name)
self._inputs[name] = {'queue' : self.get_input_ep(input),
'states' : states}
self._log.debug('registered input %s', name)
# we want exactly one worker associated with a state -- but a worker
# can be responsible for multiple states
for state in states:
self._log.debug('%s register input %s: %s', self.uid, state, name)
if state in self._workers:
self._log.warn("%s replaces worker %s (%s)"
% (self.uid, self._workers[state], state))
self._workers[state] = worker
self._log.debug('registered worker %s [%s]', worker.__name__, state)
# --------------------------------------------------------------------------
#
def unregister_input(self, states, input, worker):
'''
        This method is the inverse of the 'register_input()' method.
'''
states = ru.as_list(states)
if not states:
            states = [None]  # worker handles stateless entities
name = '%s.%s.%s' % (self.uid, worker.__name__,
'_'.join([str(s) for s in states]))
if name not in self._inputs:
self._log.warn('input %s not registered', name)
return
self._inputs[name]['queue'].stop()
del(self._inputs[name])
self._log.debug('unregistered input %s', name)
for state in states:
self._log.debug('%s unregister input %s (%s)', self.uid, name, state)
if state not in self._workers:
self._log.warn('%s input %s unknown', worker.__name__, state)
continue
del(self._workers[state])
# --------------------------------------------------------------------------
#
def register_output(self, states, output):
'''
Using this method, the component can be connected to a queue to which
things are sent after being worked upon. The given set of states (which
can be a single state or a list of states) will trigger an assert check
upon thing departure.
If a state but no output is specified, we assume that the state is
final, and the thing is then considered 'dropped' on calling advance() on
it. The advance() will trigger a state notification though, and then
mark the drop in the log. No other component should ever again work on
such a final thing. It is the responsibility of the component to make
sure that the thing is in fact in a final state.
'''
states = ru.as_list(states)
if not states:
states = [None] # worker handles stateless entities
for state in states:
self._log.debug('%s register output %s:%s', self.uid, state, output)
# we want a *unique* output queue for each state.
if state in self._outputs:
self._log.warn("%s replaces output for %s : %s -> %s"
% (self.uid, state, self._outputs[state], output))
if not output:
# this indicates a final state
self._log.debug('%s register output to None %s', self.uid, state)
self._outputs[state] = None
else:
# non-final state, ie. we want a queue to push to:
self._outputs[state] = self.get_output_ep(output)
# --------------------------------------------------------------------------
#
def get_input_ep(self, input):
'''
return an input endpoint
'''
# dig the addresses from the bridge's config file
fname = '%s/%s.cfg' % (self._cfg.path, input)
cfg = ru.read_json(fname)
return ru.zmq.Getter(input, url=cfg['get'])
# --------------------------------------------------------------------------
#
def get_output_ep(self, output):
'''
return an output endpoint
'''
# dig the addresses from the bridge's config file
fname = '%s/%s.cfg' % (self._cfg.path, output)
cfg = ru.read_json(fname)
return ru.zmq.Putter(output, url=cfg['put'])
# --------------------------------------------------------------------------
#
def unregister_output(self, states):
'''
        this removes any outputs registered for the given states.
'''
states = ru.as_list(states)
if not states:
states = [None] # worker handles stateless entities
for state in states:
self._log.debug('TERM : %s unregister output %s', self.uid, state)
if state not in self._outputs:
self._log.warn('state %s has no output registered', state)
# raise ValueError('state %s has no output registered' % state)
continue
del(self._outputs[state])
self._log.debug('unregistered output for %s', state)
# --------------------------------------------------------------------------
#
def output(self, things, state=None):
'''
        this pushes the given things to the output queue registered for the
        given state
'''
# NOTE: we do not check if things are actually in the given state
things = ru.as_list(things)
if not things:
# nothing to do
return
if state not in self._outputs:
raise ValueError('state %s has no output registered' % state)
if self._outputs[state]:
            # the bridge will sort things into bulks, with bulk size depending on
# bridge configuration
self._outputs[state].put(things)
# --------------------------------------------------------------------------
#
def register_timed_cb(self, cb, cb_data=None, timer=None):
'''
Idle callbacks are invoked at regular intervals -- they are guaranteed
        to *not* be called more frequently than every 'timer' seconds; no promise
        is made about a minimal call frequency. The intent of these callbacks is to
run lightweight work in semi-regular intervals.
'''
name = "%s.idler.%s" % (self.uid, cb.__name__)
self._log.debug('START: %s register idler %s', self.uid, name)
with self._cb_lock:
if name in self._threads:
raise ValueError('cb %s already registered' % cb.__name__)
if timer is None: timer = 0.0 # NOTE: busy idle loop
else : timer = float(timer)
# create a separate thread per idle cb, and let it be watched by the
# ru.Process base class
#
# ------------------------------------------------------------------
# NOTE: idle timing is a tricky beast: if we sleep for too long,
# then we have to wait that long on stop() for the thread to
# get active again and terminate/join. So we always sleep
# just a little, and explicitly check if sufficient time has
# passed to activate the callback.
class Idler(mt.Thread):
# --------------------------------------------------------------
def __init__(self, name, log, timer, cb, cb_data, cb_lock):
self._name = name
self._log = log
self._timeout = timer
self._cb = cb
self._cb_data = cb_data
self._cb_lock = cb_lock
self._last = 0.0
self._term = mt.Event()
super(Idler, self).__init__()
self.daemon = True
self.start()
def stop(self):
self._term.set()
def run(self):
try:
self._log.debug('start idle thread: %s', self._cb)
ret = True
while ret and not self._term.is_set():
if self._timeout and \
self._timeout > (time.time() - self._last):
# not yet
time.sleep(0.1) # FIXME: make configurable
continue
with self._cb_lock:
if self._cb_data is not None:
ret = self._cb(cb_data=self._cb_data)
else:
ret = self._cb()
if self._timeout:
self._last = time.time()
except:
self._log.exception('idle thread failed: %s', self._cb)
# ------------------------------------------------------------------
idler = Idler(name=name, timer=timer, log=self._log,
cb=cb, cb_data=cb_data, cb_lock=self._cb_lock)
self._threads[name] = idler
self._log.debug('%s registered idler %s', self.uid, name)
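        # Illustrative usage (hedged sketch): a heartbeat-style callback which
        # fires at most once every 10 seconds; returning False stops the idler.
        # The callback name is hypothetical.
        #
        #     def _heartbeat_cb(self):
        #         self._log.debug('alive')
        #         return True                       # keep the idler running
        #
        #     self.register_timed_cb(self._heartbeat_cb, timer=10.0)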
# --------------------------------------------------------------------------
#
def unregister_timed_cb(self, cb):
'''
        This method reverts the register_timed_cb() above: it removes an idler
        from the component and terminates the respective thread.
'''
name = "%s.idler.%s" % (self.uid, cb.__name__)
self._log.debug('TERM : %s unregister idler %s', self.uid, name)
with self._cb_lock:
if name not in self._threads:
self._log.warn('timed cb %s is not registered', name)
# raise ValueError('%s is not registered' % name)
return
self._threads[name].stop() # implies join
del(self._threads[name])
self._log.debug("TERM : %s unregistered idler %s", self.uid, name)
# --------------------------------------------------------------------------
#
def register_publisher(self, pubsub):
'''
        Using this method, the component can register itself as a publisher
of notifications on the given pubsub channel.
'''
assert(pubsub not in self._publishers)
# dig the addresses from the bridge's config file
fname = '%s/%s.cfg' % (self._cfg.path, pubsub)
cfg = ru.read_json(fname)
self._publishers[pubsub] = ru.zmq.Publisher(channel=pubsub,
url=cfg['pub'],
log=self._log,
prof=self._prof)
self._log.debug('registered publisher for %s', pubsub)
# --------------------------------------------------------------------------
#
def register_subscriber(self, pubsub, cb):
'''
This method is complementary to the register_publisher() above: it
registers a subscription to a pubsub channel. If a notification
        is received on that channel, the registered callback will be
invoked. The callback MUST have one of the signatures:
callback(topic, msg)
where 'topic' is set to the name of the pubsub channel.
The subscription will be handled in a separate thread, which implies
that the callback invocation will also happen in that thread. It is the
caller's responsibility to ensure thread safety during callback
invocation.
'''
# dig the addresses from the bridge's config file
fname = '%s/%s.cfg' % (self._cfg.path, pubsub)
cfg = ru.read_json(fname)
if pubsub not in self._subscribers:
self._subscribers[pubsub] = ru.zmq.Subscriber(channel=pubsub,
url=cfg['sub'],
log=self._log,
prof=self._prof)
self._subscribers[pubsub].subscribe(topic=pubsub, cb=cb,
lock=self._cb_lock)
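        # Illustrative usage (hedged sketch): publisher and subscriber ends of
        # one channel.  'rpc.CONTROL_PUBSUB' mirrors the rpc.STATE_PUBSUB
        # constant used elsewhere in this module and is assumed here; the
        # callback name is hypothetical.
        #
        #     def _control_cb(self, topic, msg):
        #         self._log.debug('%s: %s', topic, msg)
        #
        #     self.register_publisher (rpc.CONTROL_PUBSUB)
        #     self.register_subscriber(rpc.CONTROL_PUBSUB, self._control_cb)
        #     self.publish(rpc.CONTROL_PUBSUB, {'cmd': 'noop', 'arg': None})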
# --------------------------------------------------------------------------
#
def work_cb(self):
'''
This is the main routine of the component, as it runs in the component
process. It will first initialize the component in the process context.
Then it will attempt to get new things from all input queues
(round-robin). For each thing received, it will route that thing to the
respective worker method. Once the thing is worked upon, the next
        attempt at getting a thing is made.
'''
# if there is nothing to check, idle a bit
if not self._inputs:
time.sleep(0.1)
return True
for name in self._inputs:
input = self._inputs[name]['queue']
states = self._inputs[name]['states']
# FIXME: a simple, 1-thing caching mechanism would likely
# remove the req/res overhead completely (for any
# non-trivial worker).
things = input.get_nowait(500) # in microseconds
things = ru.as_list(things)
if not things:
# return to have a chance to catch term signals
return True
# the worker target depends on the state of things, so we
# need to sort the things into buckets by state before
# pushing them
buckets = dict()
for thing in things:
state = thing.get('state') # can be stateless
uid = thing.get('uid') # and not have uids
self._prof.prof('get', uid=uid, state=state)
if state not in buckets:
buckets[state] = list()
buckets[state].append(thing)
# We now can push bulks of things to the workers
for state,things in buckets.items():
assert(state in states), 'cannot handle state %s' % state
assert(state in self._workers), 'no worker for state %s' % state
try:
to_cancel = list()
for thing in things:
uid = thing.get('uid')
# FIXME: this can become expensive over time
# if the cancel list is never cleaned
if uid and uid in self._cancel_list:
with self._cancel_lock:
self._cancel_list.remove(uid)
to_cancel.append(thing)
self._log.debug('got %s (%s)', uid, state)
if to_cancel:
# only advance stateful entities, otherwise just drop
if state:
self.advance(to_cancel, rps.CANCELED, publish=True,
push=False)
with self._work_lock:
self._workers[state](things)
except Exception:
# this is not fatal -- only the 'things' fail, not
# the component
self._log.exception("work %s failed", self._workers[state])
if state:
self.advance(things, rps.FAILED, publish=True,
push=False)
# keep work_cb registered
return True
# --------------------------------------------------------------------------
#
def advance(self, things, state=None, publish=True, push=False, ts=None,
prof=True):
'''
Things which have been operated upon are pushed down into the queues
again, only to be picked up by the next component, according to their
state model. This method will update the thing state, and push it into
the output queue registered as target for that state.
things: list of things to advance
state: new state to set for the things
publish: determine if state update notifications should be issued
push: determine if things should be pushed to outputs
prof: determine if state advance creates a profile event
(publish, and push are always profiled)
        'Things' are expected to be dictionaries, and to have 'state', 'uid' and
optionally 'type' set.
If 'thing' contains an '$all' key, the complete dict is published;
otherwise, *only the state* is published.
This is evaluated in self.publish.
'''
if not things:
return
if not ts:
ts = time.time()
things = ru.as_list(things)
self._log.debug('advance bulk: %s [%s, %s]', len(things), push, publish)
# assign state, sort things by state
buckets = dict()
for thing in things:
uid = thing['uid']
# if thing['type'] not in ['task', 'pilot']:
# raise TypeError("thing has unknown type (%s)" % uid)
if state:
# state advance done here
thing['state'] = state
_state = thing['state']
if prof:
self._prof.prof('advance', uid=uid, state=_state, ts=ts)
if _state not in buckets:
buckets[_state] = list()
buckets[_state].append(thing)
# should we publish state information on the state pubsub?
if publish:
to_publish = list()
# If '$all' is set, we update the complete thing_dict.
# Things in final state are also published in full.
# If '$set' is set, we also publish all keys listed in there.
# In all other cases, we only send 'uid', 'type' and 'state'.
for thing in things:
if '$all' in thing:
del(thing['$all'])
if '$set' in thing:
del(thing['$set'])
to_publish.append(thing)
elif thing['state'] in rps.FINAL:
to_publish.append(thing)
else:
tmp = {'uid' : thing['uid'],
'type' : thing['type'],
'state' : thing['state']}
if '$set' in thing:
for key in thing['$set']:
tmp[key] = thing[key]
del(thing['$set'])
to_publish.append(tmp)
self.publish(rpc.STATE_PUBSUB, {'cmd': 'update', 'arg': to_publish})
# ts = time.time()
# for thing in things:
# self._prof.prof('publish', uid=thing['uid'],
# state=thing['state'], ts=ts)
        # never carry $all across component boundaries!
for thing in things:
if '$all' in thing:
del(thing['$all'])
# should we push things downstream, to the next component
if push:
# the push target depends on the state of things, so we need to sort
# the things into buckets by state before pushing them
# now we can push the buckets as bulks
for _state,_things in buckets.items():
# ts = time.time()
if _state in rps.FINAL:
# things in final state are dropped
for thing in _things:
self._log.debug('final %s [%s]', thing['uid'], _state)
self._prof.prof('drop', uid=thing['uid'], state=_state,
ts=ts)
continue
if _state not in self._outputs:
# unknown target state -- error
for thing in _things:
self._log.debug("lost %s [%s]", thing['uid'], _state)
self._prof.prof('lost', uid=thing['uid'], state=_state,
ts=ts)
continue
if not self._outputs[_state]:
# empty output -- drop thing
for thing in _things:
self._log.debug('drop %s [%s]', thing['uid'], _state)
self._prof.prof('drop', uid=thing['uid'], state=_state,
ts=ts)
continue
output = self._outputs[_state]
# push the thing down the drain
self._log.debug('put bulk %s: %s', _state, len(_things))
output.put(_things)
ts = time.time()
for thing in _things:
self._prof.prof('put', uid=thing['uid'], state=_state,
msg=output.name, ts=ts)
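        # Illustrative usage (hedged sketch): a worker method which either
        # forwards or fails things.  The target state and the _try() helper
        # are hypothetical; rps.FAILED is used as in work_cb() above.
        #
        #     def work(self, things):
        #         ok, nok = list(), list()
        #         for thing in things:
        #             (ok if self._try(thing) else nok).append(thing)
        #         # publish state updates and push to the next component
        #         self.advance(ok,  rps.AGENT_STAGING_OUTPUT_PENDING,
        #                      publish=True, push=True)
        #         # failed things are published but not pushed downstream
        #         self.advance(nok, rps.FAILED, publish=True, push=False)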
# --------------------------------------------------------------------------
#
def publish(self, pubsub, msg):
'''
push information into a publication channel
'''
if not self._publishers.get(pubsub):
raise RuntimeError("no msg route for '%s': %s" % (pubsub, msg))
self._publishers[pubsub].put(pubsub, msg)
# ------------------------------------------------------------------------------
#
class Worker(Component):
'''
A Worker is a Component which cannot change the state of the thing it
handles. Workers are employed as helper classes to mediate between
    components, between components and the database, and between components and
notification channels.
'''
# --------------------------------------------------------------------------
#
def __init__(self, cfg, session):
Component.__init__(self, cfg=cfg, session=session)
# ------------------------------------------------------------------------------
|
conftest.py
|
import pytest
import time
from context import HGECtx, HGECtxError, EvtsWebhookServer, HGECtxGQLServer, GQLWsClient
import threading
import random
from datetime import datetime
import sys
import os
def pytest_addoption(parser):
parser.addoption(
"--hge-urls",
metavar="HGE_URLS",
help="csv list of urls for graphql-engine",
required=False,
nargs='+'
)
parser.addoption(
"--pg-urls", metavar="PG_URLS",
help="csv list of urls for connecting to Postgres directly",
required=False,
nargs='+'
)
parser.addoption(
"--hge-key", metavar="HGE_KEY", help="admin secret key for graphql-engine", required=False
)
parser.addoption(
"--hge-webhook", metavar="HGE_WEBHOOK", help="url for graphql-engine's access control webhook", required=False
)
parser.addoption(
"--test-webhook-insecure", action="store_true",
help="Run Test cases for insecure https webhook"
)
parser.addoption(
"--hge-jwt-key-file", metavar="HGE_JWT_KEY_FILE", help="File containting the private key used to encode jwt tokens using RS512 algorithm", required=False
)
parser.addoption(
"--hge-jwt-conf", metavar="HGE_JWT_CONF", help="The JWT conf", required=False
)
parser.addoption(
"--test-cors", action="store_true",
required=False,
help="Run testcases for CORS configuration"
)
parser.addoption(
"--test-ws-init-cookie",
metavar="read|noread",
required=False,
help="Run testcases for testing cookie sending over websockets"
)
parser.addoption(
"--test-metadata-disabled", action="store_true",
help="Run Test cases with metadata queries being disabled"
)
parser.addoption(
"--test-graphql-disabled", action="store_true",
help="Run Test cases with GraphQL queries being disabled"
)
parser.addoption(
"--test-hge-scale-url",
metavar="<url>",
required=False,
help="Run testcases for horizontal scaling"
)
# By default,
# 1) Set default parallelism to one
# 2) Set test grouping to by filename (--dist=loadfile)
def pytest_cmdline_preparse(config, args):
worker = os.environ.get('PYTEST_XDIST_WORKER')
if 'xdist' in sys.modules and not worker: # pytest-xdist plugin
num = 1
args[:] = ["-n" + str(num),"--dist=loadfile"] + args
def pytest_configure(config):
if is_master(config):
config.hge_ctx_gql_server = HGECtxGQLServer()
if not config.getoption('--hge-urls'):
print("hge-urls should be specified")
if not config.getoption('--pg-urls'):
print("pg-urls should be specified")
config.hge_url_list = config.getoption('--hge-urls')
config.pg_url_list = config.getoption('--pg-urls')
if config.getoption('-n', default=None):
xdist_threads = config.getoption('-n')
assert xdist_threads <= len(config.hge_url_list), "Not enough hge_urls specified, Required " + str(xdist_threads) + ", got " + str(len(config.hge_url_list))
assert xdist_threads <= len(config.pg_url_list), "Not enough pg_urls specified, Required " + str(xdist_threads) + ", got " + str(len(config.pg_url_list))
random.seed(datetime.now())
@pytest.hookimpl(optionalhook=True)
def pytest_configure_node(node):
node.slaveinput["hge-url"] = node.config.hge_url_list.pop()
node.slaveinput["pg-url"] = node.config.pg_url_list.pop()
def pytest_unconfigure(config):
config.hge_ctx_gql_server.teardown()
@pytest.fixture(scope='module')
def hge_ctx(request):
config = request.config
print("create hge_ctx")
if is_master(config):
hge_url = config.hge_url_list[0]
else:
hge_url = config.slaveinput["hge-url"]
if is_master(config):
pg_url = config.pg_url_list[0]
else:
pg_url = config.slaveinput["pg-url"]
hge_key = config.getoption('--hge-key')
hge_webhook = config.getoption('--hge-webhook')
webhook_insecure = config.getoption('--test-webhook-insecure')
hge_jwt_key_file = config.getoption('--hge-jwt-key-file')
hge_jwt_conf = config.getoption('--hge-jwt-conf')
ws_read_cookie = config.getoption('--test-ws-init-cookie')
metadata_disabled = config.getoption('--test-metadata-disabled')
hge_scale_url = config.getoption('--test-hge-scale-url')
try:
hge_ctx = HGECtx(
hge_url=hge_url,
pg_url=pg_url,
hge_key=hge_key,
hge_webhook=hge_webhook,
webhook_insecure=webhook_insecure,
hge_jwt_key_file=hge_jwt_key_file,
hge_jwt_conf=hge_jwt_conf,
ws_read_cookie=ws_read_cookie,
metadata_disabled=metadata_disabled,
hge_scale_url=hge_scale_url
)
except HGECtxError as e:
pytest.exit(str(e))
yield hge_ctx # provide the fixture value
print("teardown hge_ctx")
hge_ctx.teardown()
time.sleep(1)
@pytest.fixture(scope='class')
def evts_webhook(request):
webhook_httpd = EvtsWebhookServer(server_address=('127.0.0.1', 5592))
web_server = threading.Thread(target=webhook_httpd.serve_forever)
web_server.start()
yield webhook_httpd
webhook_httpd.shutdown()
webhook_httpd.server_close()
web_server.join()
@pytest.fixture(scope='class')
def ws_client(request, hge_ctx):
client = GQLWsClient(hge_ctx)
time.sleep(0.1)
yield client
client.teardown()
@pytest.fixture(scope='class')
def setup_ctrl(request, hge_ctx):
"""
    This fixture is used to store the state of test setup in some test classes.
    Used primarily when teardown is skipped for test cases that are not expected to change the database state.
"""
setup_ctrl = { "setupDone" : False }
yield setup_ctrl
hge_ctx.may_skip_test_teardown = False
request.cls().do_teardown(setup_ctrl, hge_ctx)
def is_master(config):
"""True if the code running the given pytest.config object is running in a xdist master
node or not running xdist at all.
"""
return not hasattr(config, 'slaveinput')
|
1mtc_south.py
|
from __future__ import print_function
from pyfrac.utils import pyfraclogger
from pyfrac.control import keyboard
from pyfrac.acquire import capture
import multiprocessing
import atexit
import json
import pika
import time
import os
import logging
logging.getLogger('pika').setLevel(logging.DEBUG)
logger = pyfraclogger.pyfraclogger(tofile=True)
RPC_QUEUE_NAME = "1mtcSouth_ir_queue"
RPC_VHOST = "/ir"
IR_IMAGE_DIR = os.getenv('mtc_ir_dir')
SOUTH_IR_IMG_DIR = os.path.join(IR_IMAGE_DIR, 'South', 'live')
SOUTH_IRCAM_IP = os.getenv("south_ircam_ip")
SOUTH_IRCAM_FTP_UNAME = os.getenv("south_ircam_ftp_uname")
SOUTH_IRCAM_FTP_PASS = os.getenv("south_ircam_ftp_pass")
# String to insert in the filename
SOUTH_LOC_STRING = "south"
def _initialize(cam_lock, capture_event, frames_captured,
count, recent_error, interval, capture_die):
"""
Setup the global events that will be used
to trigger the capture loop's different functions
in separate processes
Parameters:
----------
cam_lock: `multiprocessing.Lock`
For obtaining exclusive lock so that two
commands cannot be sent to the camera
simultaneously.
.. note: Camera's buffer overflows when it gets hit by
commands at more than 1Hz.
capture_event: `multiprocessing.Event`
This will be used to trigger the capture
start on the cam
frames_captured: `multiprocessing.Manager.Value`
This will be used to exchange the number of frames captured
within the capture loop
count: `multiprocessing.Manager.Value`
This will be used to exchange the number of frames
to be captured within the capture loop
recent_error: `multiprocessing.Manager.Value`
This will be used to report the most recent error that
        occurred during capture or some other process
interval: `multiprocessing.Manager.Value`
This will be used to exchange the number of seconds
to wait between successive frame captures
within the capture loop
"""
logger.info("INITIALIZING")
_capture.cam_lock = cam_lock
_capture.capture_event = capture_event
_capture.frames_captured = frames_captured
_capture.count = count
_capture.recent_error = recent_error
_capture.interval = interval
_capture.capture_die = capture_die
def _capture(cam, *args):
"""
Responsible for capturing images from the camera.
!!Do not call this method manually!!
.. note: Refer `_initialize()`
Parameters:
----------
cam: ICDA320 camera object
Camera object using which capture
operations needs to be performed
"""
multiprocessing.current_process().name = "IRCaptureLoop"
_capture.frames_captured.value = 0
try:
while not _capture.capture_die.get():
try:
_capture.capture_event.wait()
with _capture.cam_lock:
start_time = time.time()
if _capture.count.get() == -1:
fname = str(cam.capture(img_name=str(SOUTH_LOC_STRING)+"-")) +\
".jpg"
cam.fetch(filename="", pattern="jpg")
_capture.frames_captured.value += 1
elif _capture.count.get() > 0:
fname = str(cam.capture(img_name=str(SOUTH_LOC_STRING)+"-")) +\
".jpg"
cam.fetch(filename="", pattern="jpg")
# Increment frames captured count
_capture.frames_captured.value += 1
_capture.count.value -= 1
elif _capture.count.get() == 0:
_capture.capture_event.clear()
time.sleep(_capture.interval.get() - (time.time() - start_time))
except Exception as ex:
logger.error("Error in _capture process: "+str(ex))
_capture.recent_error.value = "Error in _capture process: "+str(ex)
#_capture.capture_event.clear()
continue
else:
cam.cleanup()
except KeyboardInterrupt as ki:
logger.info("Exiting from "+str(multiprocessing.current_process().name))
def camera_commands(cam, cam_lock, capture_event, frames_captured,
count, recent_error, interval, command_dict):
"""
Perform actions on the camera based on
the command dictionary
Parameters:
----------
cam: ICDA320 camera object
cam_lock: `multiprocessing.Lock`
For obtaining exclusive lock so that two
commands cannot be sent to the camera
simultaneously.
.. note: Camera's buffer overflows when it gets hit by
commands at more than 1Hz.
capture_event: `multiprocessing.Event`
This will be used to trigger the capture
start on the cam
frames_captured: `multiprocessing.Manager.Value`
This will be used to exchange the number of frames captured
within the capture loop
count: `multiprocessing.Manager.Value`
This will be used to exchange the number of frames
to be captured within the capture loop
recent_error: `multiprocessing.Manager.Value`
This will be used to report the most recent error that
        occurred during capture or some other process
interval: `multiprocessing.Manager.Value`
This will be used to exchange the number of seconds
to wait between successive frame captures
within the capture loop
command_dict: dictionary containing (k,v)
pairs for following keys:
capture: `bool`
interval: `str`
stop: `bool`
status: `bool`
focus: `int`
zoom: `int`
"""
def _current_status(msg="", **kwargs):
"""
This function will return the status
of the capture system
Parameters:
----------
msg: str, optional
If any custom message needs to be returned
"""
with cam_lock:
kwargs.update({
"capture": count.get(),
"interval": interval.get(),
"zoom": cam.zoom(),
"focus": cam.focus(),
"frames_captured": frames_captured.get(),
"msg": msg,
"recent_error": recent_error.get()
})
return kwargs
try:
if command_dict["stop"]:
# Stop capturing images
logger.info("Stopping current capture")
capture_event.clear()
if command_dict["status"]:
return _current_status()
if command_dict["zoom"] > 0:
cam.zoom(int(command_dict["zoom"]))
if command_dict["focus"]:
cam.focus(command_dict["focus"])
# Make sure before starting capture
# - any previous capture is not running
# - interval value is provided
if command_dict["capture"]:
if not capture_event.is_set():
if command_dict["interval"] > 0:
interval.value = command_dict["interval"]
frames_captured.value = 0
if command_dict["count"] > 0:
# Start capturing X images
count.value = command_dict["count"]
capture_event.set()
elif command_dict["count"] <= -1:
count.value = command_dict["count"]
capture_event.set()
else:
logger.warning("Cannot start capture without the interval field")
else:
logger.warning("Previous capture is already in progress")
return _current_status(msg="Previous capture is already in progress")
return _current_status()
except Exception as ex:
logger.warning("Couldn't execute following camera commands: "+str(ex)+\
"\n"+str(command_dict))
return _current_status(msg="Couldn't execute following camera commands: "+str(ex)+\
"\n"+str(command_dict))
finally:
# Reset the recent error after it has been sent once
recent_error.value = ""
def killChildProc(process, die):
"""
Kills child processes before terminating
due to some non-fatal (and non signal)
interrupt. e.g. ctrl c or an exception
"""
logger.warning("Killing: " + str(process))
die.value = True
time.sleep(2)
process.terminate()
process.join()
if __name__ == "__main__":
# Obtain the camera
logger.info("Obtaining Camera ... ")
south_cam = capture.ICDA320(tn_host=SOUTH_IRCAM_IP,
tn_port=23,
ftp_host=SOUTH_IRCAM_IP,
ftp_port=21,
ftp_username=SOUTH_IRCAM_FTP_UNAME,
ftp_password=SOUTH_IRCAM_FTP_PASS,
ir_image_dir=SOUTH_IR_IMG_DIR)
# Manager responsible for exchanging messages with
# other process
mp_manager = multiprocessing.Manager()
# Setup events and shared Value
cam_lock = multiprocessing.Lock()
capture_event = mp_manager.Event()
recent_error = mp_manager.Value("recent_error", "")
frames_captured = mp_manager.Value('frames_captured', 0)
count = mp_manager.Value('count', 0)
interval = mp_manager.Value('interval', 0)
die = mp_manager.Value('die', False)
# Setup pool, initialize shared objects and start the process
logger.info("Starting camera capture process ... ")
_initialize(cam_lock, capture_event, frames_captured,
count, recent_error, interval, die)
process = multiprocessing.Process(target=_capture, args=(south_cam,))
process.start()
# graceful exit (for SIGINT & SIGQUIT)
atexit.register(killChildProc, process, die)
# RPC connection setup
logger.info("Setting up RPC connection")
credentials = pika.PlainCredentials(os.getenv("rpc_user"), os.getenv("rpc_pass"))
connection = pika.BlockingConnection(
pika.ConnectionParameters(os.getenv("rpc_server"), os.getenv("rpc_port"),
RPC_VHOST, credentials))
channel = connection.channel()
channel.queue_declare(queue=RPC_QUEUE_NAME)
def on_request(ch, method, props, body):
"""
Blocking Function for handling the incoming data
Refer "http://pika.readthedocs.io/en/0.11.2/modules/adapters/blocking.html"
"""
command_dict = json.loads(body)
logger.debug("Correlation id: " + str(props.correlation_id))
response = camera_commands(south_cam, cam_lock, capture_event,
frames_captured, count, recent_error,
interval, command_dict)
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id=props.correlation_id),
body=str(response))
ch.basic_ack(delivery_tag=method.delivery_tag)
try:
channel.basic_qos(prefetch_count=1)
channel.basic_consume(on_request, queue=RPC_QUEUE_NAME)
logger.info("Listening for RPC messages")
channel.start_consuming()
except KeyboardInterrupt as ki:
print()
logger.info("Exiting now")
except Exception as ex:
logger.critical("Critical Exception in main: "+str(ex))
|
agent.py
|
# -*- coding:utf-8 -*-
import math
import time
import numpy as np
import torch
import torch.multiprocessing as mp
from tbase.agents.base.base_agent import BaseAgent
from tbase.agents.base.explore import explore, simple_explore
from tbase.common.cmd_util import make_env
from tbase.common.logger import logger
from tbase.common.replay_buffer import ReplayBuffer
from tbase.network.polices import get_policy_net
class Agent(BaseAgent):
def __init__(self, env, args, *other_args):
# change to random policy
args.policy_net = "Random"
super(Agent, self).__init__(env, args, other_args)
self.policy = get_policy_net(env, args)
self.num_env = args.num_env
self.envs = []
self.states = []
self.memorys = []
for i in range(self.num_env):
env = make_env(args=args)
state = env.reset()
self.envs.append(env)
self.states.append(state)
self.memorys.append(ReplayBuffer(1e5))
self.queue = mp.Queue()
def simple_explore(self):
t_start = time.time()
reward_log, portfolios = simple_explore(
self.envs[0], self.states[0], self.memorys[0],
self.policy, self.args.explore_size, self.args.print_action)
used_time = time.time() - t_start
return np.mean(reward_log), used_time, portfolios
def explore(self):
t_start = time.time()
queue = mp.Queue()
thread_size = int(math.floor(self.args.explore_size / self.num_env))
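        # each of the num_env worker processes explores thread_size steps, so
        # the combined exploration adds up to roughly explore_size; results
        # are merged back from the queue below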
workers = []
for i in range(self.num_env):
worker_args = (i, queue, self.envs[i], self.states[i],
self.memorys[i], self.policy, thread_size,
self.args.print_action)
workers.append(mp.Process(target=explore, args=worker_args))
for worker in workers:
worker.start()
reward_log = []
portfolios = []
for _ in range(self.num_env):
i, _, memory, env, state, rewards, portfolio = queue.get()
self.memorys[i] = memory
self.envs[i] = env
self.states[i] = state
reward_log.extend(rewards)
portfolios.extend(portfolio)
used_time = time.time() - t_start
return np.mean(reward_log), used_time, portfolios
def learn(self):
logger.info("learning started")
i = 0
current_portfolio = 1.0
t_start = time.time()
for i_iter in range(self.args.max_iter_num):
[avg_reward, e_t, ports] = [None] * 3
if self.args.num_env == 1:
avg_reward, e_t, ports = self.simple_explore()
else:
avg_reward, e_t, ports = self.explore()
# NOTE: Don't need update parameters
for p in ports:
i += 1
self.writer.add_scalar('reward/portfolio', p, i)
current_portfolio = p
if current_portfolio > self.best_portfolio:
self.best_portfolio = current_portfolio
logger.info("iter: %d, new best portfolio: %.3f" % (
i_iter + 1, self.best_portfolio))
self.writer.add_scalar('time/explore', e_t, i_iter)
self.writer.add_scalar('reward/policy',
torch.tensor(avg_reward), i_iter)
if (i_iter + 1) % self.args.log_interval == 0:
msg = "total update time: %.1f secs" % (time.time() - t_start)
msg += ", iter=%d, avg_reward=%.3f" % (i_iter + 1, avg_reward)
msg += ", current_portfolio: %.3f" % current_portfolio
logger.info(msg)
self.writer.close()
logger.info("Final best portfolio: %.3f" % self.best_portfolio)
self.save_best_portofolio(self.model_dir)
def eval(self):
pass
|
fact_proj.py
|
from functionalities.rsa import RSA_key
from Crypto.Util import number
from functionalities.sqroot import sqroot
import multiprocessing
from time import perf_counter
from functionalities.time_format import time_format
from functionalities.prime_test import gen_primes
import json
class RSA_cracker():
"""Class for the actual cracker. The constructor requires a RSA public key to initiate this object."""
def __init__(self, key: RSA_key):
self.cores: int = multiprocessing.cpu_count() # num of cores == num of spawned processes
self.key: RSA_key = key
self.queue: multiprocessing.Queue = multiprocessing.Queue() # a way to output from multiprocessing
self.p: int = 0 # prime 1
self.q: int = 0 # prime 2
self.timers_per_thousand: list = [] # every process measures the time it takes to iterate through 1000 numbers. Used for time estimation
self.biggest: int = sqroot(self.key.mod) # biggest possible prime number
self.smallest: int = 2**((key.bitlength//2)-1) # smallest possible prime number
        self.number_count_per_proces: int = (self.biggest - self.smallest) // self.cores # how many numbers there are per core
self.starting_points: list = [int(self.biggest)] # starting points for different cores in order to split the huge number interval between cores
for core in range(self.cores - 1):
self.starting_points.append(self.starting_points[-1] - self.number_count_per_proces) # here we fill it
self.private_key: int = None # output
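        # Worked example (hedged, illustrative numbers): for a 32-bit modulus
        # the search interval is roughly [2**15, 2**16]; with 4 cores,
        # number_count_per_proces = (2**16 - 2**15) // 4 = 8192 and the
        # starting points become 65536, 57344, 49152, 40960 -- each process
        # then walks downward from its own starting point.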
def factorization(self, starting_point: int):
"""Method that cracks the modulo. Is passed to a multiprocessing library."""
max_fact = starting_point
if max_fact % 2 == 0:
max_fact -= 1 # we want to start on an odd number
res = max_fact % 1000 # residuum after dividing by 1000 (to find the point where we stop measuring time)
ref = perf_counter() # time reference
last_stop = 0 # where do we stop measuring time and start going full speed
for i in range(max_fact, 1, -2): # loop with time measuring, hopping on odd numbers from higher to lower
if self.key.mod % i == 0: # have we found the prime number?
self.queue.put([i, self.key.mod//i]) # we output list of the 2 primes
if i % 1000 == res: # have we reached 1000 iterations yet?
self.queue.put(perf_counter() - ref) # we output it into the same queue in order not to wait for time measurement when answer has already been found
last_stop = i # save where we interrupt the loop
break
        for i in range(last_stop, 1, -2): # continue cracking at full speed
if self.key.mod % i == 0:
self.queue.put([i, self.key.mod//i])
def factorization_with_prime_test(self, starting_point: int):
"""Discontinued, experimental, slower version of factorization. Do not use this!"""
ref = perf_counter()
last_stop = 0
for i in gen_primes(starting_point): # uses primetest: gen_primes to generate numbers to iterate through.
if self.key.mod % i == 0:
self.queue.put([i, self.key.mod // i])
            if starting_point - i >= 1000: # we cannot measure the 1000 numbers exactly here because we skip a lot of numbers
self.queue.put(perf_counter() - ref)
last_stop = i
break
for i in gen_primes(last_stop): # same logic as in factorization
if self.key.mod % i == 0:
self.queue.put([i, self.key.mod//i])
def stop(self, processes): # processes cannot be a cracker's attribute due to a bug on MS Windows' implementation of Python 3.9 and newer
"""Takes Iterable of cracker's processes and stops them."""
for process in processes:
process.terminate()
def start(self, with_prime_test: bool = False, silent: bool = False):
"""Start the cracker. Please do not use the version with prime test."""
processes = [] # list of processes in order to stop them when needed
for i in range(self.cores): # MULTIPROCESSING LOGIC
pr: multiprocessing.Process = None
if with_prime_test: # seriously pls don't use this, it froze my PC
pr = multiprocessing.Process(target=self.factorization_with_prime_test, args=(self.starting_points[i],))
else: # this is much better and resource-light
pr = multiprocessing.Process(target=self.factorization, args=(self.starting_points[i],))
processes.append(pr)
pr.start()
try: # to gracefully stop when being KeyboardInterrupted
            temp = None # temporary variable holding whatever comes off the multiprocessing queue (either the answer or a time estimate)
while temp is None: # we basically just cycle through and await the answers from the queue
temp = self.queue.get()
if isinstance(temp, list) and len(temp) == 2: # this means that in the queue is a list of the 2 primes
self.p, self.q = temp
break # no need to continue, we got the answer
if isinstance(temp, float): # in the queue is a time period per 1000 iterated numbers of one of the cores
self.timers_per_thousand.append(temp)
if len(self.timers_per_thousand) == self.cores: # if the list is full (every core provided an answer)
avg_time = sum(self.timers_per_thousand) / self.cores # average time from the list
avg_time = avg_time * ((self.biggest - self.smallest) // 1000) # how long would it take to go through the whole interval at this speed?
if not silent: # used in cli.py, if you don't want any output, you won't get it. Ez pz
print(f"Estimated time: {time_format(avg_time)}")
                temp = None # we discard the temporary value and await further outputs from the queue
self.private_key = number.inverse(self.key.public, (self.p - 1) * (self.q - 1)) # private key calculation
finally: # here we kill all processes
self.stop(processes)
def save_key(self, path: str):
"""Save the RSA key to a JSON file. Returns True if the saving went through, False otherwise."""
if not path.endswith(".json"):
path += ".json"
try:
with open(path, 'w') as out:
jstream = {"rsa_modulo": self.key.mod, "rsa_public_exponent": self.key.public, "rsa_private_key": self.private_key}
out.write(json.dumps(jstream))
return path
except IOError:
return False
if __name__ == "__main__": # testing purposes
bitlength = abs(int(input("Enter bitlength of modulo: ")))
key = RSA_key().generate(bitlength)
print(f"Modulo is: {key.mod}")
print(f"Public exponent is: {key.public}")
cracker = RSA_cracker(key)
ref = perf_counter()
print("Cracking, please wait...")
cracker.start()
print(f"Found primes: {cracker.p}, {cracker.q}")
print(f"Found private exponent: {cracker.private_key}")
print("Time in seconds: %.2f" % (perf_counter() - ref))
input("Program finished, press enter.")
|
static.py
|
import datetime
import telegram
from threading import Thread
from utility.setting import system_path
f = open(f'{system_path}/utility/user.txt')
lines = f.readlines()
bot = lines[2].strip()
user_id = lines[3].strip()
f.close()
def telegram_msg(text):
try:
telegram.Bot(bot).sendMessage(chat_id=user_id, text=text)
except Exception as e:
        print(f'Telegram configuration error - telegram_msg {e}')
def thread_decorator(func):
def wrapper(*args):
Thread(target=func, args=args, daemon=True).start()
return wrapper
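# Illustrative usage (hedged sketch): any function decorated this way runs in
# a daemon thread, so the caller returns immediately.  The function name is
# hypothetical.
#
#     @thread_decorator
#     def send_alert(text):
#         telegram_msg(text)
#
#     send_alert('order filled')   # returns at once, message sent in background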
def now():
return datetime.datetime.now()
def timedelta_sec(second, std_time=None):
if std_time is None:
next_time = now() + datetime.timedelta(seconds=second)
else:
next_time = std_time + datetime.timedelta(seconds=second)
return next_time
def timedelta_hour(hour, std_time=None):
if std_time is None:
next_time = now() + datetime.timedelta(hours=hour)
else:
next_time = std_time + datetime.timedelta(hours=hour)
return next_time
def timedelta_day(day, std_time=None):
if std_time is None:
next_time = now() + datetime.timedelta(days=day)
else:
next_time = std_time + datetime.timedelta(days=day)
return next_time
def strp_time(timetype, str_time):
return datetime.datetime.strptime(str_time, timetype)
def strf_time(timetype, std_time=None):
if std_time is None:
str_time = now().strftime(timetype)
else:
str_time = std_time.strftime(timetype)
return str_time
|
LR4.py
|
"""
Licensed under the Unlicense License;
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://unlicense.org
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import threading
import time
import numpy as np
import cv2
import sys
from PyQt5 import QtWidgets
from PyQt5.QtGui import QPixmap
import qimage2ndarray
import random
from tkinter import filedialog
import tkinter
root = tkinter.Tk()
root.withdraw()
import gui_4
def valmap(value, istart, istop, ostart, ostop):
return ostart + (ostop - ostart) * ((value - istart) / (istop - istart))
class LR4(QtWidgets.QMainWindow, gui_4.Ui_MainWindow):
def __init__(self):
super().__init__()
self.setupUi(self)
self.btn_camera_start.clicked.connect(self.camera_start)
self.btn_camera_stop.clicked.connect(self.camera_stop)
self.btn_browse.clicked.connect(self.browse_file)
self.btn_save_current.clicked.connect(self.save_current)
self.btn_start_multiple.clicked.connect(self.start_multiple)
self.btn_stop_multiple.clicked.connect(self.stop_multiple)
self.cv_cap = None
self.camera_running = False
self.save_running = False
self.current_frame = np.zeros((480, 640, 3), np.uint8)
def camera_start(self):
self.camera_running = True
if self.check_dshow.isChecked():
self.cv_cap = cv2.VideoCapture(self.camera_id.value(), cv2.CAP_DSHOW)
else:
self.cv_cap = cv2.VideoCapture(self.camera_id.value())
thread = threading.Thread(target=self.cv_thread)
thread.start()
pass
def camera_stop(self):
self.camera_running = False
def browse_file(self):
files = [('PNG Image', '*.png'),
('JPG Image', '*.jpg')]
file = filedialog.asksaveasfilename(filetypes=files, defaultextension=files)
if file is not None and len(file) > 0:
self.line_file.setText(file)
def save_current(self):
if self.camera_running:
if len(self.line_file.text()) > 0:
cv2.imwrite(self.line_file.text(), self.current_frame)
print('File ' + str(self.line_file.text()) + ' saved.')
else:
print('Empty filename!')
else:
print('Camera not started!')
def start_multiple(self):
if self.camera_running:
if len(self.line_file.text()) > 0:
self.btn_start_multiple.setEnabled(False)
self.save_running = True
thread = threading.Thread(target=self.multiple_saving)
thread.start()
else:
print('Empty filename!')
else:
print('Camera not started!')
def multiple_saving(self):
iterations_counter = 0
files_counter = 0
# radio_infinite, radio_limit, spin_limit, spin_interval
while self.save_running:
filename_base = self.line_file.text()
filename = os.path.splitext(filename_base)[0] + '_' + str(files_counter) + \
os.path.splitext(filename_base)[1]
cv2.imwrite(filename, self.current_frame)
self.label_saved_files.setText('Saved ' + str(files_counter + 1) + ' files.')
print('File ' + filename + ' saved.')
if self.radio_limit.isChecked():
self.label_saved_files.setText('Passed ' + str(iterations_counter + 1) + '/'
+ str(self.spin_limit.value()) + ' iterations.')
time.sleep(self.spin_interval.value() / 2)
iterations_counter += 1
if iterations_counter == self.spin_limit.value():
self.stop_multiple()
print('Done.')
self.label_saved_files.setText('Saved ' + str(files_counter + 1) + ' files.')
if self.radio_limit.isChecked():
time.sleep(self.spin_interval.value() / 2)
else:
time.sleep(self.spin_interval.value())
files_counter += 1
def stop_multiple(self):
self.save_running = False
self.btn_start_multiple.setEnabled(True)
def cv_thread(self):
while self.camera_running:
ret, img = self.cv_cap.read()
# Color space
if self.radio_color_hsv.isChecked():
img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
if self.radio_color_grayscale.isChecked():
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Brightness + Contrast
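            # Assuming the Qt sliders run from 0 to 100, a value of 50 is
            # neutral: contrast scales pixels by value/50 (50 -> x1.0,
            # 100 -> x2.0) and brightness adds (value - 50) * 4, i.e. an
            # offset of -200..+200, before clipping back to 0..255.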
self.current_frame = img.copy().astype('float32')
self.current_frame = (self.slider_contrast.value() / 50) * self.current_frame \
+ ((self.slider_brightness.value() - 50) * 4)
self.current_frame = np.clip(self.current_frame, 0, 255)
self.current_frame = self.current_frame.astype('uint8')
if self.radio_color_rgb.isChecked():
self.cvl_image.setPixmap(QPixmap.fromImage(qimage2ndarray.array2qimage(
cv2.cvtColor(self.current_frame, cv2.COLOR_BGR2RGB))))
else:
self.cvl_image.setPixmap(QPixmap.fromImage(qimage2ndarray.array2qimage(self.current_frame)))
self.cv_cap.release()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
app.setStyle("fusion")
window = LR4()
window.show()
app.exec_()
|
__init__.py
|
#!/usr/bin/env python
# coding: utf-8
import collections
import itertools
import json
import logging
import multiprocessing
import subprocess
import sys
from multiprocessing import Queue
from time import time
import numpy
import websocket
from . import ffmpeg
from . import utils
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
class SuperliveWebsocketProcess(multiprocessing.Process):
def __init__(self, q_out, ws_url):
"""
:type ws_url: str
:type q_out: queues.Queue
"""
super(SuperliveWebsocketProcess, self).__init__()
self.ws_url = ws_url
self.q_out = q_out
def run(self):
def on_open(ws):
log.debug("super live opened for %s" % self.ws_url)
def frame(ws, msg):
self.q_out.put(msg)
def on_error(ws, error):
log.error("socket error %s" % error)
self.q_out.put(None)
self.q_out.close()
def on_close(ws):
log.debug("done with live. socket closed! " + self.ws_url)
self.q_out.put(None)
self.q_out.close()
ws = websocket.WebSocketApp(self.ws_url,
on_open=on_open,
on_close=lambda _ws: on_close(_ws),
on_error=lambda _ws, _er: on_error(_ws, _er),
on_message=lambda _ws, _msg: frame(_ws, _msg)
)
ws.run_forever()
class DecodeH264Process(multiprocessing.Process):
def __init__(self, h264_frames_q, bgr24_frames_q, ss="00:00:00", fps="30", scale=(1000, 562)):
super(DecodeH264Process, self).__init__()
self.ss = ss
self.fps = fps
self.scale = scale
self.h264q = h264_frames_q
self.bgrq = bgr24_frames_q
def run(self):
ffmpeg_p = ffmpeg.bgr24_from_stdin_subp(self.fps, self.scale)
bgr_p = multiprocessing.Process(
target=lambda: ffmpeg.enqueue_frames_from_output(ffmpeg_p, self.bgrq, self.scale))
bgr_p.daemon = True
bgr_p.start()
log.debug("decoder() bgrq is %s" % str(self.bgrq))
while True:
bb = self.h264q.get()
if bb is None:
self.bgrq.put(None)
# self.bgrq.close()
log.info("done with decoding!")
return
# log.debug("written %s to decoder" % len(bb))
ffmpeg_p.stdin.write(bb)
def _emitt_image_output(_proc, _emitter, _scale):
"""
:type _emitter: rx.Emitter
:type _scale: tuple
:type _proc: subprocess.Popen
"""
try:
e = None
frame_counter = itertools.count()
while not _proc.poll():
img_size = _scale[0] * _scale[1] * 3
bb = _proc.stdout.read(img_size)
if len(bb) > 0:
try:
ndarr = numpy.frombuffer(bb, dtype=numpy.uint8).reshape((_scale[1], _scale[0], 3))
fn = next(frame_counter)
_emitter.onNext((fn, ndarr))
except Exception as err:
log.error("%s" % err)
e = _proc.poll()
            if e is not None and len(bb) == 0:
break
log.debug("bye ffmpeg %d" % e)
if e == 0:
_emitter.onComplete()
elif e > 0:
_emitter.onError(RuntimeError("ffmpeg exits with code %d" % e))
except Exception:
_emitter.onError(sys.exc_info()[1])
def images_from_url(q: Queue, video_url: str, ss: str = "00:00:00", fps: str = None, scale: tuple = (224, 224),
pix_fmt: str = "bgr24", vf: list = None):
"""
    :param ss: start offset, as a time string "00:00:00"
    :param pix_fmt: raw-codec image format, bgr24 or rgb24
:type scale: tuple (width, height)
:type fps: str
:type video_url: str
:type ss: str
:type pix_fmt: str
:type q: queues.Queue
"""
ffmpeg_p = ffmpeg.images_from_url_subp(fps, scale, video_url, ss, image_format=pix_fmt, vf=vf)
if scale is None:
probe = ffprobe(video_url)
vstream = first_video_stream(probe)
scale = (int(vstream['width']), int(vstream['height']))
reader_p = multiprocessing.Process(target=lambda: ffmpeg.enqueue_frames_from_output(ffmpeg_p, q, scale))
reader_p.daemon = True
return reader_p
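# Illustrative usage (hedged sketch): consume decoded frames from the queue.
# It is assumed, based on how the queue is read elsewhere in this module, that
# enqueue_frames_from_output() puts (frame_number, ndarray) pairs and None at
# end of stream; the URL below is hypothetical.
#
#     q = Queue(maxsize=100)
#     reader = images_from_url(q, "rtmp://example/stream", scale=(224, 224))
#     reader.start()
#     while True:
#         item = q.get()
#         if item is None:              # end of stream
#             break
#         fn, frame = item              # frame is an HxWx3 uint8 array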
def ffprobe(url):
p = subprocess.Popen(["ffprobe", "-v", "error", "-show_streams", "-print_format", "json", url],
stdout=subprocess.PIPE)
p.wait()
if p.poll() != 0:
raise RuntimeError("ffprobe exit code is %s" % p.poll())
ffp = json.loads(p.stdout.read().decode("utf-8"))
return ffp
def first_video_stream(ffprobe_json: dict):
video_stream = list(filter(lambda s: "video" == s.get("codec_type"), ffprobe_json.get("streams")))
if video_stream:
return video_stream[0]
else:
return None
class StitchVideoProcess(multiprocessing.Process):
def __init__(self, frames_q: Queue, out_url: str, fps: str, scale: tuple, pix_fmt: str = "bgr24",
muxer: str = 'flv'):
"""
:type frames_q: queues.Queue
"""
super(StitchVideoProcess, self).__init__()
self.fps = fps
self.scale = scale
self.out_url = out_url
self.q = frames_q
self.container = muxer
self.pix_fmt = pix_fmt
def run(self):
try:
scale_str = "x".join(map(lambda x: str(x), self.scale))
cmd = ["ffmpeg", '-v', 'error', '-y', '-f', 'rawvideo',
'-vcodec', 'rawvideo', '-s', scale_str, '-pix_fmt', self.pix_fmt, '-r', str(self.fps),
'-i', '-', '-an',
'-pix_fmt', 'yuv420p', '-vcodec', 'libx264', '-profile:v', 'baseline', '-crf', '21', '-g',
str(self.fps),
'-b:v', '2400k',
'-f', self.container, self.out_url]
log.debug("popen '%s'" % " ".join(cmd))
ffmpeg_proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, bufsize=3 * self.scale[0] * self.scale[1])
frames_stack = collections.deque()
frames_counter = itertools.count()
next_fn = next(frames_counter)
start_time = time()
frames_processed = 0
for pair in utils.yield_from_queue(self.q, timeout_sec=30.2):
fn, inimg = pair
# log.debug("got pair for stiching fn:%s" % fn)
# validate inimg size
height, width = inimg.shape[:2]
# log.debug("stich input size %sx%s;" % (width, height))
assert height == self.scale[1], "height is different %s != %s" % (height, self.scale[1])
assert width == self.scale[0], "width is different %s != %s" % (width, self.scale[0])
frames_stack.append(pair)
if fn == next_fn:
frames_stack = collections.deque(sorted(frames_stack, key=lambda p: p[0]))
# log.debug("draining stack... next_frame=%s stack size=%s" % (next_fn, len(frames_stack)))
while len(frames_stack) > 0:
p = frames_stack.popleft()
fn = p[0]
img = p[1]
if fn == next_fn:
try:
ffmpeg_proc.stdin.write(img)
except Exception:
log.error("rtmpt ffmpeg failed? exiting", exc_info=True)
# self.frames_q.close()
self.terminate()
return
next_fn = next(frames_counter)
else:
frames_stack.appendleft(p)
break
frames_processed += 1
if frames_processed % 1000 == 0:
print('******** stitch fps = %.02f **********' % (frames_processed / (time() - start_time)))
start_time = time()
frames_processed = 0
log.info("done with stitching!")
# self.frames_q.close()
ffmpeg_proc.stdin.close()
ffmpeg_proc.wait()
return
except Exception:
log.error("video stitch exited. end of stream?", exc_info=True)
|
Painter.py
|
import time
import cv2
import HandTrackingModule as htm
import numpy as np
import random
from util3d import runtime_init, draw_line, draw_ball, draw_dot, draw_cuboid
from interaction import *
from gen3d import gen3d, trace3d
from math import floor
from queue import Queue
from threading import Thread
import azure.cognitiveservices.speech as speechsdk
import matplotlib.pyplot as plt
class Painter:
def __init__(self, opt: Options):
# Display function init
self.plain = np.zeros((480, 640, 3), np.uint8) # plots
self.panel = np.zeros((480, 640, 3), np.uint8) # bottoms
self.img = np.zeros((480, 640, 3), np.uint8) # img
self.result = np.zeros((480, 640, 3), np.uint8)
self.show_zone = [0, 0] # the display area
self.absolute_coor = [0, 0] # the absolute coordinate of the paint
self.ax = runtime_init()
# painting init
self.pre_dot = (0, 0, 0)
self.begin_dot = (0, 0, 0)
self.center = (0, 0, 0)
self.color = (255, 255, 0)
self.draw_mode = 'cuboid'
self.Signature = "H.Chen"
self.save_timestamp, self.switch_timestamp, self.text_timestamp, \
self.line_timestamp, self.move_timestamp, self.voice_timestamp = 0, 0, 0, 0, 0, 0
self.radius = 5
# hand detectors
self.detector = htm.handDetctor(detectionCon=0.7)
self.project_name = project_init()
self.file = None
# Options
self.opt = opt
self.opt.preview3d = False
self.opt.view3d = False
self.opt.export3d = False
wCam, hCam = 640, 480
self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
self.cap.set(3, wCam)
self.cap.set(4, hCam)
self.request_queue = Queue()
self.result_queue = Queue()
self.killed = False
def display_init(self):
cv2.rectangle(self.panel, (520, 0), (640, 40), (255, 99, 93), -1)
cv2.rectangle(self.panel, (520, 440), (640, 480), (255, 99, 93), -1)
cv2.rectangle(self.panel, (0, 440), (120, 480), (255, 99, 93), -1)
if time.time()-self.switch_timestamp > 5:
cv2.rectangle(self.panel, (0, 0), (120, 40), (255, 99, 93), -1)
else:
cv2.rectangle(self.panel, (0, 0), (120, 40), (135, 62, 57), -1)
cv2.putText(self.panel, "Clear", (530, 20), cv2.FONT_HERSHEY_PLAIN, 1, (46, 255, 224), 1)
cv2.putText(self.panel, "Save", (10, 460), cv2.FONT_HERSHEY_PLAIN, 1, (46, 255, 224), 1)
cv2.putText(self.panel, "Exit", (530, 460), cv2.FONT_HERSHEY_PLAIN, 1, (46, 255, 224), 1)
cv2.putText(self.panel, self.draw_mode + " mode", (10, 20), cv2.FONT_HERSHEY_PLAIN, 1, (46, 255, 224), 1)
if self.draw_mode == 'move':
cv2.rectangle(self.panel, (160, 0), (480, 40), (116, 153, 255), -1)
cv2.putText(self.panel, "L", (200, 20), cv2.FONT_HERSHEY_PLAIN, 1, (46, 255, 224), 1)
cv2.putText(self.panel, "T", (280, 20), cv2.FONT_HERSHEY_PLAIN, 1, (46, 255, 224), 1)
cv2.putText(self.panel, "B", (360, 20), cv2.FONT_HERSHEY_PLAIN, 1, (46, 255, 224), 1)
cv2.putText(self.panel, "R", (440, 20), cv2.FONT_HERSHEY_PLAIN, 1, (46, 255, 224), 1)
else:
cv2.rectangle(self.panel, (160, 0), (480, 40), (0, 0, 0), -1)
def painter_exit(self):
self.file.close()
self.killed = True
import shutil
shutil.copy('trace.txt', './output/{}/trace.txt'.format(self.project_name))
if self.opt.export3d or self.opt.view3d:
gen3d()
if self.opt.view3d_trace:
trace3d()
exit()
def clear(self):
self.plain = np.zeros((480, 640, 3), np.uint8)
plt.cla()
self.show_zone = [0, 0]
self.file.write("clear\n")
self.pre_dot = (0, 0, 0)
self.center = (0, 0, 0)
def on_EVENT_LBUTTONDOWN(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
if x > 520 and y < 40:
self.clear()
elif x < 120 and y < 40:
self.draw_mode = switch_mode(self.draw_mode)
elif x > 520 and y > 440:
self.painter_exit()
elif x < 120 and y > 440:
self.save_photo()
self.save_timestamp = time.time()
cv2.rectangle(self.img, (0, 440), (120, 480), (0, 0, 0), -1)
def move_panel(self, image, x=None, direction_str: str=None):
if time.time() - self.move_timestamp < 0.5:
return image
self.move_timestamp = time.time()
direction = floor((x - 160) / 80)
self.file.write("move\n")
if direction == 1 or direction_str == 'top': # Top
self.absolute_coor[1] -= 50
if self.show_zone[1] == 0:
image = cv2.copyMakeBorder(image, 50, 0, 0, 0, cv2.BORDER_CONSTANT, value=(0, 0, 0))
else:
self.show_zone[1] -= 50
elif direction == 2 or direction_str == 'down':
self.absolute_coor[1] += 50
if self.show_zone[1] + 480 == image.shape[0]:
image = cv2.copyMakeBorder(image, 0, 50, 0, 0, cv2.BORDER_CONSTANT, value=(0, 0, 0))
self.show_zone[1] += 50
elif direction == 0 or direction_str == 'left':
self.absolute_coor[0] -= 50
if self.show_zone[0] == 0:
image = cv2.copyMakeBorder(image, 0, 0, 50, 0, cv2.BORDER_CONSTANT, value=(0, 0, 0))
else:
self.show_zone[0] -= 50
elif direction == 3 or direction_str == 'right':
self.absolute_coor[0] += 50
if self.show_zone[0] + 640 == image.shape[1]:
image = cv2.copyMakeBorder(image, 0, 0, 0, 50, cv2.BORDER_CONSTANT, value=(0, 0, 0))
self.show_zone[0] += 50
return image
def save_photo(self):
now = datetime.datetime.now()
cv2.imwrite("./output/{}/Image{}.jpg".format(self.project_name, now.strftime("%M_%S")), self.result)
def speech_recognition(self):
try:
azure_license = open("azure_key.txt")
key, region = azure_license.readline()[:-1].split()
print("Using sdk from ", region)
speech_config = speechsdk.SpeechConfig(subscription=key, region=region)
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)
while not self.killed:
if not self.request_queue.empty():
self.request_queue.get()
result = speech_recognizer.recognize_once_async().get()
# print(result.text)
self.result_queue.put(result.text)
except FileNotFoundError as e:
print(e)
print('Please check out voice control wiki at\n'
'https://github.com/HarryXD2018/3DPainter/wiki/Installation#voice-control-service')
def run(self):
with open('trace.txt', 'w') as f:
self.file = f
            f.write(self.project_name + '\n') # the first line is not a drawing command, but gen3d expects it
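            # Trace file format (derived from the write calls below): after
            # the project name, every line is one command --
            #   b x y z r g b          brush segment endpoint and colour
            #   d x y z                single dot
            #   s x y z r              sphere centre and radius
            #   l x1 y1 z1 x2 y2 z2    line between two points
            #   c x1 y1 z1 x2 y2 z2    cuboid between two corners
            #   t x y z text           signature text
            #   move / clear           panel movement and canvas reset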
while True:
success, self.img = self.cap.read()
self.img = cv2.flip(self.img, 1)
self.img = self.detector.findHands(self.img)
lmList = self.detector.findPosition(self.img, draw=False)
self.display_init()
if not self.result_queue.empty():
command: str = self.result_queue.get()
command = command.lower()
print(command)
if 'exit' in command:
self.painter_exit()
elif 'clear' in command:
self.clear()
elif 'mode' in command:
for mode in ['brush', 'ball', 'dot', 'line', 'cuboid', 'text']:
if mode in command:
self.draw_mode = mode
for direction in ['top', 'down', 'left', 'right']:
if direction in command:
self.plain = self.move_panel(self.plain, direction_str=direction)
if len(lmList) != 0:
thumbOpen, firstOpen, secondOpen, thirdOpen, fourthOpen = self.detector.fingerStatus(lmList)
if firstOpen and not secondOpen and not thirdOpen and not fourthOpen:
_, screen_x, screen_y, z = lmList[8]
absolute_x, absolute_y = coor3d((screen_x, screen_y), self.absolute_coor)
plain_x, plain_y = img2plain(screen_x, screen_y, self.show_zone)
# Move
if self.draw_mode == 'move':
if 160 < screen_x < 480 and screen_y < 40:
self.plain = self.move_panel(self.plain, x=screen_x)
# Switch Mode
                        if screen_x < 120 and screen_y < 40 and time.time()-self.switch_timestamp > 10:
                            self.switch_timestamp = time.time()
self.draw_mode = switch_mode(self.draw_mode)
self.pre_dot = (0, 0, 0)
# Clear Plain
elif screen_x > 360 and screen_y < 40:
self.clear()
else:
if self.draw_mode == 'brush':
if self.pre_dot != (0, 0, 0):
cv2.line(self.plain, (plain_x, plain_y), self.pre_dot[:2], self.color, 3)
draw_line(self.ax, (absolute_x, absolute_y, z),
plain2abs(self.pre_dot, coor=self.absolute_coor, zone=self.show_zone), color=self.color)
f.write("b {} {} {} {} {} {}\n".format(absolute_x, absolute_y, z,
self.color[2], self.color[1], self.color[0]))
self.pre_dot = (plain_x, plain_y, z)
elif self.draw_mode == 'ball':
if self.center != (0, 0, 0):
cv2.circle(self.plain, center=self.center[:2],
color=(0, 255, 255), radius=self.radius, thickness=-1)
draw_ball(self.ax, plain2abs(self.center, coor=self.absolute_coor, zone=self.show_zone), self.radius)
str_temp = list(map(str, plain2abs(self.center, coor=self.absolute_coor, zone=self.show_zone)))
str_temp = " ".join(str_temp)
self.file.write("s " + str_temp + " {}\n".format(self.radius))
self.center = (0, 0, 0)
self.radius = 5
elif self.draw_mode == 'line':
if self.begin_dot != (0, 0, 0):
cv2.line(self.img, (screen_x, screen_y),
plain2img(self.begin_dot[0], self.begin_dot[1], self.show_zone),
(205, 0, 255), 3)
elif self.draw_mode == 'cuboid':
if self.begin_dot != (0, 0, 0):
cv2.rectangle(self.img, (screen_x, screen_y),
plain2img(self.begin_dot[0], self.begin_dot[1], self.show_zone),
(245, 255, 79), 3)
elif self.draw_mode == 'dot':
self.pre_dot = (plain_x, plain_y, z)
# Eraser
if firstOpen and secondOpen and not thirdOpen and not fourthOpen:
_, screen_x, screen_y, z = lmList[8]
plain_x, plain_y = img2plain(screen_x, screen_y, self.show_zone)
cv2.rectangle(self.plain, (plain_x-15, plain_y-15), (plain_x+15, plain_y+15), (0, 0, 0), -1)
cv2.rectangle(self.img, (screen_x - 15, screen_y - 15), (screen_x + 15, screen_y + 15), (255, 255, 255), -1)
cv2.rectangle(self.img, (screen_x - 15, screen_y - 15), (screen_x + 15, screen_y + 15), (0, 0, 0), 1)
self.pre_dot = (0, 0, 0)
if firstOpen and fourthOpen and not secondOpen and not thirdOpen:
_, screen_x, screen_y, z = lmList[8]
absolute_x, absolute_y = coor3d((screen_x, screen_y), self.absolute_coor)
plain_x, plain_y = img2plain(screen_x, screen_y, self.show_zone)
if self.draw_mode == 'brush':
self.color = (random.randint(100, 255), random.randint(100, 255), random.randint(100, 255))
# Template, don't draw on img
elif self.draw_mode == 'ball':
self.center = (plain_x, plain_y, z)
cv2.circle(self.img, center=(screen_x, screen_y), color=(0, 200, 200), radius=self.radius, thickness=3)
if int(time.time()) % 2 == 0:
self.radius += 5
elif self.draw_mode == 'dot':
cv2.circle(self.plain, center=(plain_x, plain_y), color=(0, 255, 35), radius=2, thickness=-1)
draw_dot(self.ax, (absolute_x, absolute_y, z))
str_temp = list(map(str, plain2abs(self.pre_dot, self.absolute_coor, self.show_zone)))
self.file.write("d " + " ".join(str_temp) + " \n")
elif self.draw_mode == 'line':
if self.begin_dot == (0, 0, 0):
if time.time()-self.line_timestamp > 2:
self.begin_dot = (plain_x, plain_y, z)
elif abs(self.begin_dot[0] - plain_x) + abs(self.begin_dot[1] - plain_y) > 20:
cv2.line(self.plain, (plain_x, plain_y), self.begin_dot[:2], (205, 0, 255), 3)
draw_line(self.ax, (absolute_x, absolute_y, z),
plain2abs(self.begin_dot, self.absolute_coor, self.show_zone), (205, 0, 255))
str_temp = list(map(str, plain2abs(self.begin_dot, self.absolute_coor, self.show_zone)))
self.file.write("l {} {} {} ".format(absolute_x, absolute_y, z) + " ".join(str_temp) + "\n")
self.begin_dot = (0, 0, 0)
self.line_timestamp = time.time()
elif self.draw_mode == 'cuboid':
if self.begin_dot == (0, 0, 0):
if time.time()-self.line_timestamp > 2:
self.begin_dot = (plain_x, plain_y, z)
elif abs(self.begin_dot[0] - plain_x) + abs(self.begin_dot[1] - plain_y) > 20:
cv2.rectangle(self.plain, (plain_x, plain_y), self.begin_dot[:2], (245, 255, 79), -1)
draw_cuboid(self.ax, (absolute_x, absolute_y, z),
plain2abs(self.begin_dot, self.absolute_coor, self.show_zone), (245, 255, 79))
str_temp = list(map(str, plain2abs(self.begin_dot, self.absolute_coor, self.show_zone)))
self.file.write("c {} {} {} ".format(plain_x, plain_y, z) + " ".join(str_temp) + " \n")
self.begin_dot = (0, 0, 0)
self.line_timestamp = time.time()
elif self.draw_mode == 'text':
if time.time()-self.text_timestamp > 2:
cv2.putText(self.plain, self.Signature, (plain_x, plain_y), cv2.FONT_HERSHEY_PLAIN, 3, (102, 248, 255), 1)
self.file.write("t {} {} {} {}\n".format(absolute_x, absolute_y, z, self.Signature))
self.text_timestamp = time.time()
if not firstOpen:
self.pre_dot = (0, 0, 0)
self.center = (0, 0, 0)
if firstOpen and secondOpen and thumbOpen and thirdOpen and fourthOpen:
if time.time() - self.voice_timestamp > 10:
self.request_queue.put(1)
self.voice_timestamp = time.time()
else:
cv2.circle(self.img, center=(320, 30), color=(0, 0, 255), radius=15, thickness=-1)
cv2.circle(self.img, center=(320, 30), color=(0, 0, 255), radius=25, thickness=2)
temp = self.plain[self.show_zone[1]: (self.show_zone[1]+480), self.show_zone[0]: (self.show_zone[0]+640)]
cv2.imshow("full view", self.plain)
                if time.time() - self.save_timestamp < 0.4:  # camera shutter flash effect
self.img = cv2.add(self.img, self.img)
self.result = cv2.addWeighted(self.img, 0.7, temp, 0.3, 0)
img2gray = cv2.cvtColor(self.panel, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
img1_bg = cv2.bitwise_and(self.result, self.result, mask=mask_inv)
display = cv2.add(img1_bg, self.panel)
cv2.imshow("image", display)
cv2.setMouseCallback("image", self.on_EVENT_LBUTTONDOWN)
if self.opt.preview3d:
plt.pause(0.1)
if cv2.waitKey(2) & 0xFF == 27:
plt.ioff()
break
if __name__ == '__main__':
opt.view3d_trace = False
opt.export3d = False
opt.preview3d = True
painter = Painter(opt)
main_thread = Thread(target=painter.run)
voice_thread = Thread(target=painter.speech_recognition, daemon=True)
main_thread.start()
voice_thread.start()
|
thread_example.py
|
import threading
import keyboard
import time
import numpy as np
import sys
from bledevice import scanble, BLEDevice
STOP = 0
MOVE_FWD = 1
MOVE_BWD = 2
MOVE_R = 3
MOVE_L = 4
IDLE = 9
F = 5
S = 6
Mon = 7
Moff = 8
direction = 1
state = 0
keycode = ""
Device1 = BLEDevice("DD:43:89:16:43:81")
Device2 = BLEDevice("F4:82:B3:50:ED:55")
def print_pressed_keys(e):
global keycode
keycode = str([str(code) for code in keyboard._pressed_events])
keycode = keycode[2:-2]
def func1():
    global keycode, state
    keyboard.hook(print_pressed_keys)
    while True:
if keycode =='17': #W
state = MOVE_FWD
elif keycode =='31': #S
state = MOVE_BWD
elif keycode =='32': #D
state = MOVE_R
elif keycode =='30': #A
state = MOVE_L
elif keycode =='57': #spacebar
state = STOP
elif keycode =='19': #R
state = Mon
elif keycode =='1': #esc
state = Moff
time.sleep(0.01)
motor_OFF()
time.sleep(2)
def data_ON():
Device1.writereq(0xd,'545457550D0A') #RUN_flag
Device2.writereq(0xd,'545457550D0A') #RUN_flag
def data_OFF():
Device1.writereq(0xd,'545446660D0A') #RUN_flag
Device2.writereq(0xd,'545446660D0A') #RUN_flag
def motor_OFF():
Device1.writereq(0xd,'545246680D0A') #RUN_flag
Device2.writereq(0xd,'545246680D0A') #RUN_flag
def motor_ON():
Device1.writereq(0xd,'54524F5F0D0A') #RUN_flag
Device2.writereq(0xd,'54524F5F0D0A') #RUN_flag
def read_data():
print("receive --\n")
#requester1.read_by_handle_async(0x0d, self.response)
#data = self.response.received()[0]
#print("received: ",data)
def M_FWD():
#motor_ON()
print("M_FWD")
global direction
if direction == -1:
time.sleep(0.2)
Device1.writereq(0xd,'544443790D0A')#CCW forward
Device2.writereq(0xd,'544443790D0A')#CCW forward
        direction = 1
Device1.writereq(0xd,'545714950D0A')#2km/h
Device2.writereq(0xd,'545714950D0A')#2km/h
def M_IDLE():
#motor_ON()
#print("MOTOR IDLE\n");
Device1.writereq(0xd,'545705A40D0A')#2km/h
Device2.writereq(0xd,'545705A40D0A')#2km/h
def M_BWD():
#motor_ON()
global direction
if direction == 1:
time.sleep(0.2)
Device1.writereq(0xd,'544457650D0A')#CW backward
Device2.writereq(0xd,'544457650D0A')#CW backward
        direction = -1
Device1.writereq(0xd,'545714950D0A')#2km/h
Device2.writereq(0xd,'545714950D0A')#2km/h
def M_RIGHT():
Device1.writereq(0xd,'545714950D0A')#2km/h
Device2.writereq(0xd,'545732770D0A')#5km/h
def M_LEFT():
Device1.writereq(0xd,'545732770D0A')#5km/h
Device2.writereq(0xd,'545714950D0A')#2km/h
def M_STOP():
Device1.writereq(0xd,'545700A90D0A')#0km/h
Device2.writereq(0xd,'545700A90D0A')#0km/h
def fFASTER():
Device1.writereq(0xd,'547575160D0A')#Spd_Up
Device2.writereq(0xd,'547575160D0A')#Spd_Up
def fSLOWER():
Device1.writereq(0xd,'546464380D0A')#Spd_Down
Device2.writereq(0xd,'546464380D0A')#Spd_Down
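# --- Hedged helper (not part of the original script) -------------------------
# The hard-coded payloads above all share one apparent layout:
#   two ASCII command bytes, one value byte, one checksum byte, then CR LF.
# In every constant the first four bytes sum to 0x154, so the checksum looks
# like (0x54 - sum of the first three bytes) mod 256. This is an inference
# drawn from the literals only, not documented behaviour of the BLE motor
# controller firmware.
def build_packet(cmd, value):
    """Build a hex payload such as '545714950D0A' from a 2-char command and a value byte."""
    body = [ord(cmd[0]), ord(cmd[1]), value & 0xFF]
    checksum = (0x54 - sum(body)) & 0xFF
    return ''.join('{:02X}'.format(b) for b in body + [checksum]) + '0D0A'
# Sanity check against an existing constant (the 2 km/h speed command):
assert build_packet('TW', 0x14) == '545714950D0A'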
keyboard.add_hotkey('w', M_FWD)
keyboard.add_hotkey('a', M_LEFT)
keyboard.add_hotkey('s', M_BWD)
keyboard.add_hotkey('d', M_RIGHT)
keyboard.add_hotkey('space', M_STOP)
keyboard.add_hotkey('esc', motor_OFF)
keyboard.add_hotkey('r', motor_ON)
def bleconnect():
    global state
    motor_OFF()
    motor_ON()
    data_OFF()
    state = IDLE
    while True:
        time.sleep(0.1)
if __name__ == "__main__":
t = threading.Thread(target=func1)
t.start()
t2 = threading.Thread(target=bleconnect)
t2.start()
    while True:
#print("state = ",state,"direction = ",direction);
#print("\n")
time.sleep(0.01)
|
app.py
|
import asyncio
import functools
import json
import logging
import os.path
import threading
import time
import schedule
from lokbot.async_farmer import AsyncLokFarmer
from lokbot.farmer import LokFarmer
from lokbot import project_root, builtin_logger
def find_alliance(farmer: LokFarmer):
while True:
alliance = farmer.api.alliance_recommend().get('alliance')
if alliance.get('numMembers') < alliance.get('maxMembers'):
farmer.api.alliance_join(alliance.get('_id'))
break
time.sleep(60 * 5)
def load_config():
    os.chdir(project_root)
    # Prefer config.json; fall back to the bundled example config.
    if os.path.exists('config.json'):
        with open('config.json') as fp:
            return json.load(fp)
    if os.path.exists('config.example.json'):
        with open('config.example.json') as fp:
            return json.load(fp)
    return {}
thread_map = {}
def run_threaded(name, job_func):
if name in thread_map and thread_map[name].is_alive():
return
job_thread = threading.Thread(target=job_func, name=name)
thread_map[name] = job_thread
job_thread.start()
def async_main(token):
async_farmer = AsyncLokFarmer(token)
asyncio.run(async_farmer.parallel_buy_caravan())
def main(token, captcha_solver_config=None):
# async_main(token)
# exit()
if captcha_solver_config is None:
captcha_solver_config = {}
config = load_config()
if not config.get('socketio').get('debug'):
builtin_logger.setLevel(logging.CRITICAL)
farmer = LokFarmer(token, captcha_solver_config)
farmer.keepalive_request()
threading.Thread(target=farmer.sock_thread).start()
# threading.Thread(target=farmer.socc_thread).start()
for job in config.get('main').get('jobs'):
if not job.get('enabled'):
continue
name = job.get('name')
schedule.every(
job.get('interval').get('start')
).to(
job.get('interval').get('end')
).minutes.do(run_threaded, name, functools.partial(getattr(farmer, name), **job.get('kwargs', {})))
schedule.run_all()
schedule.every(5).to(10).minutes.do(farmer.keepalive_request)
for thread in config.get('main').get('threads'):
if not thread.get('enabled'):
continue
threading.Thread(target=getattr(farmer, thread.get('name')), kwargs=thread.get('kwargs')).start()
while True:
schedule.run_pending()
time.sleep(1)
|
test_basic.py
|
import re
import sys
import time
import uuid
from datetime import datetime
from threading import Thread
import pytest
import werkzeug.serving
from werkzeug.exceptions import BadRequest
from werkzeug.exceptions import Forbidden
from werkzeug.exceptions import NotFound
from werkzeug.http import parse_date
from werkzeug.routing import BuildError
import flask
def test_options_work(app, client):
@app.route("/", methods=["GET", "POST"])
def index():
return "Hello World"
rv = client.open("/", method="OPTIONS")
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
assert rv.data == b""
def test_options_on_multiple_rules(app, client):
@app.route("/", methods=["GET", "POST"])
def index():
return "Hello World"
@app.route("/", methods=["PUT"])
def index_put():
return "Aha!"
rv = client.open("/", method="OPTIONS")
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST", "PUT"]
def test_provide_automatic_options_attr():
app = flask.Flask(__name__)
def index():
return "Hello World!"
index.provide_automatic_options = False
app.route("/")(index)
rv = app.test_client().open("/", method="OPTIONS")
assert rv.status_code == 405
app = flask.Flask(__name__)
def index2():
return "Hello World!"
index2.provide_automatic_options = True
app.route("/", methods=["OPTIONS"])(index2)
rv = app.test_client().open("/", method="OPTIONS")
assert sorted(rv.allow) == ["OPTIONS"]
def test_provide_automatic_options_kwarg(app, client):
def index():
return flask.request.method
def more():
return flask.request.method
app.add_url_rule("/", view_func=index, provide_automatic_options=False)
app.add_url_rule(
"/more",
view_func=more,
methods=["GET", "POST"],
provide_automatic_options=False,
)
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD"]
rv = client.open("/", method="OPTIONS")
assert rv.status_code == 405
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "POST"]
rv = client.open("/more", method="OPTIONS")
assert rv.status_code == 405
def test_request_dispatching(app, client):
@app.route("/")
def index():
return flask.request.method
@app.route("/more", methods=["GET", "POST"])
def more():
return flask.request.method
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS"]
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
def test_disallow_string_for_allowed_methods(app):
with pytest.raises(TypeError):
@app.route("/", methods="GET POST")
def index():
return "Hey"
def test_url_mapping(app, client):
random_uuid4 = "7eb41166-9ebf-4d26-b771-ea3f54f8b383"
def index():
return flask.request.method
def more():
return flask.request.method
def options():
return random_uuid4
app.add_url_rule("/", "index", index)
app.add_url_rule("/more", "more", more, methods=["GET", "POST"])
# Issue 1288: Test that automatic options are not added
# when non-uppercase 'options' in methods
app.add_url_rule("/options", "options", options, methods=["options"])
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS"]
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
rv = client.open("/options", method="OPTIONS")
assert rv.status_code == 200
assert random_uuid4 in rv.data.decode("utf-8")
def test_werkzeug_routing(app, client):
from werkzeug.routing import Submount, Rule
app.url_map.add(
Submount("/foo", [Rule("/bar", endpoint="bar"), Rule("/", endpoint="index")])
)
def bar():
return "bar"
def index():
return "index"
app.view_functions["bar"] = bar
app.view_functions["index"] = index
assert client.get("/foo/").data == b"index"
assert client.get("/foo/bar").data == b"bar"
def test_endpoint_decorator(app, client):
from werkzeug.routing import Submount, Rule
app.url_map.add(
Submount("/foo", [Rule("/bar", endpoint="bar"), Rule("/", endpoint="index")])
)
@app.endpoint("bar")
def bar():
return "bar"
@app.endpoint("index")
def index():
return "index"
assert client.get("/foo/").data == b"index"
assert client.get("/foo/bar").data == b"bar"
def test_session(app, client):
@app.route("/set", methods=["POST"])
def set():
assert not flask.session.accessed
assert not flask.session.modified
flask.session["value"] = flask.request.form["value"]
assert flask.session.accessed
assert flask.session.modified
return "value set"
@app.route("/get")
def get():
assert not flask.session.accessed
assert not flask.session.modified
v = flask.session.get("value", "None")
assert flask.session.accessed
assert not flask.session.modified
return v
assert client.post("/set", data={"value": "42"}).data == b"value set"
assert client.get("/get").data == b"42"
def test_session_using_server_name(app, client):
app.config.update(SERVER_NAME="example.com")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com/")
assert "domain=.example.com" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_server_name_and_port(app, client):
app.config.update(SERVER_NAME="example.com:8080")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/")
assert "domain=.example.com" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_server_name_port_and_path(app, client):
app.config.update(SERVER_NAME="example.com:8080", APPLICATION_ROOT="/foo")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/foo")
assert "domain=example.com" in rv.headers["set-cookie"].lower()
assert "path=/foo" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_application_root(app, client):
class PrefixPathMiddleware:
def __init__(self, app, prefix):
self.app = app
self.prefix = prefix
def __call__(self, environ, start_response):
environ["SCRIPT_NAME"] = self.prefix
return self.app(environ, start_response)
app.wsgi_app = PrefixPathMiddleware(app.wsgi_app, "/bar")
app.config.update(APPLICATION_ROOT="/bar")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/")
assert "path=/bar" in rv.headers["set-cookie"].lower()
def test_session_using_session_settings(app, client):
app.config.update(
SERVER_NAME="www.example.com:8080",
APPLICATION_ROOT="/test",
SESSION_COOKIE_DOMAIN=".example.com",
SESSION_COOKIE_HTTPONLY=False,
SESSION_COOKIE_SECURE=True,
SESSION_COOKIE_SAMESITE="Lax",
SESSION_COOKIE_PATH="/",
)
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://www.example.com:8080/test/")
cookie = rv.headers["set-cookie"].lower()
assert "domain=.example.com" in cookie
assert "path=/" in cookie
assert "secure" in cookie
assert "httponly" not in cookie
assert "samesite" in cookie
def test_session_using_samesite_attribute(app, client):
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
app.config.update(SESSION_COOKIE_SAMESITE="invalid")
with pytest.raises(ValueError):
client.get("/")
app.config.update(SESSION_COOKIE_SAMESITE=None)
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite" not in cookie
app.config.update(SESSION_COOKIE_SAMESITE="Strict")
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite=strict" in cookie
app.config.update(SESSION_COOKIE_SAMESITE="Lax")
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite=lax" in cookie
def test_session_localhost_warning(recwarn, app, client):
app.config.update(SERVER_NAME="localhost:5000")
@app.route("/")
def index():
flask.session["testing"] = 42
return "testing"
rv = client.get("/", "http://localhost:5000/")
assert "domain" not in rv.headers["set-cookie"].lower()
w = recwarn.pop(UserWarning)
assert "'localhost' is not a valid cookie domain" in str(w.message)
def test_session_ip_warning(recwarn, app, client):
app.config.update(SERVER_NAME="127.0.0.1:5000")
@app.route("/")
def index():
flask.session["testing"] = 42
return "testing"
rv = client.get("/", "http://127.0.0.1:5000/")
assert "domain=127.0.0.1" in rv.headers["set-cookie"].lower()
w = recwarn.pop(UserWarning)
assert "cookie domain is an IP" in str(w.message)
def test_missing_session(app):
app.secret_key = None
def expect_exception(f, *args, **kwargs):
e = pytest.raises(RuntimeError, f, *args, **kwargs)
assert e.value.args and "session is unavailable" in e.value.args[0]
with app.test_request_context():
assert flask.session.get("missing_key") is None
expect_exception(flask.session.__setitem__, "foo", 42)
expect_exception(flask.session.pop, "foo")
def test_session_expiration(app, client):
permanent = True
@app.route("/")
def index():
flask.session["test"] = 42
flask.session.permanent = permanent
return ""
@app.route("/test")
def test():
return str(flask.session.permanent)
rv = client.get("/")
assert "set-cookie" in rv.headers
match = re.search(r"(?i)\bexpires=([^;]+)", rv.headers["set-cookie"])
expires = parse_date(match.group())
expected = datetime.utcnow() + app.permanent_session_lifetime
assert expires.year == expected.year
assert expires.month == expected.month
assert expires.day == expected.day
rv = client.get("/test")
assert rv.data == b"True"
permanent = False
rv = client.get("/")
assert "set-cookie" in rv.headers
match = re.search(r"\bexpires=([^;]+)", rv.headers["set-cookie"])
assert match is None
def test_session_stored_last(app, client):
@app.after_request
def modify_session(response):
flask.session["foo"] = 42
return response
@app.route("/")
def dump_session_contents():
return repr(flask.session.get("foo"))
assert client.get("/").data == b"None"
assert client.get("/").data == b"42"
def test_session_special_types(app, client):
now = datetime.utcnow().replace(microsecond=0)
the_uuid = uuid.uuid4()
@app.route("/")
def dump_session_contents():
flask.session["t"] = (1, 2, 3)
flask.session["b"] = b"\xff"
flask.session["m"] = flask.Markup("<html>")
flask.session["u"] = the_uuid
flask.session["d"] = now
flask.session["t_tag"] = {" t": "not-a-tuple"}
flask.session["di_t_tag"] = {" t__": "not-a-tuple"}
flask.session["di_tag"] = {" di": "not-a-dict"}
return "", 204
with client:
client.get("/")
s = flask.session
assert s["t"] == (1, 2, 3)
assert type(s["b"]) == bytes
assert s["b"] == b"\xff"
assert type(s["m"]) == flask.Markup
assert s["m"] == flask.Markup("<html>")
assert s["u"] == the_uuid
assert s["d"] == now
assert s["t_tag"] == {" t": "not-a-tuple"}
assert s["di_t_tag"] == {" t__": "not-a-tuple"}
assert s["di_tag"] == {" di": "not-a-dict"}
def test_session_cookie_setting(app):
is_permanent = True
@app.route("/bump")
def bump():
rv = flask.session["foo"] = flask.session.get("foo", 0) + 1
flask.session.permanent = is_permanent
return str(rv)
@app.route("/read")
def read():
return str(flask.session.get("foo", 0))
def run_test(expect_header):
with app.test_client() as c:
assert c.get("/bump").data == b"1"
assert c.get("/bump").data == b"2"
assert c.get("/bump").data == b"3"
rv = c.get("/read")
set_cookie = rv.headers.get("set-cookie")
assert (set_cookie is not None) == expect_header
assert rv.data == b"3"
is_permanent = True
app.config["SESSION_REFRESH_EACH_REQUEST"] = True
run_test(expect_header=True)
is_permanent = True
app.config["SESSION_REFRESH_EACH_REQUEST"] = False
run_test(expect_header=False)
is_permanent = False
app.config["SESSION_REFRESH_EACH_REQUEST"] = True
run_test(expect_header=False)
is_permanent = False
app.config["SESSION_REFRESH_EACH_REQUEST"] = False
run_test(expect_header=False)
def test_session_vary_cookie(app, client):
@app.route("/set")
def set_session():
flask.session["test"] = "test"
return ""
@app.route("/get")
def get():
return flask.session.get("test")
@app.route("/getitem")
def getitem():
return flask.session["test"]
@app.route("/setdefault")
def setdefault():
return flask.session.setdefault("test", "default")
@app.route("/vary-cookie-header-set")
def vary_cookie_header_set():
response = flask.Response()
response.vary.add("Cookie")
flask.session["test"] = "test"
return response
@app.route("/vary-header-set")
def vary_header_set():
response = flask.Response()
response.vary.update(("Accept-Encoding", "Accept-Language"))
flask.session["test"] = "test"
return response
@app.route("/no-vary-header")
def no_vary_header():
return ""
def expect(path, header_value="Cookie"):
rv = client.get(path)
if header_value:
# The 'Vary' key should exist in the headers only once.
assert len(rv.headers.get_all("Vary")) == 1
assert rv.headers["Vary"] == header_value
else:
assert "Vary" not in rv.headers
expect("/set")
expect("/get")
expect("/getitem")
expect("/setdefault")
expect("/vary-cookie-header-set")
expect("/vary-header-set", "Accept-Encoding, Accept-Language, Cookie")
expect("/no-vary-header", None)
def test_flashes(app, req_ctx):
assert not flask.session.modified
flask.flash("Zap")
flask.session.modified = False
flask.flash("Zip")
assert flask.session.modified
assert list(flask.get_flashed_messages()) == ["Zap", "Zip"]
def test_extended_flashing(app):
# Be sure app.testing=True below, else tests can fail silently.
#
# Specifically, if app.testing is not set to True, the AssertionErrors
# in the view functions will cause a 500 response to the test client
# instead of propagating exceptions.
@app.route("/")
def index():
flask.flash("Hello World")
flask.flash("Hello World", "error")
flask.flash(flask.Markup("<em>Testing</em>"), "warning")
return ""
@app.route("/test/")
def test():
messages = flask.get_flashed_messages()
assert list(messages) == [
"Hello World",
"Hello World",
flask.Markup("<em>Testing</em>"),
]
return ""
@app.route("/test_with_categories/")
def test_with_categories():
messages = flask.get_flashed_messages(with_categories=True)
assert len(messages) == 3
assert list(messages) == [
("message", "Hello World"),
("error", "Hello World"),
("warning", flask.Markup("<em>Testing</em>")),
]
return ""
@app.route("/test_filter/")
def test_filter():
messages = flask.get_flashed_messages(
category_filter=["message"], with_categories=True
)
assert list(messages) == [("message", "Hello World")]
return ""
@app.route("/test_filters/")
def test_filters():
messages = flask.get_flashed_messages(
category_filter=["message", "warning"], with_categories=True
)
assert list(messages) == [
("message", "Hello World"),
("warning", flask.Markup("<em>Testing</em>")),
]
return ""
@app.route("/test_filters_without_returning_categories/")
def test_filters2():
messages = flask.get_flashed_messages(category_filter=["message", "warning"])
assert len(messages) == 2
assert messages[0] == "Hello World"
assert messages[1] == flask.Markup("<em>Testing</em>")
return ""
# Create new test client on each test to clean flashed messages.
client = app.test_client()
client.get("/")
client.get("/test_with_categories/")
client = app.test_client()
client.get("/")
client.get("/test_filter/")
client = app.test_client()
client.get("/")
client.get("/test_filters/")
client = app.test_client()
client.get("/")
client.get("/test_filters_without_returning_categories/")
def test_request_processing(app, client):
evts = []
@app.before_request
def before_request():
evts.append("before")
@app.after_request
def after_request(response):
response.data += b"|after"
evts.append("after")
return response
@app.route("/")
def index():
assert "before" in evts
assert "after" not in evts
return "request"
assert "after" not in evts
rv = client.get("/").data
assert "after" in evts
assert rv == b"request|after"
def test_request_preprocessing_early_return(app, client):
evts = []
@app.before_request
def before_request1():
evts.append(1)
@app.before_request
def before_request2():
evts.append(2)
return "hello"
@app.before_request
def before_request3():
evts.append(3)
return "bye"
@app.route("/")
def index():
evts.append("index")
return "damnit"
rv = client.get("/").data.strip()
assert rv == b"hello"
assert evts == [1, 2]
def test_after_request_processing(app, client):
@app.route("/")
def index():
@flask.after_this_request
def foo(response):
response.headers["X-Foo"] = "a header"
return response
return "Test"
resp = client.get("/")
assert resp.status_code == 200
assert resp.headers["X-Foo"] == "a header"
def test_teardown_request_handler(app, client):
called = []
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route("/")
def root():
return "Response"
rv = client.get("/")
assert rv.status_code == 200
assert b"Response" in rv.data
assert len(called) == 1
def test_teardown_request_handler_debug_mode(app, client):
called = []
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route("/")
def root():
return "Response"
rv = client.get("/")
assert rv.status_code == 200
assert b"Response" in rv.data
assert len(called) == 1
def test_teardown_request_handler_error(app, client):
called = []
app.testing = False
@app.teardown_request
def teardown_request1(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except Exception:
pass
@app.teardown_request
def teardown_request2(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except Exception:
pass
@app.route("/")
def fails():
1 // 0
rv = client.get("/")
assert rv.status_code == 500
assert b"Internal Server Error" in rv.data
assert len(called) == 2
def test_before_after_request_order(app, client):
called = []
@app.before_request
def before1():
called.append(1)
@app.before_request
def before2():
called.append(2)
@app.after_request
def after1(response):
called.append(4)
return response
@app.after_request
def after2(response):
called.append(3)
return response
@app.teardown_request
def finish1(exc):
called.append(6)
@app.teardown_request
def finish2(exc):
called.append(5)
@app.route("/")
def index():
return "42"
rv = client.get("/")
assert rv.data == b"42"
assert called == [1, 2, 3, 4, 5, 6]
def test_error_handling(app, client):
app.testing = False
@app.errorhandler(404)
def not_found(e):
return "not found", 404
@app.errorhandler(500)
def internal_server_error(e):
return "internal server error", 500
@app.errorhandler(Forbidden)
def forbidden(e):
return "forbidden", 403
@app.route("/")
def index():
flask.abort(404)
@app.route("/error")
def error():
1 // 0
@app.route("/forbidden")
def error2():
flask.abort(403)
rv = client.get("/")
assert rv.status_code == 404
assert rv.data == b"not found"
rv = client.get("/error")
assert rv.status_code == 500
assert b"internal server error" == rv.data
rv = client.get("/forbidden")
assert rv.status_code == 403
assert b"forbidden" == rv.data
def test_error_handler_unknown_code(app):
with pytest.raises(KeyError) as exc_info:
app.register_error_handler(999, lambda e: ("999", 999))
assert "Use a subclass" in exc_info.value.args[0]
def test_error_handling_processing(app, client):
app.testing = False
@app.errorhandler(500)
def internal_server_error(e):
return "internal server error", 500
@app.route("/")
def broken_func():
1 // 0
@app.after_request
def after_request(resp):
resp.mimetype = "text/x-special"
return resp
resp = client.get("/")
assert resp.mimetype == "text/x-special"
assert resp.data == b"internal server error"
def test_baseexception_error_handling(app, client):
app.testing = False
@app.route("/")
def broken_func():
raise KeyboardInterrupt()
with pytest.raises(KeyboardInterrupt):
client.get("/")
ctx = flask._request_ctx_stack.top
assert ctx.preserved
assert type(ctx._preserved_exc) is KeyboardInterrupt
def test_before_request_and_routing_errors(app, client):
@app.before_request
def attach_something():
flask.g.something = "value"
@app.errorhandler(404)
def return_something(error):
return flask.g.something, 404
rv = client.get("/")
assert rv.status_code == 404
assert rv.data == b"value"
def test_user_error_handling(app, client):
class MyException(Exception):
pass
@app.errorhandler(MyException)
def handle_my_exception(e):
assert isinstance(e, MyException)
return "42"
@app.route("/")
def index():
raise MyException()
assert client.get("/").data == b"42"
def test_http_error_subclass_handling(app, client):
class ForbiddenSubclass(Forbidden):
pass
@app.errorhandler(ForbiddenSubclass)
def handle_forbidden_subclass(e):
assert isinstance(e, ForbiddenSubclass)
return "banana"
@app.errorhandler(403)
def handle_403(e):
assert not isinstance(e, ForbiddenSubclass)
assert isinstance(e, Forbidden)
return "apple"
@app.route("/1")
def index1():
raise ForbiddenSubclass()
@app.route("/2")
def index2():
flask.abort(403)
@app.route("/3")
def index3():
raise Forbidden()
assert client.get("/1").data == b"banana"
assert client.get("/2").data == b"apple"
assert client.get("/3").data == b"apple"
def test_errorhandler_precedence(app, client):
class E1(Exception):
pass
class E2(Exception):
pass
class E3(E1, E2):
pass
@app.errorhandler(E2)
def handle_e2(e):
return "E2"
@app.errorhandler(Exception)
def handle_exception(e):
return "Exception"
@app.route("/E1")
def raise_e1():
raise E1
@app.route("/E3")
def raise_e3():
raise E3
rv = client.get("/E1")
assert rv.data == b"Exception"
rv = client.get("/E3")
assert rv.data == b"E2"
def test_trapping_of_bad_request_key_errors(app, client):
@app.route("/key")
def fail():
flask.request.form["missing_key"]
@app.route("/abort")
def allow_abort():
flask.abort(400)
rv = client.get("/key")
assert rv.status_code == 400
assert b"missing_key" not in rv.data
rv = client.get("/abort")
assert rv.status_code == 400
app.debug = True
with pytest.raises(KeyError) as e:
client.get("/key")
assert e.errisinstance(BadRequest)
assert "missing_key" in e.value.get_description()
rv = client.get("/abort")
assert rv.status_code == 400
app.debug = False
app.config["TRAP_BAD_REQUEST_ERRORS"] = True
with pytest.raises(KeyError):
client.get("/key")
with pytest.raises(BadRequest):
client.get("/abort")
def test_trapping_of_all_http_exceptions(app, client):
app.config["TRAP_HTTP_EXCEPTIONS"] = True
@app.route("/fail")
def fail():
flask.abort(404)
with pytest.raises(NotFound):
client.get("/fail")
def test_error_handler_after_processor_error(app, client):
app.testing = False
@app.before_request
def before_request():
if _trigger == "before":
1 // 0
@app.after_request
def after_request(response):
if _trigger == "after":
1 // 0
return response
@app.route("/")
def index():
return "Foo"
@app.errorhandler(500)
def internal_server_error(e):
return "Hello Server Error", 500
for _trigger in "before", "after":
rv = client.get("/")
assert rv.status_code == 500
assert rv.data == b"Hello Server Error"
def test_enctype_debug_helper(app, client):
from flask.debughelpers import DebugFilesKeyError
app.debug = True
@app.route("/fail", methods=["POST"])
def index():
return flask.request.files["foo"].filename
# with statement is important because we leave an exception on the
# stack otherwise and we want to ensure that this is not the case
# to not negatively affect other tests.
with client:
with pytest.raises(DebugFilesKeyError) as e:
client.post("/fail", data={"foo": "index.txt"})
assert "no file contents were transmitted" in str(e.value)
assert "This was submitted: 'index.txt'" in str(e.value)
def test_response_types(app, client):
@app.route("/text")
def from_text():
return "Hällo Wörld"
@app.route("/bytes")
def from_bytes():
return "Hällo Wörld".encode()
@app.route("/full_tuple")
def from_full_tuple():
return (
"Meh",
400,
{"X-Foo": "Testing", "Content-Type": "text/plain; charset=utf-8"},
)
@app.route("/text_headers")
def from_text_headers():
return "Hello", {"X-Foo": "Test", "Content-Type": "text/plain; charset=utf-8"}
@app.route("/text_status")
def from_text_status():
return "Hi, status!", 400
@app.route("/response_headers")
def from_response_headers():
return (
flask.Response(
"Hello world", 404, {"Content-Type": "text/html", "X-Foo": "Baz"}
),
{"Content-Type": "text/plain", "X-Foo": "Bar", "X-Bar": "Foo"},
)
@app.route("/response_status")
def from_response_status():
return app.response_class("Hello world", 400), 500
@app.route("/wsgi")
def from_wsgi():
return NotFound()
@app.route("/dict")
def from_dict():
return {"foo": "bar"}, 201
assert client.get("/text").data == "Hällo Wörld".encode()
assert client.get("/bytes").data == "Hällo Wörld".encode()
rv = client.get("/full_tuple")
assert rv.data == b"Meh"
assert rv.headers["X-Foo"] == "Testing"
assert rv.status_code == 400
assert rv.mimetype == "text/plain"
rv = client.get("/text_headers")
assert rv.data == b"Hello"
assert rv.headers["X-Foo"] == "Test"
assert rv.status_code == 200
assert rv.mimetype == "text/plain"
rv = client.get("/text_status")
assert rv.data == b"Hi, status!"
assert rv.status_code == 400
assert rv.mimetype == "text/html"
rv = client.get("/response_headers")
assert rv.data == b"Hello world"
assert rv.content_type == "text/plain"
assert rv.headers.getlist("X-Foo") == ["Bar"]
assert rv.headers["X-Bar"] == "Foo"
assert rv.status_code == 404
rv = client.get("/response_status")
assert rv.data == b"Hello world"
assert rv.status_code == 500
rv = client.get("/wsgi")
assert b"Not Found" in rv.data
assert rv.status_code == 404
rv = client.get("/dict")
assert rv.json == {"foo": "bar"}
assert rv.status_code == 201
def test_response_type_errors():
app = flask.Flask(__name__)
app.testing = True
@app.route("/none")
def from_none():
pass
@app.route("/small_tuple")
def from_small_tuple():
return ("Hello",)
@app.route("/large_tuple")
def from_large_tuple():
return "Hello", 234, {"X-Foo": "Bar"}, "???"
@app.route("/bad_type")
def from_bad_type():
return True
@app.route("/bad_wsgi")
def from_bad_wsgi():
return lambda: None
c = app.test_client()
with pytest.raises(TypeError) as e:
c.get("/none")
assert "returned None" in str(e.value)
assert "from_none" in str(e.value)
with pytest.raises(TypeError) as e:
c.get("/small_tuple")
assert "tuple must have the form" in str(e.value)
pytest.raises(TypeError, c.get, "/large_tuple")
with pytest.raises(TypeError) as e:
c.get("/bad_type")
assert "it was a bool" in str(e.value)
pytest.raises(TypeError, c.get, "/bad_wsgi")
def test_make_response(app, req_ctx):
rv = flask.make_response()
assert rv.status_code == 200
assert rv.data == b""
assert rv.mimetype == "text/html"
rv = flask.make_response("Awesome")
assert rv.status_code == 200
assert rv.data == b"Awesome"
assert rv.mimetype == "text/html"
rv = flask.make_response("W00t", 404)
assert rv.status_code == 404
assert rv.data == b"W00t"
assert rv.mimetype == "text/html"
def test_make_response_with_response_instance(app, req_ctx):
rv = flask.make_response(flask.jsonify({"msg": "W00t"}), 400)
assert rv.status_code == 400
assert rv.data == b'{"msg":"W00t"}\n'
assert rv.mimetype == "application/json"
rv = flask.make_response(flask.Response(""), 400)
assert rv.status_code == 400
assert rv.data == b""
assert rv.mimetype == "text/html"
rv = flask.make_response(
flask.Response("", headers={"Content-Type": "text/html"}),
400,
[("X-Foo", "bar")],
)
assert rv.status_code == 400
assert rv.headers["Content-Type"] == "text/html"
assert rv.headers["X-Foo"] == "bar"
def test_jsonify_no_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": False})
compressed_msg = b'{"msg":{"submsg":"W00t"},"msg2":"foobar"}\n'
uncompressed_msg = {"msg": {"submsg": "W00t"}, "msg2": "foobar"}
rv = flask.make_response(flask.jsonify(uncompressed_msg), 200)
assert rv.data == compressed_msg
def test_jsonify_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": True})
compressed_msg = {"msg": {"submsg": "W00t"}, "msg2": "foobar"}
pretty_response = (
b'{\n "msg": {\n "submsg": "W00t"\n }, \n "msg2": "foobar"\n}\n'
)
rv = flask.make_response(flask.jsonify(compressed_msg), 200)
assert rv.data == pretty_response
def test_jsonify_mimetype(app, req_ctx):
app.config.update({"JSONIFY_MIMETYPE": "application/vnd.api+json"})
msg = {"msg": {"submsg": "W00t"}}
rv = flask.make_response(flask.jsonify(msg), 200)
assert rv.mimetype == "application/vnd.api+json"
@pytest.mark.skipif(sys.version_info < (3, 7), reason="requires Python >= 3.7")
def test_json_dump_dataclass(app, req_ctx):
from dataclasses import make_dataclass
Data = make_dataclass("Data", [("name", str)])
value = flask.json.dumps(Data("Flask"), app=app)
value = flask.json.loads(value, app=app)
assert value == {"name": "Flask"}
def test_jsonify_args_and_kwargs_check(app, req_ctx):
with pytest.raises(TypeError) as e:
flask.jsonify("fake args", kwargs="fake")
assert "behavior undefined" in str(e.value)
def test_url_generation(app, req_ctx):
@app.route("/hello/<name>", methods=["POST"])
def hello():
pass
assert flask.url_for("hello", name="test x") == "/hello/test%20x"
assert (
flask.url_for("hello", name="test x", _external=True)
== "http://localhost/hello/test%20x"
)
def test_build_error_handler(app):
# Test base case, a URL which results in a BuildError.
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, "spam")
# Verify the error is re-raised if not the current exception.
try:
with app.test_request_context():
flask.url_for("spam")
except BuildError as err:
error = err
try:
raise RuntimeError("Test case where BuildError is not current.")
except RuntimeError:
pytest.raises(BuildError, app.handle_url_build_error, error, "spam", {})
# Test a custom handler.
def handler(error, endpoint, values):
# Just a test.
return "/test_handler/"
app.url_build_error_handlers.append(handler)
with app.test_request_context():
assert flask.url_for("spam") == "/test_handler/"
def test_build_error_handler_reraise(app):
# Test a custom handler which reraises the BuildError
def handler_raises_build_error(error, endpoint, values):
raise error
app.url_build_error_handlers.append(handler_raises_build_error)
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, "not.existing")
def test_url_for_passes_special_values_to_build_error_handler(app):
@app.url_build_error_handlers.append
def handler(error, endpoint, values):
assert values == {
"_external": False,
"_anchor": None,
"_method": None,
"_scheme": None,
}
return "handled"
with app.test_request_context():
flask.url_for("/")
def test_static_files(app, client):
rv = client.get("/static/index.html")
assert rv.status_code == 200
assert rv.data.strip() == b"<h1>Hello World!</h1>"
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/static/index.html"
rv.close()
def test_static_url_path():
app = flask.Flask(__name__, static_url_path="/foo")
app.testing = True
rv = app.test_client().get("/foo/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/foo/index.html"
def test_static_url_path_with_ending_slash():
app = flask.Flask(__name__, static_url_path="/foo/")
app.testing = True
rv = app.test_client().get("/foo/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/foo/index.html"
def test_static_url_empty_path(app):
app = flask.Flask(__name__, static_folder="", static_url_path="")
rv = app.test_client().open("/static/index.html", method="GET")
assert rv.status_code == 200
rv.close()
def test_static_url_empty_path_default(app):
app = flask.Flask(__name__, static_folder="")
rv = app.test_client().open("/static/index.html", method="GET")
assert rv.status_code == 200
rv.close()
@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires Python >= 3.6")
def test_static_folder_with_pathlib_path(app):
from pathlib import Path
app = flask.Flask(__name__, static_folder=Path("static"))
rv = app.test_client().open("/static/index.html", method="GET")
assert rv.status_code == 200
rv.close()
def test_static_folder_with_ending_slash():
app = flask.Flask(__name__, static_folder="static/")
@app.route("/<path:path>")
def catch_all(path):
return path
rv = app.test_client().get("/catch/all")
assert rv.data == b"catch/all"
def test_static_route_with_host_matching():
app = flask.Flask(__name__, host_matching=True, static_host="example.com")
c = app.test_client()
rv = c.get("http://example.com/static/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
rv = flask.url_for("static", filename="index.html", _external=True)
assert rv == "http://example.com/static/index.html"
# Providing static_host without host_matching=True should error.
with pytest.raises(Exception):
flask.Flask(__name__, static_host="example.com")
# Providing host_matching=True with static_folder
# but without static_host should error.
with pytest.raises(Exception):
flask.Flask(__name__, host_matching=True)
# Providing host_matching=True without static_host
# but with static_folder=None should not error.
flask.Flask(__name__, host_matching=True, static_folder=None)
def test_request_locals():
assert repr(flask.g) == "<LocalProxy unbound>"
assert not flask.g
def test_server_name_subdomain():
app = flask.Flask(__name__, subdomain_matching=True)
client = app.test_client()
@app.route("/")
def index():
return "default"
@app.route("/", subdomain="foo")
def subdomain():
return "subdomain"
app.config["SERVER_NAME"] = "dev.local:5000"
rv = client.get("/")
assert rv.data == b"default"
rv = client.get("/", "http://dev.local:5000")
assert rv.data == b"default"
rv = client.get("/", "https://dev.local:5000")
assert rv.data == b"default"
app.config["SERVER_NAME"] = "dev.local:443"
rv = client.get("/", "https://dev.local")
# Werkzeug 1.0 fixes matching https scheme with 443 port
if rv.status_code != 404:
assert rv.data == b"default"
app.config["SERVER_NAME"] = "dev.local"
rv = client.get("/", "https://dev.local")
assert rv.data == b"default"
# suppress Werkzeug 1.0 warning about name mismatch
with pytest.warns(None):
rv = client.get("/", "http://foo.localhost")
assert rv.status_code == 404
rv = client.get("/", "http://foo.dev.local")
assert rv.data == b"subdomain"
def test_exception_propagation(app, client):
def apprunner(config_key):
@app.route("/")
def index():
1 // 0
if config_key is not None:
app.config[config_key] = True
with pytest.raises(Exception):
client.get("/")
else:
assert client.get("/").status_code == 500
# we have to run this test in an isolated thread because if the
# debug flag is set to true and an exception happens the context is
# not torn down. This causes other tests that run after this fail
# when they expect no exception on the stack.
for config_key in "TESTING", "PROPAGATE_EXCEPTIONS", "DEBUG", None:
t = Thread(target=apprunner, args=(config_key,))
t.start()
t.join()
@pytest.mark.parametrize("debug", [True, False])
@pytest.mark.parametrize("use_debugger", [True, False])
@pytest.mark.parametrize("use_reloader", [True, False])
@pytest.mark.parametrize("propagate_exceptions", [None, True, False])
def test_werkzeug_passthrough_errors(
monkeypatch, debug, use_debugger, use_reloader, propagate_exceptions, app
):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv["passthrough_errors"] = kwargs.get("passthrough_errors")
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.config["PROPAGATE_EXCEPTIONS"] = propagate_exceptions
app.run(debug=debug, use_debugger=use_debugger, use_reloader=use_reloader)
def test_max_content_length(app, client):
app.config["MAX_CONTENT_LENGTH"] = 64
@app.before_request
def always_first():
flask.request.form["myfile"]
AssertionError()
@app.route("/accept", methods=["POST"])
def accept_file():
flask.request.form["myfile"]
AssertionError()
@app.errorhandler(413)
def catcher(error):
return "42"
rv = client.post("/accept", data={"myfile": "foo" * 100})
assert rv.data == b"42"
def test_url_processors(app, client):
@app.url_defaults
def add_language_code(endpoint, values):
if flask.g.lang_code is not None and app.url_map.is_endpoint_expecting(
endpoint, "lang_code"
):
values.setdefault("lang_code", flask.g.lang_code)
@app.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop("lang_code", None)
@app.route("/<lang_code>/")
def index():
return flask.url_for("about")
@app.route("/<lang_code>/about")
def about():
return flask.url_for("something_else")
@app.route("/foo")
def something_else():
return flask.url_for("about", lang_code="en")
assert client.get("/de/").data == b"/de/about"
assert client.get("/de/about").data == b"/foo"
assert client.get("/foo").data == b"/en/about"
def test_inject_blueprint_url_defaults(app):
bp = flask.Blueprint("foo.bar.baz", __name__, template_folder="template")
@bp.url_defaults
def bp_defaults(endpoint, values):
values["page"] = "login"
@bp.route("/<page>")
def view(page):
pass
app.register_blueprint(bp)
values = dict()
app.inject_url_defaults("foo.bar.baz.view", values)
expected = dict(page="login")
assert values == expected
with app.test_request_context("/somepage"):
url = flask.url_for("foo.bar.baz.view")
expected = "/login"
assert url == expected
def test_nonascii_pathinfo(app, client):
@app.route("/киртест")
def index():
return "Hello World!"
rv = client.get("/киртест")
assert rv.data == b"Hello World!"
def test_debug_mode_complains_after_first_request(app, client):
app.debug = True
@app.route("/")
def index():
return "Awesome"
assert not app.got_first_request
assert client.get("/").data == b"Awesome"
with pytest.raises(AssertionError) as e:
@app.route("/foo")
def broken():
return "Meh"
assert "A setup function was called" in str(e.value)
app.debug = False
@app.route("/foo")
def working():
return "Meh"
assert client.get("/foo").data == b"Meh"
assert app.got_first_request
def test_before_first_request_functions(app, client):
got = []
@app.before_first_request
def foo():
got.append(42)
client.get("/")
assert got == [42]
client.get("/")
assert got == [42]
assert app.got_first_request
def test_before_first_request_functions_concurrent(app, client):
got = []
@app.before_first_request
def foo():
time.sleep(0.2)
got.append(42)
def get_and_assert():
client.get("/")
assert got == [42]
t = Thread(target=get_and_assert)
t.start()
get_and_assert()
t.join()
assert app.got_first_request
def test_routing_redirect_debugging(app, client):
app.debug = True
@app.route("/foo/", methods=["GET", "POST"])
def foo():
return "success"
with client:
with pytest.raises(AssertionError) as e:
client.post("/foo", data={})
assert "http://localhost/foo/" in str(e.value)
assert "Make sure to directly send your POST-request to this URL" in str(
e.value
)
rv = client.get("/foo", data={}, follow_redirects=True)
assert rv.data == b"success"
app.debug = False
with client:
rv = client.post("/foo", data={}, follow_redirects=True)
assert rv.data == b"success"
def test_route_decorator_custom_endpoint(app, client):
app.debug = True
@app.route("/foo/")
def foo():
return flask.request.endpoint
@app.route("/bar/", endpoint="bar")
def for_bar():
return flask.request.endpoint
@app.route("/bar/123", endpoint="123")
def for_bar_foo():
return flask.request.endpoint
with app.test_request_context():
assert flask.url_for("foo") == "/foo/"
assert flask.url_for("bar") == "/bar/"
assert flask.url_for("123") == "/bar/123"
assert client.get("/foo/").data == b"foo"
assert client.get("/bar/").data == b"bar"
assert client.get("/bar/123").data == b"123"
def test_preserve_only_once(app, client):
app.debug = True
@app.route("/fail")
def fail_func():
1 // 0
for _x in range(3):
with pytest.raises(ZeroDivisionError):
client.get("/fail")
assert flask._request_ctx_stack.top is not None
assert flask._app_ctx_stack.top is not None
# implicit appctx disappears too
flask._request_ctx_stack.top.pop()
assert flask._request_ctx_stack.top is None
assert flask._app_ctx_stack.top is None
def test_preserve_remembers_exception(app, client):
app.debug = True
errors = []
@app.route("/fail")
def fail_func():
1 // 0
@app.route("/success")
def success_func():
return "Okay"
@app.teardown_request
def teardown_handler(exc):
errors.append(exc)
# After this failure we did not yet call the teardown handler
with pytest.raises(ZeroDivisionError):
client.get("/fail")
assert errors == []
# But this request triggers it, and it's an error
client.get("/success")
assert len(errors) == 2
assert isinstance(errors[0], ZeroDivisionError)
# At this point another request does nothing.
client.get("/success")
assert len(errors) == 3
assert errors[1] is None
def test_get_method_on_g(app_ctx):
assert flask.g.get("x") is None
assert flask.g.get("x", 11) == 11
flask.g.x = 42
assert flask.g.get("x") == 42
assert flask.g.x == 42
def test_g_iteration_protocol(app_ctx):
flask.g.foo = 23
flask.g.bar = 42
assert "foo" in flask.g
assert "foos" not in flask.g
assert sorted(flask.g) == ["bar", "foo"]
def test_subdomain_basic_support():
app = flask.Flask(__name__, subdomain_matching=True)
app.config["SERVER_NAME"] = "localhost.localdomain"
client = app.test_client()
@app.route("/")
def normal_index():
return "normal index"
@app.route("/", subdomain="test")
def test_index():
return "test index"
rv = client.get("/", "http://localhost.localdomain/")
assert rv.data == b"normal index"
rv = client.get("/", "http://test.localhost.localdomain/")
assert rv.data == b"test index"
def test_subdomain_matching():
app = flask.Flask(__name__, subdomain_matching=True)
client = app.test_client()
app.config["SERVER_NAME"] = "localhost.localdomain"
@app.route("/", subdomain="<user>")
def index(user):
return f"index for {user}"
rv = client.get("/", "http://mitsuhiko.localhost.localdomain/")
assert rv.data == b"index for mitsuhiko"
def test_subdomain_matching_with_ports():
app = flask.Flask(__name__, subdomain_matching=True)
app.config["SERVER_NAME"] = "localhost.localdomain:3000"
client = app.test_client()
@app.route("/", subdomain="<user>")
def index(user):
return f"index for {user}"
rv = client.get("/", "http://mitsuhiko.localhost.localdomain:3000/")
assert rv.data == b"index for mitsuhiko"
@pytest.mark.parametrize("matching", (False, True))
def test_subdomain_matching_other_name(matching):
app = flask.Flask(__name__, subdomain_matching=matching)
app.config["SERVER_NAME"] = "localhost.localdomain:3000"
client = app.test_client()
@app.route("/")
def index():
return "", 204
# suppress Werkzeug 0.15 warning about name mismatch
with pytest.warns(None):
# ip address can't match name
rv = client.get("/", "http://127.0.0.1:3000/")
assert rv.status_code == 404 if matching else 204
# allow all subdomains if matching is disabled
rv = client.get("/", "http://www.localhost.localdomain:3000/")
assert rv.status_code == 404 if matching else 204
def test_multi_route_rules(app, client):
@app.route("/")
@app.route("/<test>/")
def index(test="a"):
return test
rv = client.open("/")
assert rv.data == b"a"
rv = client.open("/b/")
assert rv.data == b"b"
def test_multi_route_class_views(app, client):
class View:
def __init__(self, app):
app.add_url_rule("/", "index", self.index)
app.add_url_rule("/<test>/", "index", self.index)
def index(self, test="a"):
return test
_ = View(app)
rv = client.open("/")
assert rv.data == b"a"
rv = client.open("/b/")
assert rv.data == b"b"
def test_run_defaults(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv["result"] = "running..."
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.run()
assert rv["result"] == "running..."
def test_run_server_port(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(hostname, port, application, *args, **kwargs):
rv["result"] = f"running on {hostname}:{port} ..."
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
hostname, port = "localhost", 8000
app.run(hostname, port, debug=True)
assert rv["result"] == f"running on {hostname}:{port} ..."
@pytest.mark.parametrize(
"host,port,server_name,expect_host,expect_port",
(
(None, None, "pocoo.org:8080", "pocoo.org", 8080),
("localhost", None, "pocoo.org:8080", "localhost", 8080),
(None, 80, "pocoo.org:8080", "pocoo.org", 80),
("localhost", 80, "pocoo.org:8080", "localhost", 80),
("localhost", 0, "localhost:8080", "localhost", 0),
(None, None, "localhost:8080", "localhost", 8080),
(None, None, "localhost:0", "localhost", 0),
),
)
def test_run_from_config(
monkeypatch, host, port, server_name, expect_host, expect_port, app
):
def run_simple_mock(hostname, port, *args, **kwargs):
assert hostname == expect_host
assert port == expect_port
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.config["SERVER_NAME"] = server_name
app.run(host, port)
def test_max_cookie_size(app, client, recwarn):
app.config["MAX_COOKIE_SIZE"] = 100
# outside app context, default to Werkzeug static value,
# which is also the default config
response = flask.Response()
default = flask.Flask.default_config["MAX_COOKIE_SIZE"]
assert response.max_cookie_size == default
# inside app context, use app config
with app.app_context():
assert flask.Response().max_cookie_size == 100
@app.route("/")
def index():
r = flask.Response("", status=204)
r.set_cookie("foo", "bar" * 100)
return r
client.get("/")
assert len(recwarn) == 1
w = recwarn.pop()
assert "cookie is too large" in str(w.message)
app.config["MAX_COOKIE_SIZE"] = 0
client.get("/")
assert len(recwarn) == 0
|
websocket30402.py
|
#ref: https://gist.github.com/rich20bb/4190781
#
# Set COMM Port (see: ser.port)
#
# Usage:
# [Anaconda2] C:\Users\cruncher>python websocket30402.py
import time
import struct
import socket
import hashlib
import base64
import sys
from select import select
import re
import logging
from threading import Thread
import signal
#---serial port comms.
# ref: http://pyserial.readthedocs.io/en/latest/index.html
import serial
import binascii
ser = serial.Serial()
ser.port = 'COM3' # tracker = 'COM10', rob3 = 'COM3'
ser.baudrate = 9600
ser.bytesize = serial.EIGHTBITS # Data bits
ser.parity = serial.PARITY_NONE # Parity
ser.stopbits = serial.STOPBITS_ONE # Stop bits
ser.timeout = 1.0
ser.xonxoff = False # Software flow control.
ser.rtscts = False # Hardware flow control (RTS/CTS).
ser.dsrdtr = False # Hardware flow control (DSR/DTR).
ser.write_timeout = 1.0
ser.inter_byte_timeout = 1.0
try:
ser.open()
except Exception, e:
print "error open serial port: " + str(e)
exit()
#---serial port comms.
# Simple WebSocket server implementation. Handshakes with the client then echoes back everything
# that is received. Has no dependencies (doesn't require Twisted etc) and works with the RFC6455
# version of WebSockets. Tested with FireFox 16, though should work with the latest versions of
# IE, Chrome etc.
#
# rich20b@gmail.com
# Adapted from https://gist.github.com/512987 with various functions stolen from other sites, see
# below for full details.
# Constants
MAGICGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
TEXT = 0x01
BINARY = 0x02
# WebSocket implementation
class WebSocket(object):
handshake = (
"HTTP/1.1 101 Web Socket Protocol Handshake\r\n"
"Upgrade: WebSocket\r\n"
"Connection: Upgrade\r\n"
"Sec-WebSocket-Accept: %(acceptstring)s\r\n"
"Server: TestTest\r\n"
"Access-Control-Allow-Origin: http://localhost\r\n"
"Access-Control-Allow-Credentials: true\r\n"
"\r\n"
)
# Constructor
def __init__(self, client, server):
self.client = client
self.server = server
self.handshaken = False
self.header = ""
self.data = ""
# Serve this client
def feed(self, data):
# If we haven't handshaken yet
if not self.handshaken:
logging.debug("No handshake yet")
self.header += data
if self.header.find('\r\n\r\n') != -1:
parts = self.header.split('\r\n\r\n', 1)
self.header = parts[0]
if self.dohandshake(self.header, parts[1]):
logging.info("Handshake successful")
self.handshaken = True
# We have handshaken
else:
logging.debug("Handshake is complete")
# Decode the data that we received according to section 5 of RFC6455
recv = self.decodeCharArray(data)
#---serial port comms.
if ser.isOpen():
try:
ser.flushInput() #flush input buffer, discarding all its contents
ser.flushOutput() #flush output buffer, aborting current output
#and discard all that is in buffer
#write data
#packet = "\xFF\xFF\x07\xFD\x19\xE2\x1C"
print recv
packet = ''.join(recv).decode('utf-8').encode('utf-16be') #IMPORTANT: 'strip()' removed. 'strip()' will remove 0x20 (white space).
#''.join(recv).strip().decode('utf-8')
#u'\xff\xff\x07\xfd\x19\xe2\x1c' (unicode)
print("Packet sent: " + '0x' + binascii.hexlify(packet))
packet = packet[1::2] # Drop every other, as they are 0x00
print("Packet sent: " + '0x' + binascii.hexlify(packet))
ser.write(packet)
time.sleep(0.5) #give the serial port some time to receive the data
numOfLines = 0
#recv = ['\xc3', '\xbf', '\xc3', '\xbf', '\x07', '\xc3', '\xbd', '\x19', '\xc3', '\xa2', '\x1c'] #ASCII from Unicode ('\xc3', '\xbf' = '\x00ff')
#recv = ['\xff', '\xff', '\x09', '\xfd', '\x59', '\xac', '\x52', '\x00', '\x00']
#recv = [u'\xff', u'\xff', u'\x09', u'\xfd', u'\x59', u'\xac', u'\x52', u'\x00', u'\x00']
#recv = ['\x00ff', '\x00ff', '\x0009', '\x00fd', '\x0059', '\x00ac', '\x0052', '\x0000', '\x0000']
#recv = ['\x00', '\xff', '\x00', '\xff', '\x00', '\x09', '\x00', '\xfd', '\x00', '\x59', '\x00', '\xac', '\x00', '\x52', '\x00', '\x00', '\x00', '\x00']
#recv = u""
recv = [] #Clean.
packetSize = 1 #MIN_PACKET_SIZE
while True:
numOfLines = numOfLines + 1
response = ser.read(1) # Blocking.
print("read data: " + '0x' + binascii.hexlify(response)) #Write hexadecimal to console.
recv.append(response.decode('latin1')) #'latin1' maps bytes 0-255 to unicode characters 0-255.
if(numOfLines >= packetSize): break
#recv = ['\xff', '\xff', '\x09', '\xfd', '\x59', '\xac', '\x52', '\x00', '\x00']
except Exception, e1:
print "error communicating...: " + str(e1)
else:
print "cannot open serial port "
#---serial port comms.
# Send our reply
####for hex in recv: print '%x' % ord(hex[0])
#self.sendMessage(''.join(recv).strip())
self.sendMessage(''.join(recv).encode("utf8")) #''.join(recv).encode("utf8") IMPORTANT: removed '.strip()'.
# Stolen from http://www.cs.rpi.edu/~goldsd/docs/spring2012-csci4220/websocket-py.txt
def sendMessage(self, s):
"""
Encode and send a WebSocket message
"""
# Empty message to start with
message = ""
# always send an entire message as one frame (fin)
b1 = 0x80
# in Python 2, strs are bytes and unicodes are strings
if type(s) == unicode:
b1 |= TEXT
payload = s.encode("UTF8")
elif type(s) == str:
b1 |= TEXT
payload = s
# Append 'FIN' flag to the message
message += chr(b1)
# never mask frames from the server to the client
b2 = 0
# How long is our payload?
length = len(payload)
if length < 126:
b2 |= length
message += chr(b2)
elif length < (2 ** 16) - 1:
b2 |= 126
message += chr(b2)
l = struct.pack(">H", length)
message += l
else:
l = struct.pack(">Q", length)
b2 |= 127
message += chr(b2)
message += l
# Append payload to message
message += payload
# Send to the client
self.client.send(str(message))
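# Length-encoding sketch (illustration only, mirroring the branches above): a
# 10-byte payload fits the 7-bit length field, so the second byte is simply
# 0x0A; a 300-byte payload uses the 126 marker followed by a 2-byte big-endian
# length (struct.pack(">H", 300) == b'\x01\x2c'); payloads of 65535 bytes or
# more use the 127 marker followed by an 8-byte length.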
# Stolen from http://stackoverflow.com/questions/8125507/how-can-i-send-and-receive-websocket-messages-on-the-server-side
def decodeCharArray(self, stringStreamIn):
# Turn string values into operable numeric byte values
byteArray = [ord(character) for character in stringStreamIn]
datalength = byteArray[1] & 127
indexFirstMask = 2
if datalength == 126:
indexFirstMask = 4
elif datalength == 127:
indexFirstMask = 10
# Extract masks
masks = [m for m in byteArray[indexFirstMask : indexFirstMask+4]]
indexFirstDataByte = indexFirstMask + 4
# List of decoded characters
decodedChars = []
i = indexFirstDataByte
j = 0
# Loop through each byte that was received
while i < len(byteArray):
# Unmask this byte and add to the decoded buffer
decodedChars.append( chr(byteArray[i] ^ masks[j % 4]) )
i += 1
j += 1
# Return the decoded string
return decodedChars
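# Worked unmasking example (the standard one from RFC 6455, section 5.7): the
# masked single-frame text message 0x81 0x85 0x37 0xfa 0x21 0x3d 0x7f 0x9f 0x4d
# 0x51 0x58 carries the mask 0x37 0xfa 0x21 0x3d; XOR-ing each payload byte
# with the mask, repeated every 4 bytes as in the loop above, yields
# 0x48 0x65 0x6c 0x6c 0x6f, i.e. "Hello".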
# Handshake with this client
def dohandshake(self, header, key=None):
logging.debug("Begin handshake: %s" % header)
# Get the handshake template
handshake = self.handshake
# Step through each header
for line in header.split('\r\n')[1:]:
name, value = line.split(': ', 1)
# If this is the key
if name.lower() == "sec-websocket-key":
# Append the standard GUID and get digest
combined = value + MAGICGUID
response = base64.b64encode(hashlib.sha1(combined).digest())
# Replace the placeholder in the handshake response
handshake = handshake % { 'acceptstring' : response }
logging.debug("Sending handshake %s" % handshake)
self.client.send(handshake)
return True
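# Handshake sanity check (the standard example from RFC 6455): for the client
# key "dGhlIHNhbXBsZSBub25jZQ==", appending MAGICGUID, taking the SHA-1 digest
# and base64-encoding it produces "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", which is the
# value placed into the Sec-WebSocket-Accept header above.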
def onmessage(self, data):
#logging.info("Got message: %s" % data)
self.send(data)
def send(self, data):
logging.info("Sent message: %s" % data)
self.client.send("\x00%s\xff" % data)
def close(self):
self.client.close()
# WebSocket server implementation
class WebSocketServer(object):
# Constructor
def __init__(self, bind, port, cls):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind((bind, port))
self.bind = bind
self.port = port
self.cls = cls
self.connections = {}
self.listeners = [self.socket]
# Listen for requests
def listen(self, backlog=5):
self.socket.listen(backlog)
logging.info("Listening on %s" % self.port)
# Keep serving requests
self.running = True
while self.running:
# Find clients that need servicing
rList, wList, xList = select(self.listeners, [], self.listeners, 1)
for ready in rList:
if ready == self.socket:
logging.debug("New client connection")
client, address = self.socket.accept()
fileno = client.fileno()
self.listeners.append(fileno)
self.connections[fileno] = self.cls(client, self)
else:
logging.debug("Client ready for reading %s" % ready)
client = self.connections[ready].client
data = client.recv(4096) # Data from HTML client.
fileno = client.fileno()
if data:
self.connections[fileno].feed(data)
else:
logging.debug("Closing client %s" % ready)
self.connections[fileno].close()
del self.connections[fileno]
self.listeners.remove(ready)
# Step though and delete broken connections
for failed in xList:
if failed == self.socket:
logging.error("Socket broke")
for fileno, conn in self.connections:
conn.close()
self.running = False
# Read serial port.
# Entry point
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s")
server = WebSocketServer("", 30402, WebSocket)
server_thread = Thread(target=server.listen, args=[5])
server_thread.start()
# Add SIGINT handler for killing the threads
def signal_handler(signal, frame):
logging.info("Caught Ctrl+C, shutting down...")
#---serial port comms.
ser.close() # Close serial port.
#---serial port comms.
server.running = False
sys.exit()
signal.signal(signal.SIGINT, signal_handler)
while True:
time.sleep(100)
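# A minimal client-side sketch for exercising this server, assuming the
# third-party "websocket-client" package is installed (it is not used by this
# script itself); it opens a connection, sends a text frame and prints the
# reply produced by the serial bridge above:
#
#   import websocket  # pip install websocket-client
#   ws = websocket.create_connection("ws://localhost:30402/")
#   ws.send(u"\xff\xff\x07\xfd\x19\xe2\x1c")
#   print(ws.recv())
#   ws.close()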
|
server.py
|
import asyncio
import multiprocessing
import os
import secrets
import socket
import stat
import sys
import traceback
from collections import deque
from functools import partial
from inspect import isawaitable
from ipaddress import ip_address
from signal import SIG_IGN, SIGINT, SIGTERM, Signals
from signal import signal as signal_func
from time import time
from typing import Type
from httptools import HttpRequestParser # type: ignore
from httptools.parser.errors import HttpParserError # type: ignore
from sanic.compat import Header, ctrlc_workaround_for_windows
from sanic.config import Config
from sanic.exceptions import (
HeaderExpectationFailed,
InvalidUsage,
PayloadTooLarge,
RequestTimeout,
ServerError,
ServiceUnavailable,
)
from sanic.log import access_logger, logger
from sanic.request import EXPECT_HEADER, Request, StreamBuffer
from sanic.response import HTTPResponse
try:
import uvloop # type: ignore
if not isinstance(asyncio.get_event_loop_policy(), uvloop.EventLoopPolicy):
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
OS_IS_WINDOWS = os.name == "nt"
class Signal:
stopped = False
class ConnInfo:
"""Local and remote addresses and SSL status info."""
__slots__ = (
"sockname",
"peername",
"server",
"server_port",
"client",
"client_port",
"ssl",
)
def __init__(self, transport, unix=None):
self.ssl = bool(transport.get_extra_info("sslcontext"))
self.server = self.client = ""
self.server_port = self.client_port = 0
self.peername = None
self.sockname = addr = transport.get_extra_info("sockname")
if isinstance(addr, str): # UNIX socket
self.server = unix or addr
return
# IPv4 (ip, port) or IPv6 (ip, port, flowinfo, scopeid)
if isinstance(addr, tuple):
self.server = addr[0] if len(addr) == 2 else f"[{addr[0]}]"
self.server_port = addr[1]
# self.server gets non-standard port appended
if addr[1] != (443 if self.ssl else 80):
self.server = f"{self.server}:{addr[1]}"
self.peername = addr = transport.get_extra_info("peername")
if isinstance(addr, tuple):
self.client = addr[0] if len(addr) == 2 else f"[{addr[0]}]"
self.client_port = addr[1]
class HttpProtocol(asyncio.Protocol):
"""
This class provides a basic HTTP implementation of the sanic framework.
"""
__slots__ = (
# app
"app",
# event loop, connection
"loop",
"transport",
"connections",
"signal",
"conn_info",
# request params
"parser",
"request",
"url",
"headers",
# request config
"request_handler",
"request_timeout",
"response_timeout",
"keep_alive_timeout",
"request_max_size",
"request_buffer_queue_size",
"request_class",
"is_request_stream",
"error_handler",
# enable or disable access log purpose
"access_log",
# connection management
"_total_request_size",
"_request_timeout_handler",
"_response_timeout_handler",
"_keep_alive_timeout_handler",
"_last_request_time",
"_last_response_time",
"_is_stream_handler",
"_not_paused",
"_request_handler_task",
"_request_stream_task",
"_keep_alive",
"_header_fragment",
"state",
"_unix",
"_body_chunks",
)
def __init__(
self,
*,
loop,
app,
signal=Signal(),
connections=None,
state=None,
unix=None,
**kwargs,
):
asyncio.set_event_loop(loop)
self.loop = loop
deprecated_loop = self.loop if sys.version_info < (3, 7) else None
self.app = app
self.transport = None
self.conn_info = None
self.request = None
self.parser = None
self.url = None
self.headers = None
self.signal = signal
self.access_log = self.app.config.ACCESS_LOG
self.connections = connections if connections is not None else set()
self.request_handler = self.app.handle_request
self.error_handler = self.app.error_handler
self.request_timeout = self.app.config.REQUEST_TIMEOUT
self.request_buffer_queue_size = (
self.app.config.REQUEST_BUFFER_QUEUE_SIZE
)
self.response_timeout = self.app.config.RESPONSE_TIMEOUT
self.keep_alive_timeout = self.app.config.KEEP_ALIVE_TIMEOUT
self.request_max_size = self.app.config.REQUEST_MAX_SIZE
self.request_class = self.app.request_class or Request
self.is_request_stream = self.app.is_request_stream
self._is_stream_handler = False
self._not_paused = asyncio.Event(loop=deprecated_loop)
self._total_request_size = 0
self._request_timeout_handler = None
self._response_timeout_handler = None
self._keep_alive_timeout_handler = None
self._last_request_time = None
self._last_response_time = None
self._request_handler_task = None
self._request_stream_task = None
self._keep_alive = self.app.config.KEEP_ALIVE
self._header_fragment = b""
self.state = state if state else {}
if "requests_count" not in self.state:
self.state["requests_count"] = 0
self._unix = unix
self._not_paused.set()
self._body_chunks = deque()
@property
def keep_alive(self):
"""
Check if the connection needs to be kept alive based on the params
attached to the `_keep_alive` attribute, :attr:`Signal.stopped`
and :func:`HttpProtocol.parser.should_keep_alive`
:return: ``True`` if the connection is to be kept alive, ``False`` otherwise
"""
return (
self._keep_alive
and not self.signal.stopped
and self.parser.should_keep_alive()
)
# -------------------------------------------- #
# Connection
# -------------------------------------------- #
def connection_made(self, transport):
self.connections.add(self)
self._request_timeout_handler = self.loop.call_later(
self.request_timeout, self.request_timeout_callback
)
self.transport = transport
self.conn_info = ConnInfo(transport, unix=self._unix)
self._last_request_time = time()
def connection_lost(self, exc):
self.connections.discard(self)
if self._request_handler_task:
self._request_handler_task.cancel()
if self._request_stream_task:
self._request_stream_task.cancel()
if self._request_timeout_handler:
self._request_timeout_handler.cancel()
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
if self._keep_alive_timeout_handler:
self._keep_alive_timeout_handler.cancel()
def pause_writing(self):
self._not_paused.clear()
def resume_writing(self):
self._not_paused.set()
def request_timeout_callback(self):
# See the docstring in the RequestTimeout exception, to see
# exactly what this timeout is checking for.
# Check if elapsed time since request initiated exceeds our
# configured maximum request timeout value
time_elapsed = time() - self._last_request_time
if time_elapsed < self.request_timeout:
time_left = self.request_timeout - time_elapsed
self._request_timeout_handler = self.loop.call_later(
time_left, self.request_timeout_callback
)
else:
if self._request_stream_task:
self._request_stream_task.cancel()
if self._request_handler_task:
self._request_handler_task.cancel()
self.write_error(RequestTimeout("Request Timeout"))
def response_timeout_callback(self):
# Check if elapsed time since response was initiated exceeds our
# configured maximum request timeout value
time_elapsed = time() - self._last_request_time
if time_elapsed < self.response_timeout:
time_left = self.response_timeout - time_elapsed
self._response_timeout_handler = self.loop.call_later(
time_left, self.response_timeout_callback
)
else:
if self._request_stream_task:
self._request_stream_task.cancel()
if self._request_handler_task:
self._request_handler_task.cancel()
self.write_error(ServiceUnavailable("Response Timeout"))
def keep_alive_timeout_callback(self):
"""
Check if elapsed time since last response exceeds our configured
maximum keep alive timeout value and if so, close the transport
pipe and let the response writer handle the error.
:return: None
"""
time_elapsed = time() - self._last_response_time
if time_elapsed < self.keep_alive_timeout:
time_left = self.keep_alive_timeout - time_elapsed
self._keep_alive_timeout_handler = self.loop.call_later(
time_left, self.keep_alive_timeout_callback
)
else:
logger.debug("KeepAlive Timeout. Closing connection.")
self.transport.close()
self.transport = None
# -------------------------------------------- #
# Parsing
# -------------------------------------------- #
def data_received(self, data):
# Check for the request itself getting too large and exceeding
# memory limits
self._total_request_size += len(data)
if self._total_request_size > self.request_max_size:
self.write_error(PayloadTooLarge("Payload Too Large"))
# Create parser if this is the first time we're receiving data
if self.parser is None:
assert self.request is None
self.headers = []
self.parser = HttpRequestParser(self)
# requests count
self.state["requests_count"] = self.state["requests_count"] + 1
# Parse request chunk or close connection
try:
self.parser.feed_data(data)
except HttpParserError:
message = "Bad Request"
if self.app.debug:
message += "\n" + traceback.format_exc()
self.write_error(InvalidUsage(message))
def on_url(self, url):
if not self.url:
self.url = url
else:
self.url += url
def on_header(self, name, value):
self._header_fragment += name
if value is not None:
if (
self._header_fragment == b"Content-Length"
and int(value) > self.request_max_size
):
self.write_error(PayloadTooLarge("Payload Too Large"))
try:
value = value.decode()
except UnicodeDecodeError:
value = value.decode("latin_1")
self.headers.append(
(self._header_fragment.decode().casefold(), value)
)
self._header_fragment = b""
def on_headers_complete(self):
self.request = self.request_class(
url_bytes=self.url,
headers=Header(self.headers),
version=self.parser.get_http_version(),
method=self.parser.get_method().decode(),
transport=self.transport,
app=self.app,
)
self.request.conn_info = self.conn_info
# Remove any existing KeepAlive handler here,
# It will be recreated if required on the new request.
if self._keep_alive_timeout_handler:
self._keep_alive_timeout_handler.cancel()
self._keep_alive_timeout_handler = None
if self.request.headers.get(EXPECT_HEADER):
self.expect_handler()
if self.is_request_stream:
self._is_stream_handler = self.app.router.is_stream_handler(
self.request
)
if self._is_stream_handler:
self.request.stream = StreamBuffer(
self.request_buffer_queue_size
)
self.execute_request_handler()
def expect_handler(self):
"""
Handler for Expect Header.
"""
expect = self.request.headers.get(EXPECT_HEADER)
if self.request.version == "1.1":
if expect.lower() == "100-continue":
self.transport.write(b"HTTP/1.1 100 Continue\r\n\r\n")
else:
self.write_error(
HeaderExpectationFailed(f"Unknown Expect: {expect}")
)
def on_body(self, body):
if self.is_request_stream and self._is_stream_handler:
# body chunks can be put into asyncio.Queue out of order if
# multiple tasks put concurrently and the queue is full in python
# 3.7. so we should not create more than one task putting into the
# queue simultaneously.
self._body_chunks.append(body)
if (
not self._request_stream_task
or self._request_stream_task.done()
):
self._request_stream_task = self.loop.create_task(
self.stream_append()
)
else:
self.request.body_push(body)
async def body_append(self, body):
if (
self.request is None
or self._request_stream_task is None
or self._request_stream_task.cancelled()
):
return
if self.request.stream.is_full():
self.transport.pause_reading()
await self.request.stream.put(body)
self.transport.resume_reading()
else:
await self.request.stream.put(body)
async def stream_append(self):
while self._body_chunks:
body = self._body_chunks.popleft()
if self.request:
if self.request.stream.is_full():
self.transport.pause_reading()
await self.request.stream.put(body)
self.transport.resume_reading()
else:
await self.request.stream.put(body)
def on_message_complete(self):
# Entire request (headers and whole body) is received.
# We can cancel and remove the request timeout handler now.
if self._request_timeout_handler:
self._request_timeout_handler.cancel()
self._request_timeout_handler = None
if self.is_request_stream and self._is_stream_handler:
self._body_chunks.append(None)
if (
not self._request_stream_task
or self._request_stream_task.done()
):
self._request_stream_task = self.loop.create_task(
self.stream_append()
)
return
self.request.body_finish()
self.execute_request_handler()
def execute_request_handler(self):
"""
Invoke the request handler defined by the
:func:`sanic.app.Sanic.handle_request` method
:return: None
"""
self._response_timeout_handler = self.loop.call_later(
self.response_timeout, self.response_timeout_callback
)
self._last_request_time = time()
self._request_handler_task = self.loop.create_task(
self.request_handler(
self.request, self.write_response, self.stream_response
)
)
# -------------------------------------------- #
# Responding
# -------------------------------------------- #
def log_response(self, response):
"""
Helper method provided to enable the logging of responses when
the :attr:`HttpProtocol.access_log` flag is enabled.
:param response: Response generated for the current request
:type response: :class:`sanic.response.HTTPResponse` or
:class:`sanic.response.StreamingHTTPResponse`
:return: None
"""
if self.access_log:
extra = {"status": getattr(response, "status", 0)}
if isinstance(response, HTTPResponse):
extra["byte"] = len(response.body)
else:
extra["byte"] = -1
extra["host"] = "UNKNOWN"
if self.request is not None:
if self.request.ip:
extra["host"] = f"{self.request.ip}:{self.request.port}"
extra["request"] = f"{self.request.method} {self.request.url}"
else:
extra["request"] = "nil"
access_logger.info("", extra=extra)
def write_response(self, response):
"""
Writes response content synchronously to the transport.
"""
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
try:
keep_alive = self.keep_alive
self.transport.write(
response.output(
self.request.version, keep_alive, self.keep_alive_timeout
)
)
self.log_response(response)
except AttributeError:
logger.error(
"Invalid response object for url %s, "
"Expected Type: HTTPResponse, Actual Type: %s",
self.url,
type(response),
)
self.write_error(ServerError("Invalid response type"))
except RuntimeError:
if self.app.debug:
logger.error(
"Connection lost before response written @ %s",
self.request.ip,
)
keep_alive = False
except Exception as e:
self.bail_out(f"Writing response failed, connection closed {e!r}")
finally:
if not keep_alive:
self.transport.close()
self.transport = None
else:
self._keep_alive_timeout_handler = self.loop.call_later(
self.keep_alive_timeout, self.keep_alive_timeout_callback
)
self._last_response_time = time()
self.cleanup()
async def drain(self):
await self._not_paused.wait()
async def push_data(self, data):
self.transport.write(data)
async def stream_response(self, response):
"""
Streams a response to the client asynchronously. Attaches
the transport to the response so the response consumer can
write to the response as needed.
"""
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
try:
keep_alive = self.keep_alive
response.protocol = self
await response.stream(
self.request.version, keep_alive, self.keep_alive_timeout
)
self.log_response(response)
except AttributeError:
logger.error(
"Invalid response object for url %s, "
"Expected Type: HTTPResponse, Actual Type: %s",
self.url,
type(response),
)
self.write_error(ServerError("Invalid response type"))
except RuntimeError:
if self.app.debug:
logger.error(
"Connection lost before response written @ %s",
self.request.ip,
)
keep_alive = False
except Exception as e:
self.bail_out(f"Writing response failed, connection closed {e!r}")
finally:
if not keep_alive:
self.transport.close()
self.transport = None
else:
self._keep_alive_timeout_handler = self.loop.call_later(
self.keep_alive_timeout, self.keep_alive_timeout_callback
)
self._last_response_time = time()
self.cleanup()
def write_error(self, exception):
# An error _is_ a response.
# Don't throw a response timeout, when a response _is_ given.
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
response = None
try:
response = self.error_handler.response(self.request, exception)
version = self.request.version if self.request else "1.1"
self.transport.write(response.output(version))
except RuntimeError:
if self.app.debug:
logger.error(
"Connection lost before error written @ %s",
self.request.ip if self.request else "Unknown",
)
except Exception as e:
self.bail_out(
f"Writing error failed, connection closed {e!r}",
from_error=True,
)
finally:
if self.parser and (
self.keep_alive or getattr(response, "status", 0) == 408
):
self.log_response(response)
try:
self.transport.close()
except AttributeError:
logger.debug("Connection lost before server could close it.")
def bail_out(self, message, from_error=False):
"""
If the transport pipes are closed and the sanic app encounters
an error while writing data to the transport pipe, we log the error
with proper details.
:param message: Error message to display
:param from_error: If the bail out was invoked while handling an
exception scenario.
:type message: str
:type from_error: bool
:return: None
"""
if from_error or self.transport is None or self.transport.is_closing():
logger.error(
"Transport closed @ %s and exception "
"experienced during error handling",
(
self.transport.get_extra_info("peername")
if self.transport is not None
else "N/A"
),
)
logger.debug("Exception:", exc_info=True)
else:
self.write_error(ServerError(message))
logger.error(message)
def cleanup(self):
"""This is called when KeepAlive feature is used,
it resets the connection in order for it to be able
to handle receiving another request on the same connection."""
self.parser = None
self.request = None
self.url = None
self.headers = None
self._request_handler_task = None
self._request_stream_task = None
self._total_request_size = 0
self._is_stream_handler = False
def close_if_idle(self):
"""Close the connection if a request is not being sent or received
:return: boolean - True if closed, false if staying open
"""
if not self.parser and self.transport is not None:
self.transport.close()
return True
return False
def close(self):
"""
Force close the connection.
"""
if self.transport is not None:
self.transport.close()
self.transport = None
def trigger_events(events, loop):
"""Trigger event callbacks (functions or async)
:param events: one or more sync or async functions to execute
:param loop: event loop
"""
for event in events:
result = event(loop)
if isawaitable(result):
loop.run_until_complete(result)
class AsyncioServer:
"""
Wraps an asyncio server with functionality that might be useful to
a user who needs to manage the server lifecycle manually.
"""
__slots__ = (
"loop",
"serve_coro",
"_after_start",
"_before_stop",
"_after_stop",
"server",
"connections",
)
def __init__(
self,
loop,
serve_coro,
connections,
after_start,
before_stop,
after_stop,
):
# Note, Sanic already called "before_server_start" events
# before this helper was even created. So we don't need it here.
self.loop = loop
self.serve_coro = serve_coro
self._after_start = after_start
self._before_stop = before_stop
self._after_stop = after_stop
self.server = None
self.connections = connections
def after_start(self):
"""Trigger "after_server_start" events"""
trigger_events(self._after_start, self.loop)
def before_stop(self):
"""Trigger "before_server_stop" events"""
trigger_events(self._before_stop, self.loop)
def after_stop(self):
"""Trigger "after_server_stop" events"""
trigger_events(self._after_stop, self.loop)
def is_serving(self):
if self.server:
return self.server.is_serving()
return False
def wait_closed(self):
if self.server:
return self.server.wait_closed()
def close(self):
if self.server:
self.server.close()
coro = self.wait_closed()
task = asyncio.ensure_future(coro, loop=self.loop)
return task
def start_serving(self):
if self.server:
try:
return self.server.start_serving()
except AttributeError:
raise NotImplementedError(
"server.start_serving not available in this version "
"of asyncio or uvloop."
)
def serve_forever(self):
if self.server:
try:
return self.server.serve_forever()
except AttributeError:
raise NotImplementedError(
"server.serve_forever not available in this version "
"of asyncio or uvloop."
)
def __await__(self):
"""Starts the asyncio server, returns AsyncServerCoro"""
task = asyncio.ensure_future(self.serve_coro)
while not task.done():
yield
self.server = task.result()
return self
def serve(
host,
port,
app,
before_start=None,
after_start=None,
before_stop=None,
after_stop=None,
ssl=None,
sock=None,
unix=None,
reuse_port=False,
loop=None,
protocol=HttpProtocol,
backlog=100,
register_sys_signals=True,
run_multiple=False,
run_async=False,
connections=None,
signal=Signal(),
state=None,
asyncio_server_kwargs=None,
):
"""Start asynchronous HTTP Server on an individual process.
:param host: Address to host on
:param port: Port to host on
:param before_start: function to be executed before the server starts
listening. Takes arguments `app` instance and `loop`
:param after_start: function to be executed after the server starts
listening. Takes arguments `app` instance and `loop`
:param before_stop: function to be executed when a stop signal is
received before it is respected. Takes arguments
`app` instance and `loop`
:param after_stop: function to be executed when a stop signal is
received after it is respected. Takes arguments
`app` instance and `loop`
:param ssl: SSLContext
:param sock: Socket for the server to accept connections from
:param unix: Unix socket to listen on instead of TCP port
:param reuse_port: `True` for multiple workers
:param loop: asyncio compatible event loop
:param run_async: bool: Do not create a new event loop for the server,
and return an AsyncioServer object rather than running it
:param asyncio_server_kwargs: key-value args for asyncio/uvloop
create_server method
:return: Nothing
"""
if not run_async:
# create new event_loop after fork
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if app.debug:
loop.set_debug(app.debug)
app.asgi = False
connections = connections if connections is not None else set()
protocol_kwargs = _build_protocol_kwargs(protocol, app.config)
server = partial(
protocol,
loop=loop,
connections=connections,
signal=signal,
app=app,
state=state,
unix=unix,
**protocol_kwargs,
)
asyncio_server_kwargs = (
asyncio_server_kwargs if asyncio_server_kwargs else {}
)
# UNIX sockets are always bound by us (to preserve semantics between modes)
if unix:
sock = bind_unix_socket(unix, backlog=backlog)
server_coroutine = loop.create_server(
server,
None if sock else host,
None if sock else port,
ssl=ssl,
reuse_port=reuse_port,
sock=sock,
backlog=backlog,
**asyncio_server_kwargs,
)
if run_async:
return AsyncioServer(
loop=loop,
serve_coro=server_coroutine,
connections=connections,
after_start=after_start,
before_stop=before_stop,
after_stop=after_stop,
)
trigger_events(before_start, loop)
try:
http_server = loop.run_until_complete(server_coroutine)
except BaseException:
logger.exception("Unable to start server")
return
trigger_events(after_start, loop)
# Ignore SIGINT when run_multiple
if run_multiple:
signal_func(SIGINT, SIG_IGN)
# Register signals for graceful termination
if register_sys_signals:
if OS_IS_WINDOWS:
ctrlc_workaround_for_windows(app)
else:
for _signal in [SIGTERM] if run_multiple else [SIGINT, SIGTERM]:
loop.add_signal_handler(_signal, app.stop)
pid = os.getpid()
try:
logger.info("Starting worker [%s]", pid)
loop.run_forever()
finally:
logger.info("Stopping worker [%s]", pid)
# Run the on_stop function if provided
trigger_events(before_stop, loop)
# Wait for event loop to finish and all connections to drain
http_server.close()
loop.run_until_complete(http_server.wait_closed())
# Complete all tasks on the loop
signal.stopped = True
for connection in connections:
connection.close_if_idle()
# Graceful shutdown timeout.
# We should respect graceful_shutdown_timeout
# instead of letting connections hang forever.
# Let's roughly calculate the time.
graceful = app.config.GRACEFUL_SHUTDOWN_TIMEOUT
start_shutdown = 0
while connections and (start_shutdown < graceful):
loop.run_until_complete(asyncio.sleep(0.1))
start_shutdown = start_shutdown + 0.1
# Force close non-idle connection after waiting for
# graceful_shutdown_timeout
coros = []
for conn in connections:
if hasattr(conn, "websocket") and conn.websocket:
coros.append(conn.websocket.close_connection())
else:
conn.close()
_shutdown = asyncio.gather(*coros)
loop.run_until_complete(_shutdown)
trigger_events(after_stop, loop)
loop.close()
remove_unix_socket(unix)
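# A minimal sketch of driving serve() directly instead of app.run(); the app
# object and the listener below are assumptions used only for illustration
# (normally Sanic's runner builds these arguments itself). Note that
# trigger_events() calls each listener with the loop as its only argument:
#
#   from sanic import Sanic
#   app = Sanic("example")
#   serve(host="127.0.0.1", port=8000, app=app,
#         before_start=[], after_start=[lambda loop: logger.info("worker is up")],
#         before_stop=[], after_stop=[])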
def _build_protocol_kwargs(
protocol: Type[HttpProtocol], config: Config
) -> dict:
if hasattr(protocol, "websocket_timeout"):
return {
"max_size": config.WEBSOCKET_MAX_SIZE,
"max_queue": config.WEBSOCKET_MAX_QUEUE,
"read_limit": config.WEBSOCKET_READ_LIMIT,
"write_limit": config.WEBSOCKET_WRITE_LIMIT,
"ping_timeout": config.WEBSOCKET_PING_TIMEOUT,
"ping_interval": config.WEBSOCKET_PING_INTERVAL,
}
return {}
def bind_socket(host: str, port: int, *, backlog=100) -> socket.socket:
"""Create TCP server socket.
:param host: IPv4, IPv6 or hostname may be specified
:param port: TCP port number
:param backlog: Maximum number of connections to queue
:return: socket.socket object
"""
try: # IP address: family must be specified for IPv6 at least
ip = ip_address(host)
host = str(ip)
sock = socket.socket(
socket.AF_INET6 if ip.version == 6 else socket.AF_INET
)
except ValueError: # Hostname, may become AF_INET or AF_INET6
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen(backlog)
return sock
def bind_unix_socket(path: str, *, mode=0o666, backlog=100) -> socket.socket:
"""Create unix socket.
:param path: filesystem path
:param backlog: Maximum number of connections to queue
:return: socket.socket object
"""
"""Open or atomically replace existing socket with zero downtime."""
# Sanitise and pre-verify socket path
path = os.path.abspath(path)
folder = os.path.dirname(path)
if not os.path.isdir(folder):
raise FileNotFoundError(f"Socket folder does not exist: {folder}")
try:
if not stat.S_ISSOCK(os.stat(path, follow_symlinks=False).st_mode):
raise FileExistsError(f"Existing file is not a socket: {path}")
except FileNotFoundError:
pass
# Create new socket with a random temporary name
tmp_path = f"{path}.{secrets.token_urlsafe()}"
sock = socket.socket(socket.AF_UNIX)
try:
# Critical section begins (filename races)
sock.bind(tmp_path)
try:
os.chmod(tmp_path, mode)
# Start listening before rename to avoid connection failures
sock.listen(backlog)
os.rename(tmp_path, path)
except: # noqa: E722
try:
os.unlink(tmp_path)
finally:
raise
except: # noqa: E722
try:
sock.close()
finally:
raise
return sock
def remove_unix_socket(path: str) -> None:
"""Remove dead unix socket during server exit."""
if not path:
return
try:
if stat.S_ISSOCK(os.stat(path, follow_symlinks=False).st_mode):
# Is it actually dead (doesn't belong to a new server instance)?
with socket.socket(socket.AF_UNIX) as testsock:
try:
testsock.connect(path)
except ConnectionRefusedError:
os.unlink(path)
except FileNotFoundError:
pass
def serve_multiple(server_settings, workers):
"""Start multiple server processes simultaneously. Stop on interrupt
and terminate signals, and drain connections when complete.
:param server_settings: kw arguments to be passed to the serve function
:param workers: number of workers to launch
:param stop_event: if provided, is used as a stop signal
:return:
"""
server_settings["reuse_port"] = True
server_settings["run_multiple"] = True
# Create a listening socket or use the one in settings
sock = server_settings.get("sock")
unix = server_settings["unix"]
backlog = server_settings["backlog"]
if unix:
sock = bind_unix_socket(unix, backlog=backlog)
server_settings["unix"] = unix
if sock is None:
sock = bind_socket(
server_settings["host"], server_settings["port"], backlog=backlog
)
sock.set_inheritable(True)
server_settings["sock"] = sock
server_settings["host"] = None
server_settings["port"] = None
processes = []
def sig_handler(signal, frame):
logger.info("Received signal %s. Shutting down.", Signals(signal).name)
for process in processes:
os.kill(process.pid, SIGTERM)
signal_func(SIGINT, lambda s, f: sig_handler(s, f))
signal_func(SIGTERM, lambda s, f: sig_handler(s, f))
mp = multiprocessing.get_context("fork")
for _ in range(workers):
process = mp.Process(target=serve, kwargs=server_settings)
process.daemon = True
process.start()
processes.append(process)
for process in processes:
process.join()
# the above processes will block this until they're stopped
for process in processes:
process.terminate()
sock.close()
remove_unix_socket(unix)
|
system.py
|
# Copyright 2014-2019 Chris Cummins <chrisc.101@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilies for grokking the underlying system.
Variables:
* `HOSTNAME` (str) System hostname.
* `USERNAME` (str) Username.
* `UID` (int) User ID.
* `PID` (int) Process ID.
"""
from __future__ import print_function
import getpass
import os
import socket
import subprocess
import sys
import tempfile
import threading
import typing
from sys import platform
from labm8 import app
from labm8 import fs
HOSTNAME = socket.gethostname()
USERNAME = getpass.getuser()
UID = os.getuid()
PID = os.getpid()
argv = sys.argv
STDOUT = sys.stdout
STDERR = sys.stderr
PIPE = subprocess.PIPE
class Error(Exception):
pass
class SubprocessError(Error):
"""
Error thrown if a subprocess fails.
"""
pass
class CommandNotFoundError(Exception):
"""
Error thrown if a system command is not found.
"""
pass
class ScpError(Error):
"""
Error thrown if scp file transfer fails.
"""
def __init__(self, stdout, stderr):
"""
Construct an ScpError.
Arguments:
stdout (str): Captured stdout of scp subprocess.
stderr (str): Captured stderr of scp subprocess.
"""
self.out = stdout
self.err = stderr
def __repr__(self):
return self.out + '\n' + self.err
def __str__(self):
return self.__repr__()
class Subprocess(object):
"""Subprocess abstraction.
Wrapper around subprocess.Popen() which provides the ability to
force a timeout after a number of seconds have elapsed.
"""
def __init__(
self,
cmd,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
decode_out=True,
):
"""
Create a new subprocess.
"""
self.cmd = cmd
self.process = None
self.stdout = None
self.stderr = None
self.shell = shell
self.decode_out = decode_out
self.stdout_dest = stdout
self.stderr_dest = stderr
def run(self, timeout=-1):
"""
Run the subprocess.
Arguments:
timeout (optional): If a positive real value, then time out after
the given number of seconds.
Raises:
SubprocessError If subprocess has not completed after "timeout"
seconds.
"""
def target():
self.process = subprocess.Popen(
self.cmd,
stdout=self.stdout_dest,
stderr=self.stderr_dest,
shell=self.shell,
)
stdout, stderr = self.process.communicate()
# Decode output if the user wants, and if there is any.
if self.decode_out:
if stdout:
self.stdout = stdout.decode('utf-8')
if stderr:
self.stderr = stderr.decode('utf-8')
thread = threading.Thread(target=target)
thread.start()
if timeout > 0:
thread.join(timeout)
if thread.is_alive():
self.process.terminate()
thread.join()
raise SubprocessError(
('Reached timeout after {t} seconds'.format(t=timeout)),)
else:
thread.join()
return self.process.returncode, self.stdout, self.stderr
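# Usage sketch for the wrapper above (the command shown is only an
# illustration):
#
#   process = Subprocess(['hostname'])
#   returncode, stdout, stderr = process.run(timeout=5)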
def is_linux():
return platform == 'linux' or platform == 'linux2'
def is_mac():
return platform == 'darwin'
def is_windows():
return platform == 'win32'
def run(command, num_retries=1, timeout=-1, **kwargs):
"""
Run a command with optional timeout and retries.
Provides a convenience method for executing a subprocess with
additional error handling.
Arguments:
command (list of str): The command to execute.
num_retries (int, optional): If the subprocess fails, the number of
attempts to execute it before failing.
timeout (float, optional): If positive, the number of seconds to wait
for subprocess completion before failing.
**kwargs: Additional args to pass to Subprocess.__init__()
Returns:
Tuple of (int, str, str): Where the variables represent
(exit status, stdout, stderr).
Raises:
SubprocessError: If the command fails after the given number of
retries.
"""
last_error = None
for _ in range(num_retries):
try:
process = Subprocess(command, **kwargs)
return process.run(timeout)
except Exception as err:
last_error = err
raise last_error
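# Convenience-call sketch, equivalent to constructing Subprocess directly but
# with retries (again, the command is only an illustration):
#
#   status, out, err = run(['hostname'], num_retries=3, timeout=5)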
def sed(match, replacement, path, modifiers=''):
"""Perform sed text substitution.
This requires GNU sed. On MacOS, install it using:
$ brew "gnu-sed"
And then ensure that it is in the PATH before the OS-shipped sed:
$ export PATH="/usr/local/opt/gnu-sed/libexec/gnubin:$PATH"
"""
cmd = "sed -r -i 's/%s/%s/%s' %s" % (match, replacement, modifiers, path)
process = Subprocess(cmd, shell=True)
ret, out, err = process.run(timeout=60)
if ret:
raise SubprocessError('Sed command failed!')
def echo(*args, **kwargs):
"""
Write a message to a file.
Arguments:
args A list of arguments which make up the message. The last argument
is the path to the file to write to.
"""
msg = args[:-1]
path = fs.path(args[-1])
append = kwargs.pop('append', False)
if append:
with open(path, 'a') as file:
print(*msg, file=file, **kwargs)
else:
with open(fs.path(path), 'w') as file:
print(*msg, file=file, **kwargs)
def which(program, path=None):
"""
Returns the full path of shell commands.
Replicates the functionality of system which (1) command. Looks
for the named program in the directories indicated in the $PATH
environment variable, and returns the full path if found.
Examples:
>>> system.which("ls")
"/bin/ls"
>>> system.which("/bin/ls")
"/bin/ls"
>>> system.which("not-a-real-command")
None
>>> system.which("ls", path=("/usr/bin", "/bin"))
"/bin/ls"
Arguments:
program (str): The name of the program to look for. Can
be an absolute path.
path (sequence of str, optional): A list of directories to
look for the program in. Default value is system $PATH.
Returns:
str: Full path to program if found, else None.
"""
# If path is not given, read the $PATH environment variable.
path = path or os.environ['PATH'].split(os.pathsep)
abspath = True if os.path.split(program)[0] else False
if abspath:
if fs.isexe(program):
return program
else:
for directory in path:
# De-quote directories.
directory = directory.strip('"')
exe_file = os.path.join(directory, program)
if fs.isexe(exe_file):
return exe_file
return None
def isprocess(pid, error=False):
"""
Check that a process is running.
Arguments:
pid (int): Process ID to check.
Returns:
True if the process is running, else false.
"""
try:
# Don't worry folks, no processes are harmed in the making of
# this system call:
os.kill(pid, 0)
return True
except OSError:
return False
def exit(status=0):
"""
Terminate the program with the given status code.
"""
if status == 0:
print('Done.', file=sys.stderr)
else:
print('Error {0}'.format(status), file=sys.stderr)
sys.exit(status)
def ProcessFileAndReplace(
path: str,
process_file_callback: typing.Callable[[str, str], None],
tempfile_prefix: str = 'labm8_system_',
tempfile_suffix: str = None,
) -> None:
"""Process a file and replace with the generated file.
This function provides the functionality of inplace file modification for
functions which take an input file and produce an output file. It does this
by creating a temporary file which, if the function returns successfully (i.e.
without exception), will overwrite the original file.
Args:
path: The path of the file to process inplace.
process_file_callback: A function which takes two arguments - the path of
an input file, and the path of an output file.
tempfile_prefix: An optional name prefix for the temporary file.
tempfile_suffix: An optional name suffix for the temporary file.
"""
with tempfile.NamedTemporaryFile(
prefix=tempfile_prefix,
suffix=tempfile_suffix,
delete=False,
) as f:
tmp_path = f.name
try:
process_file_callback(path, tmp_path)
os.rename(tmp_path, path)
finally:
if os.path.isfile(tmp_path):
os.unlink(tmp_path)
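# Usage sketch: an in-place transformation expressed as an (input, output)
# callback; upper_case is a hypothetical helper used only to illustrate the
# callback signature expected by ProcessFileAndReplace:
#
#   def upper_case(in_path, out_path):
#     with open(in_path) as fin, open(out_path, 'w') as fout:
#       fout.write(fin.read().upper())
#
#   ProcessFileAndReplace('/tmp/notes.txt', upper_case)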
def CheckCallOrDie(cmd: typing.List[str]) -> None:
"""Run the given command and exit fatally on error."""
try:
app.Log(2, '$ %s', ' '.join(cmd))
subprocess.check_call(cmd)
except subprocess.CalledProcessError as e:
app.FatalWithoutStackTrace(
'Command: `%s` failed with error: %s',
' '.join(cmd),
e,
)
|
SyncServiceClient.py
|
""" SyncServiceClient is a client SDK of the Sync-Service written in Python
Exported classes: Client and MetaData
"""
import json
import shutil
import sys
import threading
import string
import time
from UnixSocketSupport import ExtendedPoolManager
import urllib3
""" MetaData is used to represent the metadata of an object in the Sync Service.
Fields:
object_id is a unique identifier of the object.
object_id and object_type must uniquely identify the object.
Must be provided by the application
object_type is the type of the object.
The type is used to group multiple objects, for example when checking for
object updates.
Must be provided by the application
dest_id is the ID of the destination. If omitted the object is sent to all ESSs with
the same destination type.
This field is ignored when working with ESS (the destination is the CSS).
dest_type is the type of destination to send the object to.
If omitted (and if destinations_list is omitted too) the object is broadcasted
to all known destinations.
This field is ignored when working with ESS (the destination is always the CSS).
destinations_list is the list of destinations as type:id pairs to send the object to.
When a DestinationsList is provided destination type and destination ID must be omitted.
This field is ignored when working with ESS (the destination is always the CSS).
expiration is a timestamp/date indicating when the object expires.
When the object expires it is automatically deleted.
The timestamp should be provided in RFC3339 format.
This field is available only when working with the CSS.
Optional field, if omitted the object doesn't expire.
version is the object's version (as used by the application).
Optional field, empty by default.
description is a textual description of the object.
Optional field, empty by default.
link is a link to where the data for this object can be fetched from.
Optional field, if omitted the data must be provided by the application.
inactive is a flag indicating that this object is inactive for now.
An object can be created as inactive which means it is not delivered to its
destination. The object can be activated later.
Optional field, default is false (object active).
activation_time is a timestamp/date as to when this object should automatically be activated.
The timestamp should be provided in RFC3339 format.
Optional field, if omitted (and Inactive is true) the object is never automatically
activated.
do_not_send is a flag indicating that this object should not be sent to any destinations.
Optional field, default is false (object is sent to destinations).
no_data is a flag indicating that there is no data for this object.
Objects with no data can be used, for example, to send notifications.
Optional field, default is false (object includes data).
meta_only MetaOnly is a flag that indicates that this update is only of the metadata. The
current object's data is left unchanged.
Optional field, default is false (both data and metadata are updated).
expected_consumers is the number of applications that are expected to indicate that they have consumed
the object.
Optional field, default is 1.
This field is used only when working with the CSS. The default value is always used
on the ESS.
destination_data_uri is a URI indicating where the receiver of the object should store it.
Currently only file URIs are supported.
This field is available only when working with the CSS.
Optional field, if omitted the object is stored in the node's internal storage.
source_data_uri is a URI indicating where the sender of the object should read the data from.
Currently only file URIs are supported.
This field is available only when working with the ESS.
Optional field, if omitted the object's data should be provided by the user.
auto_delete is a flag indicating whether to delete the object after it is delivered to all its
destinations from the destinations list.
Optional field, default is false (do not delete).
This field is used only when working with the CSS. Objects are always deleted after
delivery on the ESS.
deleted is a flag indicating to applications polling for updates that this object has been
deleted.
Read only field, should not be set by users.
origin_id is the ID of origin of the object. Set by the internal code.
Read only field, should not be set by users.
origin_type is the type of origin of the object. Set by the internal code.
Read only field, should not be set by users.
instance_id is an internal identifier of the object. Set by the internal code.
Read only field, should not be set by users.
"""
class MetaData:
def __init__(self, _json=None):
if _json != None:
self.activation_time = str(_json.get("activationTime", ""))
self.auto_delete = _json.get("autodelete", False)
self.expected_consumers = _json.get("consumers", 1)
self.deleted = _json.get("deleted", False)
self.description = str(_json.get("description", ""))
self.destination_data_uri = str(_json.get("destinationDataUri", ""))
self.dest_id = str(_json.get("destinationID", ""))
self.destination_org_id = str(_json.get("destinationOrgID", ""))
self.dest_type = str(_json.get("destinationType", ""))
self.destinations_list = _json.get("destinationsList", [])
self.do_not_send = _json.get("doNotSend", False)
self.expiration = str(_json.get("expiration", ""))
self.inactive = _json.get("inactive", False)
self.link = str(_json.get("link", ""))
self.meta_only = _json.get("metaOnly", False)
self.no_data = _json.get("noData", False)
self.object_id = str(_json.get("objectID", ""))
self.object_type = str(_json.get("objectType", ""))
self.origin_id = str(_json.get("originID", ""))
self.origin_type = str(_json.get("originType", ""))
self.source_data_uri = str(_json.get("sourceDataUri", ""))
self.version = str(_json.get("version", ""))
self.instance_id = _json.get("instanceID", 0)
else:
self.activation_time = ""
self.auto_delete = False
self.expected_consumers = 1
self.deleted = False
self.description = ""
self.dest_id = ""
self.destination_data_uri = ""
self.dest_type = ""
self.destination_org_id = ""
self.destinations_list = []
self.do_not_send = False
self.expiration = ""
self.inactive = False
self.link = ""
self.meta_only = False
self.no_data = False
self.object_id = ""
self.object_type = ""
self.origin_id = ""
self.origin_type = ""
self.source_data_uri = ""
self.version = ""
self.instance_id = 0
def _dict(self):
result = {
"activationTime": self.activation_time, "autodelete": self.auto_delete,
"consumers": self.expected_consumers, "deleted": self.deleted,
"description": self.description, "destinationDataUri": self.destination_data_uri,
"destinationID": self.dest_id, "destinationOrgID": self.destination_org_id,
"destinationType": self.dest_type,
"doNotSend": self.do_not_send, "expiration": self.expiration,
"inactive": self.inactive, "link": self.link,
"metaOnly": self.meta_only, "noData": self.no_data,
"objectID": self.object_id, "objectType": self.object_type,
"originID": self.origin_id, "originType": self.origin_type,
"sourceDataUri": self.source_data_uri, "version": self.version,
"instanceID": self.instance_id
}
if len(self.destinations_list) != 0:
result["destinationsList"] = self.destinations_list
return result
def __str__(self):
return "{ " + "\"activationTime\": \"" + self.activation_time + "\", " + \
"\"autodelete\": \"" + str(self.auto_delete) + "\", " + \
"\"expected_consumers\": \"" + str(self.expected_consumers) + "\", " + \
"\"deleted\": \"" + str(self.deleted) + "\", " + \
"\"description\": \"" + self.description + "\", " + \
"\"destination_data_uri\": \"" + self.destination_data_uri + "\", " + \
"\"dest_id\": \"" + self.dest_id + "\", " + \
"\"destination_org_id\": \"" + self.destination_org_id + "\", " + \
"\"dest_type\": \"" + self.dest_type + "\", " + \
"\"do_not_send\": \"" + str(self.do_not_send) + "\", " + \
"\"expiration\": \"" + self.expiration + "\", " + \
"\"inactive\": \"" + str(self.inactive) + "\", " + \
"\"link\": \"" + self.link + "\", " + \
"\"meta_only\": \"" + str(self.meta_only) + "\", " + \
"\"no_data\": \"" + str(self.no_data) + "\", " + \
"\"object_id\": \"" + self.object_id + "\", " + \
"\"object_type\": \"" + self.object_type + "\", " + \
"\"origin_id\": \"" + self.origin_id + "\", " + \
"\"origin_type\": \"" + self.origin_type + "\", " + \
"\"source_data_uri\": \"" + self.source_data_uri + "\", " + \
"\"version\": \"" + self.version + "\", " + \
"\"instance_id\": \"" + str(self.instance_id) + "\"" + \
" }"
def __unicode__(self):
return u'n/a'
def __repr__(self):
return self.__str__()
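# Construction sketch: MetaData can be built empty and filled in field by
# field, or from a JSON dictionary as returned by the Sync Service (the values
# below are illustrative only):
#
#   meta = MetaData()
#   meta.object_id = "sensor-config-1"
#   meta.object_type = "config"
#
#   meta_from_json = MetaData(_json={"objectID": "sensor-config-1",
#                                    "objectType": "config"})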
""" Destination defines an edge node (an ESS) that has connected to a CSS
dest_org_id is the destination organization ID
Each Sync Service destination belongs to a single organization
dest_type is the destination type
dest_id is the destination ID
communication is the communications method used by the destination to connect (can be MQTT or HTTP)
code_version is the sync service code version used by the destination
"""
class Destination:
def __init__(self, _json=None):
if _json != None:
self.dest_org_id = str(_json.get("destinationOrgID", ""))
self.dest_type = str(_json.get("destinationType", ""))
self.dest_id = str(_json.get("destinationID", ""))
self.communication = str(_json.get("communication", ""))
self.code_version = str(_json.get("codeVersion", ""))
else:
self.dest_org_id = ""
self.dest_type = ""
self.dest_id = ""
self.communication = ""
self.code_version = ""
def __str__(self):
return "{ " + "\"dest_org_id\": \"" + self.dest_org_id + "\", " + \
"\"dest_type\": \"" + self.dest_type + "\", " + \
"\"dest_id\": \"" + self.dest_id + "\", " + \
"\"communication\": \"" + self.communication + "\", " + \
"\"code_version\": \"" + self.code_version + "\"" + \
" }"
def __unicode__(self):
return u'n/a'
def __repr__(self):
return self.__str__()
""" DestinationStatus provides information about the delivery status of an object for a certain destination.
dest_type is the destination type
dest_id is the destination ID
status is the destination status
message is the message for the destination
The status can be one of the following:
pending - indicates that the object is pending delivery to this destination
delivering - indicates that the object is being delivered to this destination
delivered - indicates that the object was delivered to this destination
consumed - indicates that the object was consumed by this destination
deleted - indicates that this destination acknowledged the deletion of the object
error - indicates that a feedback error message was received from this destination
"""
class DestinationStatus:
def __init__(self, _json=None):
if _json != None:
self.dest_type = str(_json.get("destinationType", ""))
self.dest_id = str(_json.get("destinationID", ""))
self.status = str(_json.get("status", ""))
self.message = str(_json.get("message", ""))
else:
self.dest_type = ""
self.dest_id = ""
self.status = ""
self.message = ""
def __str__(self):
return "{ " + "\"dest_type\": \"" + self.dest_type + "\", " + \
"\"dest_id\": \"" + self.dest_id + "\", " + \
"\"status\": \"" + self.status + "\", " + \
"\"message\": \"" + self.message + "\"" + \
" }"
def __unicode__(self):
return u'n/a'
def __repr__(self):
return self.__str__()
""" ObjectStatus provides information about an object that is destined for a particular destination
org_id is the organization ID of the object
object_type is the type of the object
object_id is the ID of the object
status is the status of the object for this destination
"""
class ObjectStatus:
def __init__(self, _json=None):
if _json != None:
self.org_id = str(_json.get("orgID", ""))
self.object_type = str(_json.get("objectType", ""))
self.object_id = str(_json.get("objectID", ""))
self.status = str(_json.get("status", ""))
else:
self.org_id = ""
self.object_type = ""
self.object_id = ""
self.status = ""
def __str__(self):
return "{ " + "\"org_id\": \"" + self.org_id + "\", " + \
"\"object_type\": \"" + self.object_type + "\", " + \
"\"object_id\": \"" + self.object_id + "\", " + \
"\"status\": \"" + self.status + "\"" + \
" }"
def __unicode__(self):
return u'n/a'
def __repr__(self):
return self.__str__()
""" Sync Service client handle object
"""
class Client:
_destinations_path = "/api/v1/destinations"
_objects_path = "/api/v1/objects/"
_resend_path = "/api/v1/resend"
_security_path = "/api/v1/security/"
_destination_acl = "destinations"
_object_acl = "objects"
""" Constructor
serviceProtocol defines the protocol used to connect to the Sync Service. It should be either "https",
"http", "unix", or "secure-unix".
If serviceProtocol is either "https" or "http", serviceAddress and servicePort specify the address and
listening port of the Sync Service, respectively.
If serviceProtocol is "unix" or "secure-unix", serviceAddress should contain the socket file used by
the ESS, servicePort can be zero.
Note: The serviceProtocol can be "unix" or "secure-unix", only when communicating with an ESS.
"""
def __init__(self, service_protocol, service_address, service_port):
self._service_protocol = service_protocol
if service_protocol == 'unix':
self._service_address = "unix:8080"
self._unix_socket_path = service_address
elif service_protocol == 'secure-unix':
self._service_address = "secure-unix:8080"
self._unix_socket_path = service_address
else:
self._service_address = service_address + ":" + str(service_port)
self._unix_socket_path = ''
self._app_key = ""
self._app_secret = ""
self._app_key_and_secret_set = False
self.org_id = ""
self._http_client = self._get_pool_manager()
""" set_org_id
Sets the organization ID used in requests to the Sync Service.
    This should only be invoked if working with a Cloud Sync Service (CSS).
"""
def set_org_id(self, org_id):
self.org_id = org_id
""" set_ca_certificate
    Sets the CA certificate used for the client's secured connections, if needed.
"""
def set_ca_certificate(self, cert_pem):
self._http_client = self._get_pool_manager(ca_certs=cert_pem)
""" set_app_key_and_secret
Sets the app key and app secret to be used when communicating with Sync Service.
The app key and app secret are used to authenticate with the Sync Service that the client is
communicating with. The exact details of the app key and app secret depend on the Sync Service's configuration.
key is the app key to be used.
secret is the app secret to be used.
"""
def set_app_key_and_secret(self, key, secret):
self._app_key = key
self._app_secret = secret
self._app_key_and_secret_set = True
""" start_polling_for_updates
Starts the polling of the Sync Service for updates.
    Each invocation starts a thread that periodically polls the Sync Service for new updates for a specific
object type.
object_type specifies the type of objects the client should retrieve updates for.
rate is the period, in seconds, between poll requests.
callback specifies a function to be called when an object has been updated on the Sync Service. It will
be called with a single parameter, which will be an instance of the class MetaData.
"""
def start_polling_for_updates(self, object_type, rate, callback):
self._keep_on_polling = True
url = self._create_object_url(object_type, "", "")
c = threading.Thread(target=self._poller, args=[url, rate, callback])
c.setDaemon(True)
c.start()
def _poller(self, url, rate, callback):
firstPoll = True
while self._keep_on_polling:
time.sleep(rate)
actualUrl = url
if firstPoll:
actualUrl = actualUrl + "?received=true"
try:
response = self._request_helper("GET", actualUrl)
if response.status == 200:
data = json.loads(response.data.decode('utf-8'))
for item in data:
callback(MetaData(_json=item))
if (response.status >= 200 and response.status < 300) or response.status == 404:
firstPoll = False
except:
(_, exc_value, _) = sys.exc_info()
print exc_value
print "Stopped polling for updates"
""" stop_polling_for_updates
Stops the polling of the Sync Service for updates.
"""
def stop_polling_for_updates(self):
self._keep_on_polling = False
""" fetch_object_data
Fetches the data for an object given its metadata.
meta_data is the metadata instance of the object whose data is to be fetched.
writer is a "file like object" to which the fetched data is written.
Returns True if the operation succeeded, False otherwise.
"""
def fetch_object_data(self, meta_data, writer):
url = self._create_object_url(meta_data.object_type, meta_data.object_id, "data")
response = self._request_helper("GET", url, preload_content=False)
result = response.status == 200
if result:
shutil.copyfileobj(response, writer)
response.release_conn()
return result
""" activate_object
Tells the Sync Service to mark an object as active.
meta_data is the metadata of the object that should be activated.
Only objects that were created as inactive need to be activated, see ObjectMetaData.inactive.
Returns True if the operation succeeded, False otherwise.
"""
def activate_object(self, meta_data):
url = self._create_object_url(meta_data.object_type, meta_data.object_id, "activate")
response = self._request_helper("PUT", url)
if response.status >= 200 and response.status < 300:
return True
else:
return False
""" mark_object_consumed
Tells the Sync Service to mark an object consumed.
    meta_data is the metadata of the object that should be marked consumed.
After an object is marked as consumed it will not be delivered to the application again
(even if the app or the Sync Service are restarted).
Returns True if the operation succeeded, False otherwise.
"""
def mark_object_consumed(self, meta_data):
url = self._create_object_url(meta_data.object_type, meta_data.object_id, "consumed")
response = self._request_helper("PUT", url)
if response.status >= 200 and response.status < 300:
return True
else:
return False
""" mark_object_deleted
Tells the ESS to mark an object that was deleted on the CSS as having been deleted on the ESS.
meta_data is the metadata of the object to be marked as deleted.
Returns True if the operation succeeded, False otherwise.
"""
def mark_object_deleted(self, meta_data):
url = self._create_object_url(meta_data.object_type, meta_data.object_id, "deleted")
response = self._request_helper("PUT", url)
if response.status >= 200 and response.status < 300:
return True
else:
return False
""" mark_object_received
Tells the Sync Service to mark an object received.
meta_data is the metadata of the object that should be marked received.
After an object is marked as received it will not be delivered to the application again,
unless the app restarts polling for updates.
Returns True if the operation succeeded, False otherwise.
"""
def mark_object_received(self, meta_data):
url = self._create_object_url(meta_data.object_type, meta_data.object_id, "received")
response = self._request_helper("PUT", url)
if response.status >= 200 and response.status < 300:
return True
else:
return False
""" update_object
Creates/updates an object in the Sync Service.
meta_data specifies the object's metadata.
The application must provide the ObjectID and ObjectType which uniquely identify the object. When
creating/updating an object in the CSS the application must also provide either DestID and DestType
or DestinationsList. All other fields in ObjectMetaData are optional and if not specified will take
the default values.
Returns True if the operation succeeded, False otherwise.
"""
def update_object(self, meta_data):
url = self._create_object_url(meta_data.object_type, meta_data.object_id, "")
payload = {"meta": meta_data._dict()}
response = self._request_helper("PUT", url,
body=json.dumps(payload).encode('utf-8'),
headers={'Content-Type': 'application/json'})
if response.status >= 200 and response.status < 300:
return True
else:
return False
""" update_object_data
Updates the data of an object in the Sync Service.
meta_data is the object's metadata (the one used to create the object in update_object).
reader is a "file like object" from which to read the object's data.
Note that the object's data can be updated multiple times without updating the metadata.
Returns True if the operation succeeded, False otherwise.
"""
def update_object_data(self, meta_data, reader):
url = self._create_object_url(meta_data.object_type, meta_data.object_id, "data")
response = self._request_helper("PUT", url, preload_content=False, body=reader)
result = response.status == 200
response.release_conn()
return result
""" delete_object
Deletes an object in the Sync Service
object_type is the object type of the object being deleted
object_id is the object ID of the object being deleted
Returns True if the operation succeeded, False otherwise.
"""
def delete_object(self, object_type, object_id):
url = self._create_object_url(object_type, object_id, "")
response = self._request_helper("DELETE", url)
if response.status >= 200 and response.status < 300:
return True
else:
return False
""" get_object_metadata
Retrieves the metadata for the specified object
    Returns a tuple of a MetaData object and a boolean. The boolean will be True
if the operation succeeded, False otherwise.
"""
def get_object_metadata(self, object_type, object_id):
url = self._create_object_url(object_type, object_id, "")
try:
response = self._request_helper("GET", url)
if response.status == 200:
data = json.loads(response.data.decode('utf-8'))
result = MetaData(_json=data)
return result, True
elif response.status == 404:
return None, True
else:
print "Received a response of", response.status
return None, False
except:
(_, exc_value, _) = sys.exc_info()
print exc_value
return None, False
""" get_object_status
Returns the status of an object.
Returns a tuple of a string and a boolean. The boolean will be True
if the operation succeeded, False otherwise.
The string will have one of the following values:
notReady - The object is not ready to be sent to the destination.
ready - The object is ready to be sent but was not yet received by the destination.
received - The destination received the object's metadata but not all its data.
completelyReceived - The destination received the full object (metadata and data).
consumed - The object was consumed by the application running on the destination.
deleted - The object was deleted by the destination.
Note: An empty string indicates that the object is not on the server
"""
def get_object_status(self, object_type, object_id):
url = self._create_object_url(object_type, object_id, "status")
try:
response = self._request_helper("GET", url)
if response.status == 200:
result = response.data.decode('utf-8')
return result, True
elif response.status == 404:
return "", True
else:
print "Received a response of", response.status
return None, False
except:
(_, exc_value, _) = sys.exc_info()
print exc_value
return None, False
""" get_object_destinations
Returns the list of destinations that an object is being sent to, along with the
status of each "transmission"
Returns a tuple of an array of DestinationStatus objects and a boolean. The boolean will be True
if the operation succeeded, False otherwise.
"""
def get_object_destinations(self, object_type, object_id):
url = self._create_object_url(object_type, object_id, "destinations")
return self._request_and_response_helper("GET", url, DestinationStatus)
""" get_destinations
get_destinations returns the list of registered edge nodes under an organization in the CSS.
Returns a tuple of an array of Destination objects and a boolean. The boolean will be True
if the operation succeeded, False otherwise.
"""
def get_destinations(self):
url = self._service_protocol + "://" + self._service_address + Client._destinations_path
if len(self.org_id) != 0:
url = url + "/" + self.org_id
return self._request_and_response_helper("GET", url, Destination)
""" get_destination_objects
get_destination_objects returns the list of objects targeted at the specified destination
Returns a tuple of an array of ObjectStatus and a boolean. The boolean will be True
if the operation succeeded, False otherwise.
"""
def get_destination_objects(self, dest_type, dest_id):
url = self._service_protocol + "://" + self._service_address + Client._destinations_path
if len(self.org_id) != 0:
url = url + "/" + self.org_id
url = url + "/" + dest_type + "/" + dest_id + "/objects"
return self._request_and_response_helper("GET", url, ObjectStatus)
""" resend
Resend requests that all objects in the Sync Service be resent to an ESS.
Used by an ESS to ask the CSS to resend it all the objects (supported only for ESS to CSS requests).
An application only needs to use this API in case the data it previously obtained from the ESS was lost.
"""
def resend(self):
url = self._service_protocol + "://" + self._service_address + Client._resend_path
response = self._request_helper("POST", url)
if response.status >= 200 and response.status < 300:
return True
else:
return False
""" register_webhook
Registers a webhook to receive updates from the Sync Service.
Returns True if the operation succeeded, False otherwise.
"""
def register_webhook(self, object_type, url):
return self._webhook_helper("register", object_type, url)
""" delete_webhook
Deletes a webhook that was previously registered with RegisterWebhook.
Returns True if the operation succeeded, False otherwise.
"""
def delete_webhook(self, object_type, url):
return self._webhook_helper("delete", object_type, url)
""" add_users_to_destination_acl
Adds users to an ACL protecting a destination type.
For more information on the sync service's security model see: https://github.ibm.com/edge-sync-service-dev/edge-sync-service#security
Note: Adding the first user to such an ACL automatically creates it.
Returns True if the operation succeeded, False otherwise.
Note: This API is for use with a CSS only.
"""
def add_users_to_destination_acl(self, dest_type, usernames):
return self._modify_security_helper(True, Client._destination_acl, dest_type, usernames)
""" remove_users_from_destination_acl
Removes users from an ACL protecting a destination type.
For more information on the sync service's security model see: https://github.ibm.com/edge-sync-service-dev/edge-sync-service#security
Note: Removing the last user from such an ACL automatically deletes it.
Returns True if the operation succeeded, False otherwise.
Note: This API is for use with a CSS only.
"""
def remove_users_from_destination_acl(self, dest_type, usernames):
return self._modify_security_helper(False, Client._destination_acl, dest_type, usernames)
""" retrieve_destination_acl
Retrieves the list of users with access to a destination type protected by an ACL.
For more information on the sync service's security model see: https://github.ibm.com/edge-sync-service-dev/edge-sync-service#security
Returns a tuple of an array of strings and a boolean. The boolean will be True
if the operation succeeded, False otherwise.
Note: This API is for use with a CSS only.
"""
def retrieve_destination_acl(self, dest_type):
return self._retrieve_acl_helper(Client._destination_acl, dest_type)
""" retrieve_all_destination_acls
Retrieves the list of destination ACLs in the organization.
For more information on the sync service's security model see: https://github.ibm.com/edge-sync-service-dev/edge-sync-service#security
Returns a tuple of an array of strings and a boolean. The boolean will be True
if the operation succeeded, False otherwise.
Note: This API is for use with a CSS only.
"""
def retrieve_all_destination_acls(self):
return self._retrieve_acl_helper(Client._destination_acl, "")
""" add_users_to_object_acl
Adds users to an ACL protecting an object type.
For more information on the sync service's security model see: https://github.ibm.com/edge-sync-service-dev/edge-sync-service#security
Note: Adding the first user to such an ACL automatically creates it.
Returns True if the operation succeeded, False otherwise.
"""
def add_users_to_object_acl(self, object_type, usernames):
return self._modify_security_helper(True, Client._object_acl, object_type, usernames)
""" remove_users_from_object_acl
Removes users from an ACL protecting an object type.
For more information on the sync service's security model see: https://github.ibm.com/edge-sync-service-dev/edge-sync-service#security
Note: Removing the last user from such an ACL automatically deletes it.
Returns True if the operation succeeded, False otherwise.
"""
def remove_users_from_object_acl(self, object_type, usernames):
return self._modify_security_helper(False, Client._object_acl, object_type, usernames)
""" retrieve_object_acl
Retrieves the list of users with access to an object type protected by an ACL.
For more information on the sync service's security model see: https://github.ibm.com/edge-sync-service-dev/edge-sync-service#security
Returns a tuple of an array of strings and a boolean. The boolean will be True
if the operation succeeded, False otherwise.
"""
def retrieve_object_acl(self, object_type):
return self._retrieve_acl_helper(Client._object_acl, object_type)
""" retrieve_all_object_acls
Retrieves the list of object ACLs in the organization.
For more information on the sync service's security model see: https://github.ibm.com/edge-sync-service-dev/edge-sync-service#security
Returns a tuple of an array of strings and a boolean. The boolean will be True
if the operation succeeded, False otherwise.
"""
def retrieve_all_object_acls(self):
return self._retrieve_acl_helper(Client._object_acl, "")
def _create_object_url(self, object_type, object_id, command):
url = self._service_protocol + "://" + self._service_address + Client._objects_path
if len(self.org_id) != 0:
url = url + self.org_id + "/"
url = url + object_type
if len(object_id) != 0:
url = url + "/" + object_id
if len(command) != 0:
url = url + "/" + command
return url
def _request_helper(self, method, url, **kwargs):
arguments = dict()
for key in kwargs:
arguments[key] = kwargs[key]
if self._app_key_and_secret_set:
            auth_header = urllib3.make_headers(basic_auth=self._app_key + ":" + self._app_secret)
if 'headers' not in arguments:
arguments['headers'] = dict()
for header in auth_header:
arguments['headers'][header] = auth_header[header]
return self._http_client.request(method, url, **arguments)
def _request_and_response_helper(self, method, url, result_class):
try:
response = self._request_helper(method, url)
if response.status == 200:
data = json.loads(response.data.decode('utf-8'))
results = []
for item in data:
results.append(result_class(_json=item))
return results, True
elif response.status == 404:
return [], True
else:
print "Received a response of", response.status
return [], False
except:
(_, exc_value, _) = sys.exc_info()
print exc_value
return [], False
def _webhook_helper(self, action, object_type, webhook):
url = self._create_object_url(object_type, "", "")
payload = {"action": action, "url": webhook}
response = self._request_helper("PUT", url,
body=json.dumps(payload).encode('utf-8'),
headers={'Content-Type': 'application/json'})
if response.status >= 200 and response.status < 300:
return True
else:
return False
def _modify_security_helper(self, add, acl_type, key, usernames):
action = "remove"
if add:
action = "add"
url = self._service_protocol + "://" + self._service_address + \
Client._security_path + acl_type + "/" + self.org_id + "/" + key
payload = {"action": action, "usernames": usernames}
response = self._request_helper("PUT", url,
body=json.dumps(payload).encode('utf-8'),
headers={'Content-Type': 'application/json'})
if response.status >= 200 and response.status < 300:
return True
else:
return False
def _retrieve_acl_helper(self, acl_type, key):
url = self._service_protocol + "://" + self._service_address + \
Client._security_path + acl_type + "/" + self.org_id
if len(key) != 0:
url = url + "/" + key
try:
response = self._request_helper("GET", url)
if response.status == 200:
data = json.loads(response.data.decode('utf-8'))
results = []
for item in data:
results.append(str(item))
return results, True
elif response.status == 404:
return [], True
else:
print "Received a response of", response.status
return [], False
except:
(_, exc_value, _) = sys.exc_info()
print exc_value
return [], False
def _get_pool_manager(self, **kwargs):
arguments = kwargs.copy()
if self._unix_socket_path != '':
arguments['unix_socket_path'] = self._unix_socket_path
return ExtendedPoolManager(**arguments)
class _ClientPoller ():
pass
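# --- Illustrative usage sketch (not part of the original client library) ---
# A minimal example of how an application might use the Client defined above:
# construct it against a local ESS over HTTP, set credentials, poll for updates
# of an object type, and fetch/consume each object's data. The address, port,
# key/secret and the "model" object type below are assumptions for illustration.
def _example_usage():
    import io
    client = Client("http", "localhost", 8090)               # assumed ESS address and port
    client.set_app_key_and_secret("app-key", "app-secret")   # hypothetical credentials

    def on_update(meta_data):
        # Fetch the payload of each new or updated object and mark it consumed.
        buf = io.BytesIO()
        if client.fetch_object_data(meta_data, buf):
            client.mark_object_consumed(meta_data)

    client.start_polling_for_updates("model", 5, on_update)  # poll every 5 seconds
    while True:                                              # keep the daemon polling thread alive
        time.sleep(60)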
|
build_database.py
|
#!/usr/bin/env python3
# Copyright (c) 2021 Burak Can
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
"""Build allowed documents database for emrtd_face_access"""
import argparse
import threading
import PySimpleGUI as sg
from smartcard.Exceptions import CardConnectionException
from smartcard.util import toHexString
from tinydb import TinyDB, Query
from emrtd_face_access.apdu import APDU
from emrtd_face_access.card_comms import send, wait_for_card, CardCommunicationError
from emrtd_face_access.ocr import capture_mrz
from emrtd_face_access.mrz import other_mrz, parse_mrz_text
from emrtd_face_access.secure_messaging_object import SMObject
from emrtd_face_access.bac import establish_bac_session_keys, SessionKeyEstablishmentError
from emrtd_face_access.file_operations import EFReadError, read_data_from_ef, get_dg1_content
from emrtd_face_access.byte_operations import nb
from emrtd_face_access.print_to_sg import SetInterval
print = SetInterval().print
def parse_arguments() -> argparse.Namespace:
"""parse arguments"""
parser = argparse.ArgumentParser(
description="Build an allowed document database for emrtd_face_access"
)
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
"-add", dest="insert", action="store_true", help="(default) Add a card to the database"
)
group.add_argument(
"-delete", dest="insert", action="store_false", help="Remove a card from the database"
)
parser.set_defaults(insert=True)
args = parser.parse_args()
return args
def main_event_loop(args: argparse.Namespace, window: sg.Window):
"""
Main GUI event loop
"""
run = True
db = TinyDB("db/db.json")
while True:
event, values = window.read(timeout=20)
if event == sg.WIN_CLOSED:
return
elif event == "-SHOW WARNING-":
if args.insert:
if (
sg.popup_yes_no(
"Be aware that no security checks are made\n"
"This card MRZ will be added to the database if it does not already exist\n"
"Run the main program with this card before making sure that it is safe to add\n"
"Are you sure you want to add this card?"
)
== "Yes"
):
database_obj = Query()
if db.search(database_obj.mrz == values[event]) == []:
db.insert({"mrz": values[event]})
print("[+] Card is added to the database")
else:
print("[i] Card is already in the database")
else:
print("[-] Card is NOT added to the database")
else:
if (
sg.popup_yes_no(
"Card is going to be removed from the database.\n" "Are you sure?"
)
== "Yes"
):
database_obj = Query()
if db.search(database_obj.mrz == values[event]) == []:
print("[-] Card is not in the database")
else:
db.remove(database_obj.mrz == values[event])
print("[+] Card is removed from the database")
else:
print("[-] Card is NOT removed from the database")
run = True
print("[i] Restarting...")
elif event == "-PROBLEM IN EITHER READ OR DOCUMENT-":
sg.popup_ok(
"Problem in either the MRZ scan or the document files\n"
"Check the logs! Restarting..."
)
run = True
elif event == "-SHOW MRZ-":
window["camera_image"].update(data=values[event][0])
elif event == "-HIDE MRZ-":
window["camera_image"].update(filename="", size=(320, 240))
elif event == "-RAISED EXCEPTION-":
print("[!] Problem occured! Restarting...")
run = True
elif event == "-PRINT-":
window["output_window"].print(values[event])
if run:
threading.Thread(target=database_builder_loop, args=(window,), daemon=True).start()
run = False
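# Minimal sketch (illustrative, not part of the original script) of the TinyDB
# pattern used in main_event_loop above: add a scanned MRZ to db/db.json only if
# it is not already present, or remove it. The helper name is hypothetical.
def _mrz_db_sketch(mrz_text, insert=True):
    db = TinyDB("db/db.json")
    record = Query()
    exists = db.search(record.mrz == mrz_text) != []
    if insert and not exists:
        db.insert({"mrz": mrz_text})        # allow this document
    elif not insert and exists:
        db.remove(record.mrz == mrz_text)   # revoke this document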
def database_builder_loop(window: sg.Window):
camera_id = -1
mrz, _ = capture_mrz(window, camera_id)
mrz_scan = "".join(mrz)
print(f"[i] MRZ Read:\n{mrz_scan}")
document_number, birthdate, expiry_date, _, _, _ = parse_mrz_text(mrz)
mrz_information = other_mrz(document_number, birthdate, expiry_date)
sm_object = SMObject(wait_for_card())
atr = sm_object.channel.getATR()
print("[+] Card ATR: " + toHexString(atr))
# Select eMRTD Applet
print("[+] Selecting LDS DF AID: A0000002471001...")
aid = bytes.fromhex("A0000002471001")
try:
send(sm_object, APDU(b"\x00", b"\xA4", b"\x04", b"\x0C", Lc=nb(len(aid)), cdata=aid))
except CardCommunicationError:
window.write_event_value("-RAISED EXCEPTION-", "")
return
except CardConnectionException as ex:
print(ex)
window.write_event_value("-RAISED EXCEPTION-", "")
return
## SECURE MESSAGING ##
try:
establish_bac_session_keys(sm_object, mrz_information.encode("utf-8"))
except SessionKeyEstablishmentError as ex:
print(ex)
print("[-] Error while establishing BAC session keys")
window.write_event_value("-RAISED EXCEPTION-", "")
return
except CardCommunicationError:
window.write_event_value("-RAISED EXCEPTION-", "")
return
except CardConnectionException as ex:
print(ex)
window.write_event_value("-RAISED EXCEPTION-", "")
return
# Read EF.DG1
try:
dg1 = read_data_from_ef(window, sm_object, b"\x01\x01", "EF.DG1")
except EFReadError as ex:
print(ex)
print("[-] Error while reading file EF.DG1.")
window.write_event_value("-RAISED EXCEPTION-", "")
return
except CardCommunicationError:
window.write_event_value("-RAISED EXCEPTION-", "")
return
except CardConnectionException as ex:
print(ex)
window.write_event_value("-RAISED EXCEPTION-", "")
return
else:
mrz_read = get_dg1_content(dg1).decode("utf-8")
print(mrz_read)
if mrz_read == mrz_scan:
window.write_event_value("-SHOW WARNING-", mrz_read)
else:
window.write_event_value("-PROBLEM IN EITHER READ OR DOCUMENT-", "")
if __name__ == "__main__":
a = parse_arguments()
sg.theme("Black")
# fmt: off
layout = [
[sg.Image(filename="", key="camera_image")],
[sg.Multiline(font="Courier 12", size=(80, 10), key="output_window", autoscroll=True,
auto_refresh=True, write_only=True, disabled=True, text_color="black")]]
# fmt: on
w = sg.Window("Database Builder", layout, location=(800, 400), element_justification="c")
SetInterval().initialize(w, 0.1)
SetInterval().start()
main_event_loop(a, w)
SetInterval().cancel()
w.close()
|
run_unittests.py
|
#!/usr/bin/env python3
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import stat
import shlex
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
import io
import operator
import threading
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from contextlib import contextmanager
from glob import glob
from pathlib import (PurePath, Path)
from distutils.dir_util import copy_tree
import mesonbuild.mlog
import mesonbuild.compilers
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter, ObjectHolder
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
BuildDirLock, LibType, MachineChoice, PerMachine, Version,
is_windows, is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku,
windows_proof_rmtree, python_command, version_compare,
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException
from mesonbuild.dependencies import PkgConfigDependency, ExternalProgram
from mesonbuild.build import Target
import mesonbuild.modules.pkgconfig
from mesonbuild.mtest import TAPParser, TestResult
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
def get_dynamic_section_entry(fname, entry):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF platforms')
try:
raw_out = subprocess.check_output(['readelf', '-d', fname],
universal_newlines=True)
except FileNotFoundError:
# FIXME: Try using depfixer.py:Elf() as a fallback
raise unittest.SkipTest('readelf not found')
pattern = re.compile(entry + r': \[(.*?)\]')
for line in raw_out.split('\n'):
m = pattern.search(line)
if m is not None:
return m.group(1)
return None # The file did not contain the specified entry.
def get_soname(fname):
return get_dynamic_section_entry(fname, 'soname')
def get_rpath(fname):
return get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
def is_tarball():
if not os.path.isdir('docs'):
return True
return False
def is_ci():
if 'CI' in os.environ:
return True
return False
def _git_init(project_dir):
subprocess.check_call(['git', 'init'], cwd=project_dir, stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'config',
'user.name', 'Author Person'], cwd=project_dir)
subprocess.check_call(['git', 'config',
'user.email', 'teh_coderz@example.com'], cwd=project_dir)
subprocess.check_call('git add *', cwd=project_dir, shell=True,
stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir,
stdout=subprocess.DEVNULL)
@functools.lru_cache()
def is_real_gnu_compiler(path):
'''
Check if the gcc we have is a real gcc and not a macOS wrapper around clang
'''
if not path:
return False
out = subprocess.check_output([path, '--version'], universal_newlines=True, stderr=subprocess.STDOUT)
return 'Free Software Foundation' in out
def skipIfNoExecutable(exename):
'''
Skip this test if the given executable is not found.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if shutil.which(exename) is None:
raise unittest.SkipTest(exename + ' not found')
return func(*args, **kwargs)
return wrapped
return wrapper
def skipIfNoPkgconfig(f):
'''
Skip this test if no pkg-config is found, unless we're on CI.
This allows users to run our test suite without having
pkg-config installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
Note: Yes, we provide pkg-config even while running Windows CI
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
return f(*args, **kwargs)
return wrapped
def skipIfNoPkgconfigDep(depname):
'''
Skip this test if the given pkg-config dep is not found, unless we're on CI.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0:
raise unittest.SkipTest('pkg-config dependency {} not found.'.format(depname))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_no_cmake(f):
'''
Skip this test if no cmake is found, unless we're on CI.
This allows users to run our test suite without having
cmake installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('cmake') is None:
raise unittest.SkipTest('cmake not found')
return f(*args, **kwargs)
return wrapped
def skip_if_not_language(lang):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
env = get_fake_env()
f = getattr(env, 'detect_{}_compiler'.format(lang))
f(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('No {} compiler found.'.format(lang))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_env_set(key):
'''
Skip a test if a particular env is set, except when running under CI
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
old = None
if key in os.environ:
if not is_ci():
raise unittest.SkipTest('Env var {!r} set, skipping'.format(key))
old = os.environ.pop(key)
try:
return func(*args, **kwargs)
finally:
if old is not None:
os.environ[key] = old
return wrapped
return wrapper
def skip_if_not_base_option(feature):
"""Skip tests if The compiler does not support a given base option.
for example, ICC doesn't currently support b_sanitize.
"""
def actual(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if feature not in cc.base_options:
raise unittest.SkipTest(
'{} not available with {}'.format(feature, cc.id))
return f(*args, **kwargs)
return wrapped
return actual
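# Illustrative sketch (not part of the original test suite) showing how the skip
# helpers above compose on a unittest method; the class name and the 'ninja'
# executable are chosen here purely for demonstration.
class _SkipHelpersExample(unittest.TestCase):
    @skipIfNoExecutable('ninja')
    @skipIfNoPkgconfig
    def test_example(self):
        # Runs only when ninja is on PATH and pkg-config is available (or on CI).
        self.assertIsNotNone(shutil.which('ninja'))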
@contextmanager
def temp_filename():
'''A context manager which provides a filename to an empty temporary file.
On exit the file will be deleted.
'''
fd, filename = tempfile.mkstemp()
os.close(fd)
try:
yield filename
finally:
try:
os.remove(filename)
except OSError:
pass
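# Brief sketch (illustrative) of the temp_filename() context manager defined above:
# the temporary file exists inside the with-block and is removed on exit.
def _temp_filename_sketch():
    with temp_filename() as fname:
        with open(fname, 'w') as f:
            f.write('scratch data')
    assert not os.path.exists(fname)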
@contextmanager
def no_pkgconfig():
'''
A context manager that overrides shutil.which and ExternalProgram to force
them to return None for pkg-config to simulate it not existing.
'''
old_which = shutil.which
old_search = ExternalProgram._search
def new_search(self, name, search_dir):
if name == 'pkg-config':
return [None]
return old_search(self, name, search_dir)
def new_which(cmd, *kwargs):
if cmd == 'pkg-config':
return None
return old_which(cmd, *kwargs)
shutil.which = new_which
ExternalProgram._search = new_search
try:
yield
finally:
shutil.which = old_which
ExternalProgram._search = old_search
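# Brief sketch (illustrative) of the no_pkgconfig() context manager defined above:
# while the block is active, pkg-config lookups behave as if it were not installed.
def _no_pkgconfig_sketch():
    with no_pkgconfig():
        assert shutil.which('pkg-config') is None
    # Outside the block the original shutil.which and ExternalProgram._search are restored.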
class PatchModule:
'''
Fancy monkey-patching! Whee! Can't use mock.patch because it only
patches in the local namespace.
'''
def __init__(self, func, name, impl):
self.func = func
assert(isinstance(name, str))
self.func_name = name
self.old_impl = None
self.new_impl = impl
def __enter__(self):
self.old_impl = self.func
exec('{} = self.new_impl'.format(self.func_name))
def __exit__(self, *args):
exec('{} = self.old_impl'.format(self.func_name))
class InternalTests(unittest.TestCase):
def test_version_number(self):
searchfunc = mesonbuild.environment.search_version
self.assertEqual(searchfunc('foobar 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.128'), 'unknown version')
self.assertEqual(searchfunc('2016.10.128'), 'unknown version')
def test_mode_symbolic_to_bits(self):
modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
self.assertEqual(modefunc('---------'), 0)
self.assertEqual(modefunc('r--------'), stat.S_IRUSR)
self.assertEqual(modefunc('---r-----'), stat.S_IRGRP)
self.assertEqual(modefunc('------r--'), stat.S_IROTH)
self.assertEqual(modefunc('-w-------'), stat.S_IWUSR)
self.assertEqual(modefunc('----w----'), stat.S_IWGRP)
self.assertEqual(modefunc('-------w-'), stat.S_IWOTH)
self.assertEqual(modefunc('--x------'), stat.S_IXUSR)
self.assertEqual(modefunc('-----x---'), stat.S_IXGRP)
self.assertEqual(modefunc('--------x'), stat.S_IXOTH)
self.assertEqual(modefunc('--S------'), stat.S_ISUID)
self.assertEqual(modefunc('-----S---'), stat.S_ISGID)
self.assertEqual(modefunc('--------T'), stat.S_ISVTX)
self.assertEqual(modefunc('--s------'), stat.S_ISUID | stat.S_IXUSR)
self.assertEqual(modefunc('-----s---'), stat.S_ISGID | stat.S_IXGRP)
self.assertEqual(modefunc('--------t'), stat.S_ISVTX | stat.S_IXOTH)
self.assertEqual(modefunc('rwx------'), stat.S_IRWXU)
self.assertEqual(modefunc('---rwx---'), stat.S_IRWXG)
self.assertEqual(modefunc('------rwx'), stat.S_IRWXO)
# We could keep listing combinations exhaustively but that seems
# tedious and pointless. Just test a few more.
self.assertEqual(modefunc('rwxr-xr-x'),
stat.S_IRWXU |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
self.assertEqual(modefunc('rw-r--r--'),
stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP |
stat.S_IROTH)
self.assertEqual(modefunc('rwsr-x---'),
stat.S_IRWXU | stat.S_ISUID |
stat.S_IRGRP | stat.S_IXGRP)
def test_compiler_args_class(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
cc = mesonbuild.compilers.CCompiler([], 'fake', False, MachineChoice.HOST)
# Test that bad initialization fails
self.assertRaises(TypeError, cargsfunc, [])
self.assertRaises(TypeError, cargsfunc, [], [])
self.assertRaises(TypeError, cargsfunc, cc, [], [])
# Test that empty initialization works
a = cargsfunc(cc)
self.assertEqual(a, [])
# Test that list initialization works
a = cargsfunc(['-I.', '-I..'], cc)
self.assertEqual(a, ['-I.', '-I..'])
# Test that there is no de-dup on initialization
self.assertEqual(cargsfunc(['-I.', '-I.'], cc), ['-I.', '-I.'])
## Test that appending works
a.append('-I..')
self.assertEqual(a, ['-I..', '-I.'])
a.append('-O3')
self.assertEqual(a, ['-I..', '-I.', '-O3'])
## Test that in-place addition works
a += ['-O2', '-O2']
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
# Test that removal works
a.remove('-O2')
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
# Test that de-dup happens on addition
a += ['-Ifoo', '-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# .extend() is just +=, so we don't test it
## Test that addition works
# Test that adding a list with just one old arg works and yields the same array
a = a + ['-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# Test that adding a list with one arg new and one old works
a = a + ['-Ifoo', '-Ibaz']
self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
# Test that adding args that must be prepended and appended works
a = a + ['-Ibar', '-Wall']
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
## Test that reflected addition works
# Test that adding to a list with just one old arg works and yields the same array
a = ['-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
# Test that adding to a list with just one new arg that is not pre-pended works
a = ['-Werror'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with two new args preserves the order
a = ['-Ldir', '-Lbah'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with old args does nothing
a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
## Test that adding libraries works
l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Adding a library and a libpath appends both correctly
l += ['-Lbardir', '-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
# Adding the same library again does nothing
l += ['-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
## Test that 'direct' append and extend works
l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
def test_compiler_args_class_gnuld(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
## Test --start/end-group
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', mesonbuild.compilers.CompilerType.GCC_STANDARD, False, MachineChoice.HOST)
## Test that 'direct' append and extend works
l = cargsfunc(gcc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding a non-library argument doesn't include it in the group
l += ['-Lfoo', '-Wl,--export-dynamic']
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
# -Wl,-lfoo is detected as a library and gets added to the group
l.append('-Wl,-ldl')
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_string_templates_substitution(self):
dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
substfunc = mesonbuild.mesonlib.substitute_values
ME = mesonbuild.mesonlib.MesonException
# Identity
self.assertEqual(dictfunc([], []), {})
# One input, no outputs
inputs = ['bar/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
# One input, one output
inputs = ['bar/foo.c.in']
outputs = ['out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', '@OUTPUT@', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + outputs + cmd[2:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
# One input, one output with a subdir
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Two inputs, no outputs
inputs = ['bar/foo.c.in', 'baz/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
cmd = ['@INPUT0@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
cmd = ['@INPUT0@', '@INPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Too many inputs
cmd = ['@PLAINNAME@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@BASENAME@']
self.assertRaises(ME, substfunc, cmd, d)
# No outputs
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTPUT0@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTDIR@']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, one output
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, two outputs
outputs = ['dir/out.c', 'dir/out2.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
'@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Many outputs, can't use @OUTPUT@ like this
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
def test_needs_exe_wrapper_override(self):
config = ConfigParser()
config['binaries'] = {
'c': '\'/usr/bin/gcc\'',
}
config['host_machine'] = {
'system': '\'linux\'',
'cpu_family': '\'arm\'',
'cpu': '\'armv7\'',
'endian': '\'little\'',
}
# Can not be used as context manager because we need to
# open it a second time and this is not possible on
# Windows.
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.flush()
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
detected_value = env.need_exe_wrapper()
os.unlink(configfilename)
desired_value = not detected_value
config['properties'] = {
'needs_exe_wrapper': 'true' if desired_value else 'false'
}
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
forced_value = env.need_exe_wrapper()
os.unlink(configfilename)
self.assertEqual(forced_value, desired_value)
def test_listify(self):
listify = mesonbuild.mesonlib.listify
# Test sanity
self.assertEqual([1], listify(1))
self.assertEqual([], listify([]))
self.assertEqual([1], listify([1]))
# Test flattening
self.assertEqual([1, 2, 3], listify([1, [2, 3]]))
self.assertEqual([1, 2, 3], listify([1, [2, [3]]]))
self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))
# Test flattening and unholdering
holder1 = ObjectHolder(1)
holder3 = ObjectHolder(3)
self.assertEqual([holder1], listify(holder1))
self.assertEqual([holder1], listify([holder1]))
self.assertEqual([holder1, 2], listify([holder1, 2]))
self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]]))
self.assertEqual([1], listify(holder1, unholder=True))
self.assertEqual([1], listify([holder1], unholder=True))
self.assertEqual([1, 2], listify([holder1, 2], unholder=True))
self.assertEqual([1, 2, 3], listify([holder1, 2, [holder3]], unholder=True))
# Unholding doesn't work recursively when not flattening
self.assertEqual([1, [2], [holder3]], listify([holder1, [2], [holder3]], unholder=True, flatten=False))
def test_extract_as_list(self):
extract = mesonbuild.mesonlib.extract_as_list
# Test sanity
kwargs = {'sources': [1, 2, 3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
self.assertEqual(kwargs, {'sources': [1, 2, 3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))
self.assertEqual(kwargs, {})
# Test unholding
holder3 = ObjectHolder(3)
kwargs = {'sources': [1, 2, holder3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', unholder=True))
self.assertEqual(kwargs, {'sources': [1, 2, holder3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', unholder=True, pop=True))
self.assertEqual(kwargs, {})
# Test listification
kwargs = {'sources': [1, 2, 3], 'pch_sources': [4, 5, 6]}
self.assertEqual([[1, 2, 3], [4, 5, 6]], extract(kwargs, 'sources', 'pch_sources'))
def test_pkgconfig_module(self):
class Mock:
pass
mock = Mock()
mock.pcdep = Mock()
mock.pcdep.name = "some_name"
mock.version_reqs = []
# pkgconfig dependency as lib
deps = mesonbuild.modules.pkgconfig.DependenciesHelper("thislib")
deps.add_pub_libs([mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
# pkgconfig dependency as requires
deps = mesonbuild.modules.pkgconfig.DependenciesHelper("thislib")
deps.add_pub_reqs([mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
shr = patterns[platform]['shared']
stc = patterns[platform]['static']
shrstc = shr + tuple([x for x in stc if x not in shr])
stcshr = stc + tuple([x for x in shr if x not in stc])
p = cc.get_library_naming(env, LibType.SHARED)
self.assertEqual(p, shr)
p = cc.get_library_naming(env, LibType.STATIC)
self.assertEqual(p, stc)
p = cc.get_library_naming(env, LibType.PREFER_STATIC)
self.assertEqual(p, stcshr)
p = cc.get_library_naming(env, LibType.PREFER_SHARED)
self.assertEqual(p, shrstc)
# Test find library by mocking up openbsd
if platform != 'openbsd':
return
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
f.write('')
found = cc.find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED)
self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
def test_find_library_patterns(self):
'''
Unit test for the library search patterns used by find_library()
'''
unix_static = ('lib{}.a', '{}.a')
msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
# This is the priority list of pattern matching for library searching
patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'),
'static': unix_static},
'linux': {'shared': ('lib{}.so', '{}.so'),
'static': unix_static},
'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
'static': unix_static},
'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
'lib{}.dll.a', '{}.dll', '{}.dll.a'),
'static': ('cyg{}.a',) + unix_static},
'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
'static': msvc_static},
'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
'{}.dll.a', '{}.lib', '{}.dll'),
'static': msvc_static}}
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if is_osx():
self._test_all_naming(cc, env, patterns, 'darwin')
elif is_cygwin():
self._test_all_naming(cc, env, patterns, 'cygwin')
elif is_windows():
if cc.get_argument_syntax() == 'msvc':
self._test_all_naming(cc, env, patterns, 'windows-msvc')
else:
self._test_all_naming(cc, env, patterns, 'windows-mingw')
elif is_openbsd():
self._test_all_naming(cc, env, patterns, 'openbsd')
else:
self._test_all_naming(cc, env, patterns, 'linux')
env.machines.host.system = 'openbsd'
self._test_all_naming(cc, env, patterns, 'openbsd')
env.machines.host.system = 'darwin'
self._test_all_naming(cc, env, patterns, 'darwin')
env.machines.host.system = 'cygwin'
self._test_all_naming(cc, env, patterns, 'cygwin')
env.machines.host.system = 'windows'
self._test_all_naming(cc, env, patterns, 'windows-mingw')
def test_pkgconfig_parse_libs(self):
'''
Unit test for parsing of pkg-config output to search for libraries
https://github.com/mesonbuild/meson/issues/3951
'''
def create_static_lib(name):
if not is_osx():
name.open('w').close()
return
src = name.with_suffix('.c')
out = name.with_suffix('.o')
with src.open('w') as f:
f.write('int meson_foobar (void) { return 0; }')
subprocess.check_call(['clang', '-c', str(src), '-o', str(out)])
subprocess.check_call(['ar', 'csr', str(name), str(out)])
with tempfile.TemporaryDirectory() as tmpdir:
pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
env = get_fake_env()
compiler = env.detect_c_compiler(MachineChoice.HOST)
env.coredata.compilers.host = {'c': compiler}
env.coredata.compiler_options.host['c_link_args'] = FakeCompilerOptions()
p1 = Path(tmpdir) / '1'
p2 = Path(tmpdir) / '2'
p1.mkdir()
p2.mkdir()
# libfoo.a is in one prefix
create_static_lib(p1 / 'libfoo.a')
# libbar.a is in both prefixes
create_static_lib(p1 / 'libbar.a')
create_static_lib(p2 / 'libbar.a')
# Ensure that we never statically link to these
create_static_lib(p1 / 'libpthread.a')
create_static_lib(p1 / 'libm.a')
create_static_lib(p1 / 'libc.a')
create_static_lib(p1 / 'libdl.a')
create_static_lib(p1 / 'librt.a')
def fake_call_pkgbin(self, args, env=None):
if '--libs' not in args:
return 0, ''
if args[0] == 'foo':
return 0, '-L{} -lfoo -L{} -lbar'.format(p2.as_posix(), p1.as_posix())
if args[0] == 'bar':
return 0, '-L{} -lbar'.format(p2.as_posix())
if args[0] == 'internal':
return 0, '-L{} -lpthread -lm -lc -lrt -ldl'.format(p1.as_posix())
old_call = PkgConfigDependency._call_pkgbin
old_check = PkgConfigDependency.check_pkgconfig
PkgConfigDependency._call_pkgbin = fake_call_pkgbin
PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
# Test begins
try:
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('foo', env, kwargs)
self.assertEqual(foo_dep.get_link_args(),
[(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
bar_dep = PkgConfigDependency('bar', env, kwargs)
self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
internal_dep = PkgConfigDependency('internal', env, kwargs)
if compiler.get_argument_syntax() == 'msvc':
self.assertEqual(internal_dep.get_link_args(), [])
else:
link_args = internal_dep.get_link_args()
for link_arg in link_args:
for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
self.assertNotIn('lib{}.a'.format(lib), link_arg, msg=link_args)
finally:
# Test ends
PkgConfigDependency._call_pkgbin = old_call
PkgConfigDependency.check_pkgconfig = old_check
# Reset dependency class state so that a later in-process configure doesn't pick up the fake pkg-config
PkgConfigDependency.pkgbin_cache = {}
PkgConfigDependency.class_pkgbin = PerMachine(None, None)
def test_version_compare(self):
comparefunc = mesonbuild.mesonlib.version_compare_many
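# version_compare_many returns a tuple whose first element is the overall
# boolean result; only that element is checked in the loop below.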
for (a, b, result) in [
('0.99.beta19', '>= 0.99.beta14', True),
]:
self.assertEqual(comparefunc(a, b)[0], result)
for (a, b, op) in [
# examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
("1.0010", "1.9", operator.gt),
("1.05", "1.5", operator.eq),
("1.0", "1", operator.gt),
("2.50", "2.5", operator.gt),
("fc4", "fc.4", operator.eq),
("FC5", "fc4", operator.lt),
("2a", "2.0", operator.lt),
("1.0", "1.fc4", operator.gt),
("3.0.0_fc", "3.0.0.fc", operator.eq),
# from RPM tests
("1.0", "1.0", operator.eq),
("1.0", "2.0", operator.lt),
("2.0", "1.0", operator.gt),
("2.0.1", "2.0.1", operator.eq),
("2.0", "2.0.1", operator.lt),
("2.0.1", "2.0", operator.gt),
("2.0.1a", "2.0.1a", operator.eq),
("2.0.1a", "2.0.1", operator.gt),
("2.0.1", "2.0.1a", operator.lt),
("5.5p1", "5.5p1", operator.eq),
("5.5p1", "5.5p2", operator.lt),
("5.5p2", "5.5p1", operator.gt),
("5.5p10", "5.5p10", operator.eq),
("5.5p1", "5.5p10", operator.lt),
("5.5p10", "5.5p1", operator.gt),
("10xyz", "10.1xyz", operator.lt),
("10.1xyz", "10xyz", operator.gt),
("xyz10", "xyz10", operator.eq),
("xyz10", "xyz10.1", operator.lt),
("xyz10.1", "xyz10", operator.gt),
("xyz.4", "xyz.4", operator.eq),
("xyz.4", "8", operator.lt),
("8", "xyz.4", operator.gt),
("xyz.4", "2", operator.lt),
("2", "xyz.4", operator.gt),
("5.5p2", "5.6p1", operator.lt),
("5.6p1", "5.5p2", operator.gt),
("5.6p1", "6.5p1", operator.lt),
("6.5p1", "5.6p1", operator.gt),
("6.0.rc1", "6.0", operator.gt),
("6.0", "6.0.rc1", operator.lt),
("10b2", "10a1", operator.gt),
("10a2", "10b2", operator.lt),
("1.0aa", "1.0aa", operator.eq),
("1.0a", "1.0aa", operator.lt),
("1.0aa", "1.0a", operator.gt),
("10.0001", "10.0001", operator.eq),
("10.0001", "10.1", operator.eq),
("10.1", "10.0001", operator.eq),
("10.0001", "10.0039", operator.lt),
("10.0039", "10.0001", operator.gt),
("4.999.9", "5.0", operator.lt),
("5.0", "4.999.9", operator.gt),
("20101121", "20101121", operator.eq),
("20101121", "20101122", operator.lt),
("20101122", "20101121", operator.gt),
("2_0", "2_0", operator.eq),
("2.0", "2_0", operator.eq),
("2_0", "2.0", operator.eq),
("a", "a", operator.eq),
("a+", "a+", operator.eq),
("a+", "a_", operator.eq),
("a_", "a+", operator.eq),
("+a", "+a", operator.eq),
("+a", "_a", operator.eq),
("_a", "+a", operator.eq),
("+_", "+_", operator.eq),
("_+", "+_", operator.eq),
("_+", "_+", operator.eq),
("+", "_", operator.eq),
("_", "+", operator.eq),
# other tests
('0.99.beta19', '0.99.beta14', operator.gt),
("1.0.0", "2.0.0", operator.lt),
(".0.0", "2.0.0", operator.lt),
("alpha", "beta", operator.lt),
("1.0", "1.0.0", operator.lt),
("2.456", "2.1000", operator.lt),
("2.1000", "3.111", operator.lt),
("2.001", "2.1", operator.eq),
("2.34", "2.34", operator.eq),
("6.1.2", "6.3.8", operator.lt),
("1.7.3.0", "2.0.0", operator.lt),
("2.24.51", "2.25", operator.lt),
("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt),
("3.4.1", "3.4b1", operator.gt),
("041206", "200090325", operator.lt),
("0.6.2+git20130413", "0.6.2", operator.gt),
("2.6.0+bzr6602", "2.6.0", operator.gt),
("2.6.0", "2.6b2", operator.gt),
("2.6.0+bzr6602", "2.6b2x", operator.gt),
("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt),
("15.8b", "15.8.0.1", operator.lt),
("1.2rc1", "1.2.0", operator.lt),
]:
ver_a = Version(a)
ver_b = Version(b)
if op is operator.eq:
for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.lt:
for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.gt:
for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
def test_msvc_toolset_version(self):
'''
Ensure that the toolset version returns the correct value for this MSVC
'''
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
toolset_ver = cc.get_toolset_version()
self.assertIsNotNone(toolset_ver)
# Visual Studio 2015 and older versions do not define VCToolsVersion
# TODO: ICL doesn't set this in the VS2015 profile either
if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910:
return
if 'VCToolsVersion' in os.environ:
vctools_ver = os.environ['VCToolsVersion']
else:
self.assertIn('VCINSTALLDIR', os.environ)
# See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text()
self.assertTrue(vctools_ver.startswith(toolset_ver),
msg='{!r} does not start with {!r}'.format(vctools_ver, toolset_ver))
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
def test_snippets(self):
hashcounter = re.compile('^ *(#)+')
snippet_dir = Path('docs/markdown/snippets')
self.assertTrue(snippet_dir.is_dir())
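# Every release-note snippet must be a markdown file whose headings use
# exactly two '#' characters and whose code fences are balanced; the only
# allowed non-.md entry is the 'add_release_note_snippets_here' placeholder.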
for f in snippet_dir.glob('*'):
self.assertTrue(f.is_file())
if f.parts[-1].endswith('~'):
continue
if f.suffix == '.md':
in_code_block = False
with f.open() as snippet:
for line in snippet:
if line.startswith(' '):
continue
if line.startswith('```'):
in_code_block = not in_code_block
if in_code_block:
continue
m = re.match(hashcounter, line)
if m:
self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
self.assertFalse(in_code_block, 'Unclosed code block.')
else:
if f.name != 'add_release_note_snippets_here':
self.fail('A file without .md suffix in snippets dir: ' + f.name)
def test_compiler_options_documented(self):
'''
Test that C and C++ compiler options and base options are documented in
Builtin-Options.md. Only tests the default compiler for the current
platform on the CI.
'''
md = None
with open('docs/markdown/Builtin-options.md') as f:
md = f.read()
self.assertIsNotNone(md)
env = get_fake_env()
# FIXME: Support other compilers
cc = env.detect_c_compiler(MachineChoice.HOST)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
for comp in (cc, cpp):
for opt in comp.get_options().keys():
self.assertIn(opt, md)
for opt in comp.base_options:
self.assertIn(opt, md)
self.assertNotIn('b_unknown', md)
def test_builtin_options_documented(self):
'''
Test that universal options and base options are documented in
Builtin-Options.md.
'''
md = None
with open('docs/markdown/Builtin-options.md') as f:
md = f.read()
self.assertIsNotNone(md)
found_entries = set()
sections = list(re.finditer(r"^## (.+)$", md, re.MULTILINE)) + [None]
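# The trailing None sentinel lets each '## ...' heading be paired with the
# next one (or end of file) so the text between them can be sliced out.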
for s1, s2 in zip(sections[:], sections[1:]):
if s1.group(1) == "Universal options":
# Extract the content for this section
end = s2.start() if s2 is not None else len(md)
content = md[s1.end():end]
subsections = list(re.finditer(r"^### (.+)$", content, re.MULTILINE)) + [None]
for sub1, sub2 in zip(subsections[:], subsections[1:]):
if sub1.group(1) == "Directories" or sub1.group(1) == "Core options":
# Extract the content for this subsection
sub_end = sub2.start() if sub2 is not None else len(content)
subcontent = content[sub1.end():sub_end]
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) .* \|", subcontent, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(len(found_entries & arches), 0)
found_entries |= arches
break
self.assertEqual(found_entries, set([
*mesonbuild.coredata.builtin_options.keys(),
*mesonbuild.coredata.builtin_options_per_machine.keys()
]))
def test_cpu_families_documented(self):
with open("docs/markdown/Reference-tables.md") as f:
md = f.read()
self.assertIsNotNone(md)
sections = list(re.finditer(r"^## (.+)$", md, re.MULTILINE))
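# Pair consecutive headings (0 with 1, 2 with 3, ...) to delimit each
# section's body; this relies on the 'CPU families' heading falling on an
# even index in the document.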
for s1, s2 in zip(sections[::2], sections[1::2]):
if s1.group(1) == "CPU families":
# Extract the content for this section
content = md[s1.end():s2.start()]
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))
def test_markdown_files_in_sitemap(self):
'''
Test that each markdown files in docs/markdown is referenced in sitemap.txt
'''
with open("docs/sitemap.txt") as f:
md = f.read()
self.assertIsNotNone(md)
toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
exceptions = ['_Sidebar.md']
for f in markdownfiles:
if f not in exceptions:
self.assertIn(f, toc)
def test_syntax_highlighting_files(self):
'''
Ensure that syntax highlighting files were updated for new functions in
the global namespace in build files.
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
with open('data/syntax-highlighting/vim/syntax/meson.vim') as f:
res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
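# The vim syntax file declares builtins as a single backslash-continued
# 'syn keyword mesonBuiltin' statement; splitting on the backslash below
# recovers the individual names, and the leading keyword header is dropped.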
defined = set([a.strip() for a in res.group().split('\\')][1:])
self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))
def test_all_functions_defined_in_ast_interpreter(self):
'''
Ensure that the all functions defined in the Interpreter are also defined
in the AstInterpreter (and vice versa).
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
astint = AstInterpreter('.', '')
self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))
class BasePlatformTests(unittest.TestCase):
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
src_root = os.path.join(os.getcwd(), src_root)
self.src_root = src_root
self.prefix = '/usr'
self.libdir = 'lib'
# Get the backend
# FIXME: Extract this from argv?
self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja'))
self.meson_args = ['--backend=' + self.backend.name]
self.meson_cross_file = None
self.meson_command = python_command + [get_meson_script()]
self.setup_command = self.meson_command + self.meson_args
self.mconf_command = self.meson_command + ['configure']
self.mintro_command = self.meson_command + ['introspect']
self.wrap_command = self.meson_command + ['wrap']
self.rewrite_command = self.meson_command + ['rewrite']
# Backend-specific build commands
self.build_command, self.clean_command, self.test_command, self.install_command, \
self.uninstall_command = get_backend_commands(self.backend)
# Test directories
self.common_test_dir = os.path.join(src_root, 'test cases/common')
self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite')
# Misc stuff
self.orig_env = os.environ.copy()
if self.backend is Backend.ninja:
self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
else:
# VS doesn't have a stable output when no changes are done
# XCode backend is untested with unit tests, help welcome!
self.no_rebuild_stdout = ['UNKNOWN BACKEND {!r}'.format(self.backend.name)]
self.builddirs = []
self.new_builddir()
def change_builddir(self, newdir):
self.builddir = newdir
self.privatedir = os.path.join(self.builddir, 'meson-private')
self.logdir = os.path.join(self.builddir, 'meson-logs')
self.installdir = os.path.join(self.builddir, 'install')
self.distdir = os.path.join(self.builddir, 'meson-dist')
self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
self.builddirs.append(self.builddir)
def new_builddir(self):
if not is_cygwin():
# Keep builddirs inside the source tree so that virus scanners
# don't complain
newdir = tempfile.mkdtemp(dir=os.getcwd())
else:
# But not on Cygwin because that breaks the umask tests. See:
# https://github.com/mesonbuild/meson/pull/5546#issuecomment-509666523
newdir = tempfile.mkdtemp()
# In case the directory is inside a symlinked directory, find the real
# path otherwise we might not find the srcdir from inside the builddir.
newdir = os.path.realpath(newdir)
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print("{!r} doesn't exist".format(log))
return
with open(log, 'r', encoding='utf-8') as f:
print(f.read())
def tearDown(self):
for path in self.builddirs:
try:
windows_proof_rmtree(path)
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
super().tearDown()
def _run(self, command, *, workdir=None, override_envvars=None):
'''
Run a command while printing the stdout and stderr to stdout,
and also return a copy of it
'''
# If this call hangs, CI will just abort. It is very hard to distinguish
# between a CI issue and a test bug in that case, so set a timeout and
# fail loudly instead.
if override_envvars is None:
env = None
else:
env = os.environ.copy()
env.update(override_envvars)
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env,
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
def init(self, srcdir, *,
extra_args=None,
default_args=True,
inprocess=False,
override_envvars=None):
self.assertPathExists(srcdir)
if extra_args is None:
extra_args = []
if not isinstance(extra_args, list):
extra_args = [extra_args]
args = [srcdir, self.builddir]
if default_args:
args += ['--prefix', self.prefix,
'--libdir', self.libdir]
if self.meson_cross_file:
args += ['--cross-file', self.meson_cross_file]
self.privatedir = os.path.join(self.builddir, 'meson-private')
if inprocess:
try:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
(returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args)
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
if 'MESON_SKIP_TEST' in out:
raise unittest.SkipTest('Project requested skipping.')
if returncode != 0:
self._print_meson_log()
print('Stdout:\n')
print(out)
print('Stderr:\n')
print(err)
raise RuntimeError('Configure failed')
except Exception:
self._print_meson_log()
raise
finally:
# Close log file to satisfy Windows file locking
mesonbuild.mlog.shutdown()
mesonbuild.mlog.log_dir = None
mesonbuild.mlog.log_file = None
else:
try:
out = self._run(self.setup_command + args + extra_args, override_envvars=override_envvars)
except unittest.SkipTest:
raise unittest.SkipTest('Project requested skipping: ' + srcdir)
except Exception:
self._print_meson_log()
raise
return out
def build(self, target=None, *, extra_args=None, override_envvars=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir, override_envvars=override_envvars)
def clean(self, *, override_envvars=None):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir, override_envvars=override_envvars)
def run_tests(self, *, inprocess=False, override_envvars=None):
if not inprocess:
self._run(self.test_command, workdir=self.builddir, override_envvars=override_envvars)
else:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
try:
run_mtest_inprocess(['-C', self.builddir])
finally:
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
def install(self, *, use_destdir=True, override_envvars=None):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
if use_destdir:
destdir = {'DESTDIR': self.installdir}
if override_envvars is None:
override_envvars = destdir
else:
override_envvars.update(destdir)
self._run(self.install_command, workdir=self.builddir, override_envvars=override_envvars)
def uninstall(self, *, override_envvars=None):
self._run(self.uninstall_command, workdir=self.builddir, override_envvars=override_envvars)
def run_target(self, target, *, override_envvars=None):
'''
Run a Ninja target while printing the stdout and stderr to stdout,
and also return a copy of it
'''
return self.build(target=target, override_envvars=override_envvars)
def setconf(self, arg, will_build=True):
if not isinstance(arg, list):
arg = [arg]
if will_build:
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
def wipe(self):
windows_proof_rmtree(self.builddir)
def utime(self, f):
ensure_backend_detects_changes(self.backend)
os.utime(f)
def get_compdb(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Compiler db not available with {} backend'.format(self.backend.name))
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
# If Ninja is using .rsp files, generate them, read their contents, and
# use them as the command for each compile command in the parsed JSON.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, 'r', encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
Fetch a list of command lines run by Meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
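# Compiler-check invocations are logged as lines starting with this prefix,
# e.g. "Command line: cc /tmp/testfile.c -o /tmp/output ..." (the exact
# form depends on the compiler being probed).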
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def introspect(self, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception as e:
print(out)
raise e
return obj
def assertPathEqual(self, path1, path2):
'''
Handles a lot of platform-specific quirks related to paths such as
separator, case-sensitivity, etc.
'''
self.assertEqual(PurePath(path1), PurePath(path2))
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = '{!r} does not end with {!r}'.format(path, basename)
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
def assertBuildIsNoop(self):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
elif self.backend is Backend.vs:
# Ensure that some target said that no rebuild was done
self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret)
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
clre = re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE)
linkre = re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE)
self.assertNotRegex(ret, clre)
self.assertNotRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertRebuiltTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn('Linking target {}'.format(target), ret)
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
self.assertRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertPathExists(self, path):
m = 'Path {!r} should exist'.format(path)
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = 'Path {!r} should not exist'.format(path)
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
'''
Tests that should run on all platforms
'''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '91 default options')
self.init(testdir, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
self.assertEqual(prefix, '/absoluteprefix')
def test_do_conf_file_preserve_newlines(self):
def conf_file(in_data, confdata):
with temp_filename() as fin:
with open(fin, 'wb') as fobj:
fobj.write(in_data.encode('utf-8'))
with temp_filename() as fout:
mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson')
with open(fout, 'rb') as fobj:
return fobj.read().decode('utf-8')
confdata = {'VAR': ('foo', 'bar')}
self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n')
self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n')
def test_absolute_prefix_libdir(self):
'''
Tests that setting absolute paths for --prefix and --libdir work. Can't
be an ordinary test because these are set via the command-line.
https://github.com/mesonbuild/meson/issues/1341
https://github.com/mesonbuild/meson/issues/1345
'''
testdir = os.path.join(self.common_test_dir, '91 default options')
prefix = '/someabs'
libdir = 'libdir'
extra_args = ['--prefix=' + prefix,
# This can just be a relative path, but we want to test
# that passing this as an absolute path also works
'--libdir=' + prefix + '/' + libdir]
self.init(testdir, extra_args=extra_args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
self.assertEqual(prefix, opt['value'])
elif opt['name'] == 'libdir':
self.assertEqual(libdir, opt['value'])
def test_libdir_must_be_inside_prefix(self):
'''
Tests that libdir is forced to be inside prefix no matter how it is set.
Must be a unit test for obvious reasons.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
# libdir being inside prefix is ok
args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
self.init(testdir, extra_args=args)
self.wipe()
# libdir not being inside prefix is not ok
args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
self.assertRaises(subprocess.CalledProcessError, self.init, testdir, extra_args=args)
self.wipe()
# libdir must be inside prefix even when set via mesonconf
self.init(testdir)
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
def test_prefix_dependent_defaults(self):
'''
Tests that configured directory paths are set to prefix dependent
defaults.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
expected = {
'/opt': {'prefix': '/opt',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': 'var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': 'com',
'sysconfdir': 'etc'},
'/usr': {'prefix': '/usr',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': '/var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
'sysconfdir': '/etc'},
'/usr/local': {'prefix': '/usr/local',
'bindir': 'bin', 'datadir': 'share',
'includedir': 'include', 'infodir': 'share/info',
'libexecdir': 'libexec',
'localedir': 'share/locale',
'localstatedir': '/var/local', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
'sysconfdir': 'etc'},
# N.B. We don't check 'libdir' as it's platform dependent, see
# default_libdir():
}
for prefix in expected:
args = ['--prefix', prefix]
self.init(testdir, extra_args=args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[prefix]:
self.assertEqual(value, expected[prefix][name])
self.wipe()
def test_default_options_prefix_dependent_defaults(self):
'''
Tests that setting a prefix in default_options in project() sets prefix
dependent defaults for other options, and that those defaults can
be overridden in default_options or by the command line.
'''
testdir = os.path.join(self.common_test_dir, '169 default options prefix dependent defaults')
expected = {
'':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--prefix=/usr':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--sharedstatedir=/var/state':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
'--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
{'prefix': '/usr',
'sysconfdir': 'sysconf',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
}
for args in expected:
self.init(testdir, extra_args=args.split(), default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[args]:
self.assertEqual(value, expected[args][name])
self.wipe()
def test_clike_get_library_dirs(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
for d in cc.get_library_dirs(env):
self.assertTrue(os.path.exists(d))
self.assertTrue(os.path.isdir(d))
self.assertTrue(os.path.isabs(d))
def test_static_library_overwrite(self):
'''
Tests that static libraries are never appended to, always overwritten.
Has to be a unit test because this involves building a project,
reconfiguring, and building it again so that `ar` is run twice on the
same static library.
https://github.com/mesonbuild/meson/issues/1355
'''
testdir = os.path.join(self.common_test_dir, '3 static')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
static_linker = env.detect_static_linker(cc)
if is_windows():
raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
raise unittest.SkipTest('static linker is not `ar`')
# Configure
self.init(testdir)
# Get name of static library
targets = self.introspect('--targets')
self.assertEqual(len(targets), 1)
libname = targets[0]['filename'][0]
# Build and get contents of static library
self.build()
before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
before = [f for f in before if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(before), 1, msg=before)
# Change the source to be built into the static library
self.setconf('-Dsource=libfile2.c')
self.build()
after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
after = [f for f in after if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(after), 1, msg=after)
# and the object must have changed
self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '55 run target')
self.init(testdir)
self.run_target('check_exists')
def test_install_introspection(self):
'''
Tests that the Meson introspection API exposes install filenames correctly
https://github.com/mesonbuild/meson/issues/829
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
intro = self.introspect('--targets')
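# Introspection output order is not guaranteed, so normalise it below so
# that the static library entry comes first.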
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
def test_install_subdir_introspection(self):
'''
Test that the Meson introspection API also contains subdir install information
https://github.com/mesonbuild/meson/issues/5556
'''
testdir = os.path.join(self.common_test_dir, '63 install subdir')
self.init(testdir)
intro = self.introspect('--installed')
expected = {
'sub2': 'share/sub2',
'subdir/sub1': 'share/sub1',
'subdir/sub_elided': 'share',
'sub1': 'share/sub1',
'sub/sub1': 'share/sub1',
'sub_elided': 'share',
'nested_elided/sub': 'share',
}
self.assertEqual(len(intro), len(expected))
# Convert expected to PurePath
expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()}
intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()}
for src, dst in expected_converted.items():
self.assertIn(src, intro_converted)
self.assertEqual(dst, intro_converted[src])
def test_install_introspection_multiple_outputs(self):
'''
Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
https://github.com/mesonbuild/meson/pull/4555
Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
TODO: Change the format to a list officially in a follow-up PR
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '145 custom target multiple outputs')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
def test_uninstall(self):
exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
self.assertPathDoesNotExist(exename)
self.install()
self.assertPathExists(exename)
self.uninstall()
self.assertPathDoesNotExist(exename)
def test_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--wrap-mode=forcefallback'])
self.build()
self.run_tests()
def test_env_ops_dont_stack(self):
'''
Test that env ops prepend/append do not stack, and that this usage issues a warning
'''
testdir = os.path.join(self.unit_test_dir, '59 test env doesn\'t stack')
out = self.init(testdir)
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_APPEND')
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_PREPEND')
self.assertNotRegex(out, r'WARNING: Overriding.*TEST_VAR_SET')
self.run_tests()
def test_testsetups(self):
if not shutil.which('valgrind'):
raise unittest.SkipTest('Valgrind not installed.')
testdir = os.path.join(self.unit_test_dir, '2 testsetups')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
basic_log = f.read()
# Run buggy test with setup that has env that will make it fail
self.assertRaises(subprocess.CalledProcessError,
self._run, self.mtest_command + ['--setup=valgrind'])
with open(os.path.join(self.logdir, 'testlog-valgrind.txt')) as f:
vg_log = f.read()
self.assertFalse('TEST_ENV is set' in basic_log)
self.assertFalse('Memcheck' in basic_log)
self.assertTrue('TEST_ENV is set' in vg_log)
self.assertTrue('Memcheck' in vg_log)
# Run buggy test with setup without env that will pass
self._run(self.mtest_command + ['--setup=wrapper'])
# Setup with no properties works
self._run(self.mtest_command + ['--setup=empty'])
# Setup with only env works
self._run(self.mtest_command + ['--setup=onlyenv'])
self._run(self.mtest_command + ['--setup=onlyenv2'])
self._run(self.mtest_command + ['--setup=onlyenv3'])
# Setup with only a timeout works
self._run(self.mtest_command + ['--setup=timeout'])
def test_testsetup_selection(self):
testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=worksforall'])
self._run(self.mtest_command + ['--setup=main:worksforall'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=foo:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=main:onlyinbar'])
def test_testsetup_default(self):
testdir = os.path.join(self.unit_test_dir, '49 testsetup default')
self.init(testdir)
self.build()
# Run tests without --setup will cause the default setup to be used
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
default_log = f.read()
# Run tests with explicitly using the same setup that is set as default
self._run(self.mtest_command + ['--setup=mydefault'])
with open(os.path.join(self.logdir, 'testlog-mydefault.txt')) as f:
mydefault_log = f.read()
# Run tests with another setup
self._run(self.mtest_command + ['--setup=other'])
with open(os.path.join(self.logdir, 'testlog-other.txt')) as f:
other_log = f.read()
self.assertTrue('ENV_A is 1' in default_log)
self.assertTrue('ENV_B is 2' in default_log)
self.assertTrue('ENV_C is 2' in default_log)
self.assertTrue('ENV_A is 1' in mydefault_log)
self.assertTrue('ENV_B is 2' in mydefault_log)
self.assertTrue('ENV_C is 2' in mydefault_log)
self.assertTrue('ENV_A is 1' in other_log)
self.assertTrue('ENV_B is 3' in other_log)
self.assertTrue('ENV_C is 2' in other_log)
def assertFailedTestCount(self, failure_count, command):
try:
self._run(command)
self.assertEqual(0, failure_count, 'Expected %d tests to fail.' % failure_count)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
def test_suite_selection(self):
testdir = os.path.join(self.unit_test_dir, '4 suite selection')
self.init(testdir)
self.build()
self.assertFailedTestCount(4, self.mtest_command)
self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', ':success'])
self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', ':fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'mainprj:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjfail:success'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail'])
def test_build_by_default(self):
testdir = os.path.join(self.common_test_dir, '134 build by default')
self.init(testdir)
self.build()
genfile1 = os.path.join(self.builddir, 'generated1.dat')
genfile2 = os.path.join(self.builddir, 'generated2.dat')
exe1 = os.path.join(self.builddir, 'fooprog' + exe_suffix)
exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix)
self.assertPathExists(genfile1)
self.assertPathExists(genfile2)
self.assertPathDoesNotExist(exe1)
self.assertPathDoesNotExist(exe2)
self.build(target=('fooprog' + exe_suffix))
self.assertPathExists(exe1)
self.build(target=('barprog' + exe_suffix))
self.assertPathExists(exe2)
def test_internal_include_order(self):
testdir = os.path.join(self.common_test_dir, '135 include order')
self.init(testdir)
execmd = fxecmd = None
for cmd in self.get_compdb():
if 'someexe' in cmd['command']:
execmd = cmd['command']
continue
if 'somefxe' in cmd['command']:
fxecmd = cmd['command']
continue
if not execmd or not fxecmd:
raise Exception('Could not find someexe and somefxe commands')
# Check include order for 'someexe'
incs = [a for a in shlex.split(execmd) if a.startswith("-I")]
self.assertEqual(len(incs), 9)
# target private dir
someexe_id = Target.construct_id_from_path("sub4", "someexe", "@exe")
self.assertPathEqual(incs[0], "-I" + os.path.join("sub4", someexe_id))
# target build subdir
self.assertPathEqual(incs[1], "-Isub4")
# target source subdir
self.assertPathBasenameEqual(incs[2], 'sub4')
# include paths added via per-target c_args: ['-I'...]
self.assertPathBasenameEqual(incs[3], 'sub3')
# target include_directories: build dir
self.assertPathEqual(incs[4], "-Isub2")
# target include_directories: source dir
self.assertPathBasenameEqual(incs[5], 'sub2')
# target internal dependency include_directories: build dir
self.assertPathEqual(incs[6], "-Isub1")
# target internal dependency include_directories: source dir
self.assertPathBasenameEqual(incs[7], 'sub1')
# custom target include dir
self.assertPathEqual(incs[8], '-Ictsub')
# Check include order for 'somefxe'
incs = [a for a in shlex.split(fxecmd) if a.startswith('-I')]
self.assertEqual(len(incs), 9)
# target private dir
self.assertPathEqual(incs[0], '-Isomefxe@exe')
# target build dir
self.assertPathEqual(incs[1], '-I.')
# target source dir
self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
# target internal dependency correct include_directories: build dir
self.assertPathEqual(incs[3], "-Isub4")
# target internal dependency correct include_directories: source dir
self.assertPathBasenameEqual(incs[4], 'sub4')
# target internal dependency dep include_directories: build dir
self.assertPathEqual(incs[5], "-Isub1")
# target internal dependency dep include_directories: source dir
self.assertPathBasenameEqual(incs[6], 'sub1')
# target internal dependency wrong include_directories: build dir
self.assertPathEqual(incs[7], "-Isub2")
# target internal dependency wrong include_directories: source dir
self.assertPathBasenameEqual(incs[8], 'sub2')
def test_compiler_detection(self):
'''
Test that automatic compiler detection and setting from the environment
both work just fine. This is needed because while running project tests
and other unit tests, we always read CC/CXX/etc from the environment.
'''
gnu = mesonbuild.compilers.GnuCompiler
clang = mesonbuild.compilers.ClangCompiler
intel = mesonbuild.compilers.IntelGnuLikeCompiler
msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler)
clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler)
ar = mesonbuild.linkers.ArLinker
lib = mesonbuild.linkers.VisualStudioLinker
langs = [('c', 'CC'), ('cpp', 'CXX')]
if not is_windows():
langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
env = get_fake_env(testdir, self.builddir, self.prefix)
for lang, evar in langs:
# Detect with evar and do sanity checks on that
if evar in os.environ:
ecc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(ecc.version)
elinker = env.detect_static_linker(ecc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop(evar)
# Very rough/strict heuristics. Would never work for actual
# compiler detection, but should be ok for the tests.
ebase = os.path.basename(evalue)
if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
self.assertIsInstance(ecc, gnu)
self.assertIsInstance(elinker, ar)
elif 'clang-cl' in ebase:
self.assertIsInstance(ecc, clangcl)
self.assertIsInstance(elinker, lib)
elif 'clang' in ebase:
self.assertIsInstance(ecc, clang)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('ic'):
self.assertIsInstance(ecc, intel)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('cl'):
self.assertIsInstance(ecc, msvc)
self.assertIsInstance(elinker, lib)
else:
raise AssertionError('Unknown compiler {!r}'.format(evalue))
# Check that we actually used the evalue correctly as the compiler
self.assertEqual(ecc.get_exelist(), shlex.split(evalue))
# Do auto-detection of compiler based on platform, PATH, etc.
cc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(cc.version)
linker = env.detect_static_linker(cc)
# Check compiler type
if isinstance(cc, gnu):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_OSX)
elif is_windows():
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_MINGW)
elif is_cygwin():
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_CYGWIN)
else:
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_STANDARD)
if isinstance(cc, clang):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.CLANG_OSX)
elif is_windows():
# Not implemented yet
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.CLANG_MINGW)
else:
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.CLANG_STANDARD)
if isinstance(cc, intel):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.ICC_OSX)
elif is_windows():
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.ICC_WIN)
else:
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.ICC_STANDARD)
if isinstance(cc, msvc):
self.assertTrue(is_windows())
self.assertIsInstance(linker, lib)
self.assertEqual(cc.id, 'msvc')
self.assertTrue(hasattr(cc, 'is_64'))
# If we're on Windows CI, we know what the compiler will be
if 'arch' in os.environ:
if os.environ['arch'] == 'x64':
self.assertTrue(cc.is_64)
else:
self.assertFalse(cc.is_64)
# Set evar ourselves to a wrapper script that just calls the same
# exelist + some argument. This is meant to test that setting
# something like `ccache gcc -pipe` or `distcc ccache gcc` works.
wrapper = os.path.join(testdir, 'compiler wrapper.py')
wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
wrappercc_s = ' '.join(shlex.quote(w) for w in wrappercc)
os.environ[evar] = wrappercc_s
wcc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
# Check static linker too
wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
wrapperlinker_s = ' '.join(shlex.quote(w) for w in wrapperlinker)
os.environ['AR'] = wrapperlinker_s
wlinker = env.detect_static_linker(wcc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop('AR')
# Must be the same type since it's a wrapper around the same exelist
self.assertIs(type(cc), type(wcc))
self.assertIs(type(linker), type(wlinker))
# Ensure that the exelist is correct
self.assertEqual(wcc.get_exelist(), wrappercc)
self.assertEqual(wlinker.get_exelist(), wrapperlinker)
# Ensure that the version detection worked correctly
self.assertEqual(cc.version, wcc.version)
if hasattr(cc, 'is_64'):
self.assertEqual(cc.is_64, wcc.is_64)
def test_always_prefer_c_compiler_for_asm(self):
testdir = os.path.join(self.common_test_dir, '138 c cpp and asm')
# Skip if building with MSVC
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc':
raise unittest.SkipTest('MSVC can\'t compile assembly')
self.init(testdir)
commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
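# Map each target to the compiler used for its assembly/C/C++ sources,
# classifying every compile command by the per-target '-I<name>' include
# flag that appears in it.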
for cmd in self.get_compdb():
# Get compiler
split = shlex.split(cmd['command'])
if split[0] == 'ccache':
compiler = split[1]
else:
compiler = split[0]
# Classify commands
if 'Ic-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-asm']['c'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Icpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Ic-cpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-cpp-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['c-cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
elif 'Icpp-c-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['cpp-c-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-c-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
else:
raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
# Check that .S files are always built with the C compiler
self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
# Check that the c-asm target is always linked with the C linker
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build c-asm.*: c_LINKER', contents)
self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
'''
Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS but
not LDFLAGS.
'''
testdir = os.path.join(self.common_test_dir, '137 get define')
define = 'MESON_TEST_DEFINE_VALUE'
# NOTE: the value below can't contain \n, ' or "
# \n is never substituted by the GNU pre-processor via a -D define
# ' and " confuse shlex.split() even when they are escaped
# % and # confuse the MSVC preprocessor
# !, ^, *, and < confuse lcc preprocessor
value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
for env_var in ['CPPFLAGS', 'CFLAGS']:
env = {}
env[env_var] = '-D{}="{}"'.format(define, value)
env['LDFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'
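# If the preprocessor check wrongly picked up LDFLAGS, MESON_FAIL_VALUE
# would end up defined and the test project would presumably fail to
# configure.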
self.init(testdir, extra_args=['-D{}={}'.format(define, value)], override_envvars=env)
def test_custom_target_exe_data_deterministic(self):
testdir = os.path.join(self.common_test_dir, '114 custom target capture')
self.init(testdir)
meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.wipe()
self.init(testdir)
meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.assertListEqual(meson_exe_dat1, meson_exe_dat2)
def test_source_changes_cause_rebuild(self):
'''
Test that changes to sources and headers cause rebuilds, but not
changes to unused files (as determined by the dependency file) in the
input files list.
'''
testdir = os.path.join(self.common_test_dir, '20 header in file list')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of header.h should rebuild everything
self.utime(os.path.join(testdir, 'header.h'))
self.assertRebuiltTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
'''
Test that in a custom target, changes to the input files, the
ExternalProgram, and any File objects on the command-line cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '61 custom header generator')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of these should rebuild everything
for f in ('input.def', 'makeheader.py', 'somefile.txt'):
self.utime(os.path.join(testdir, f))
self.assertRebuiltTarget('prog')
def test_source_generator_program_cause_rebuild(self):
'''
Test that changes to generator programs in the source tree cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '95 gen extra')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of generator should rebuild the executable
self.utime(os.path.join(testdir, 'srcgen.py'))
self.assertRebuiltTarget('basic')
def test_static_library_lto(self):
'''
Test that static libraries can be built with LTO and linked to
executables. On Linux, this requires the use of gcc-ar.
https://github.com/mesonbuild/meson/issues/1646
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args='-Db_lto=true')
self.build()
self.run_tests()
def test_dist_git(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
try:
self.dist_impl(_git_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def test_dist_hg(self):
if not shutil.which('hg'):
raise unittest.SkipTest('Mercurial not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
def hg_init(project_dir):
subprocess.check_call(['hg', 'init'], cwd=project_dir)
with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f:
print('[ui]', file=f)
print('username=Author Person <teh_coderz@example.com>', file=f)
subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)
try:
self.dist_impl(hg_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the hg files so cleaning up the dir
# fails sometimes.
pass
def test_dist_git_script(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
try:
with tempfile.TemporaryDirectory() as tmpdir:
project_dir = os.path.join(tmpdir, 'a')
shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
project_dir)
_git_init(project_dir)
self.init(project_dir)
self.build('dist')
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def dist_impl(self, vcs_init):
# Create this on the fly because having rogue .git directories inside
# the source tree leads to all kinds of trouble.
with tempfile.TemporaryDirectory() as project_dir:
with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile:
ofile.write('''project('disttest', 'c', version : '1.4.3')
e = executable('distexe', 'distexe.c')
test('dist test', e)
''')
with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile:
ofile.write('''#include<stdio.h>
int main(int argc, char **argv) {
printf("I am a distribution test.\\n");
return 0;
}
''')
vcs_init(project_dir)
self.init(project_dir)
self.build('dist')
distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
checksumfile = distfile + '.sha256sum'
self.assertPathExists(distfile)
self.assertPathExists(checksumfile)
def test_rpath_uses_ORIGIN(self):
'''
Test that built targets use $ORIGIN in rpath, which ensures that they
are relocatable and that builds are reproducible, since the build
directory won't get embedded into the built binaries.
'''
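# Illustratively, a binary that links a library built in a sibling
# directory is expected to carry an rpath entry such as '$ORIGIN/subdir'
# instead of an absolute path into the build directory.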
if is_windows() or is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.common_test_dir, '43 library chain')
self.init(testdir)
self.build()
for each in ('prog', 'subdir/liblib1.so', ):
rpath = get_rpath(os.path.join(self.builddir, each))
self.assertTrue(rpath, 'Rpath could not be determined for {}.'.format(each))
if is_dragonflybsd():
# DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
# so ignore that.
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
rpaths = rpath.split(':')[1:]
else:
rpaths = rpath.split(':')
for path in rpaths:
self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
# These two don't link to anything else, so they do not need an rpath entry.
for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
rpath = get_rpath(os.path.join(self.builddir, each))
if is_dragonflybsd():
# The rpath should be equal to /usr/lib/gccVERSION
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
self.assertEqual(len(rpath.split(':')), 1)
else:
self.assertTrue(rpath is None)
def test_dash_d_dedup(self):
testdir = os.path.join(self.unit_test_dir, '9 d dedup')
self.init(testdir)
cmd = self.get_compdb()[0]['command']
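# Both defines must survive the de-duplication of compiler arguments even
# though the bare '-D' (or '/D') token repeats; the assertion accepts the
# plain, quoted and msvc spellings of the same arguments.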
self.assertTrue('-D FOO -D BAR' in cmd or
'"-D" "FOO" "-D" "BAR"' in cmd or
'/D FOO /D BAR' in cmd or
'"/D" "FOO" "/D" "BAR"' in cmd)
def test_all_forbidden_targets_tested(self):
'''
Test that all forbidden targets are tested in the '155 reserved targets'
test. Needs to be a unit test because it accesses Meson internals.
'''
testdir = os.path.join(self.common_test_dir, '155 reserved targets')
targets = mesonbuild.coredata.forbidden_target_names
# We don't actually define a target with this name
targets.pop('build.ninja')
# Remove this to avoid multiple entries with the same name
# but different case.
targets.pop('PHONY')
for i in targets:
self.assertPathExists(os.path.join(testdir, i))
def detect_prebuild_env(self):
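# Helper for the prebuilt-artifact tests below: detect the host C compiler
# and static linker and pick platform-appropriate suffixes (obj/dll on
# Windows, o/dll on Cygwin, o/dylib on macOS, o/so elsewhere).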
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
stlinker = env.detect_static_linker(cc)
if mesonbuild.mesonlib.is_windows():
object_suffix = 'obj'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_cygwin():
object_suffix = 'o'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_osx():
object_suffix = 'o'
shared_suffix = 'dylib'
else:
object_suffix = 'o'
shared_suffix = 'so'
return (cc, stlinker, object_suffix, shared_suffix)
def pbcompile(self, compiler, source, objectfile, extra_args=None):
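# Compile a single source file to an object file, using cl-style
# arguments (/Fo, /c) for msvc-like compilers and gcc-style arguments
# (-c, -o) for everything else.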
cmd = compiler.get_exelist()
extra_args = extra_args or []
if compiler.get_argument_syntax() == 'msvc':
cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args
else:
cmd += ['-c', source, '-o', objectfile] + extra_args
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def test_prebuilt_object(self):
(compiler, _, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
source = os.path.join(tdir, 'source.c')
objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix)
self.pbcompile(compiler, source, objectfile)
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(objectfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
if extra_args is None:
extra_args = []
# Build the archiver command line from the detected static linker so that
# both ar-style and lib.exe-style linkers are handled uniformly.
link_cmd = linker.get_exelist()
link_cmd += linker.get_always_args()
link_cmd += linker.get_std_link_args()
link_cmd += linker.get_output_args(outfile)
link_cmd += [objectfile]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_static_lib(self):
(cc, stlinker, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
source = os.path.join(tdir, 'libdir/best.c')
objectfile = os.path.join(tdir, 'libdir/best.' + object_suffix)
stlibfile = os.path.join(tdir, 'libdir/libbest.a')
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
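# Link a shared library from one object file: with msvc-like compilers the
# linker is driven directly (/DLL, /IMPLIB, /OUT), otherwise the compiler
# links with -shared, adding -fPIC where required and a -soname on
# non-macOS platforms.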
if extra_args is None:
extra_args = []
if compiler.get_argument_syntax() == 'msvc':
link_cmd = compiler.get_linker_exelist() + [
'/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
'/OUT:' + outfile, objectfile]
else:
if not (compiler.compiler_type.is_windows_compiler or
compiler.compiler_type.is_osx_compiler):
extra_args += ['-fPIC']
link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
if not mesonbuild.mesonlib.is_osx():
link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_shared_lib(self):
(cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
source = os.path.join(tdir, 'alexandria.c')
objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
impfile = os.path.join(tdir, 'alexandria.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
elif is_cygwin():
shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
else:
shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(tdir, 'alexandria.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_static(self):
'''
Test that we prefer static libraries when `static: true` is
passed to dependency() with pkg-config. Can't be an ordinary test
because we need to build libs and try to find them from meson.build.
Also test that it's not a hard error to have unsatisfiable library deps,
since system libraries such as -lm will never be found statically.
https://github.com/mesonbuild/meson/issues/2785
'''
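# Illustrative sketch (not the actual test project): the meson.build under
# test is expected to do something along the lines of
#   foo_dep = dependency('foo', static : true)
# and receive the static archive even though a shared library with the
# same name is also discoverable via pkg-config.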
(cc, stlinker, objext, shext) = self.detect_prebuild_env()
testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
source = os.path.join(testdir, 'foo.c')
objectfile = os.path.join(testdir, 'foo.' + objext)
stlibfile = os.path.join(testdir, 'libfoo.a')
impfile = os.path.join(testdir, 'foo.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(testdir, 'foo.' + shext)
elif is_cygwin():
shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
else:
shlibfile = os.path.join(testdir, 'libfoo.' + shext)
# Build libs
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run test
try:
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': self.builddir})
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(testdir, 'foo.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_gen_escaping(self):
testdir = os.path.join(self.common_test_dir, '48 pkgconfig-gen')
prefix = '/usr/with spaces'
libdir = 'lib'
self.init(testdir, extra_args=['--prefix=' + prefix,
'--libdir=' + libdir])
# Find foo dependency
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
# Ensure link_args are properly quoted
libdir = PurePath(prefix) / PurePath(libdir)
link_args = ['-L' + libdir.as_posix(), '-lfoo']
self.assertEqual(foo_dep.get_link_args(), link_args)
# Ensure include args are properly quoted
incdir = PurePath(prefix) / PurePath('include')
cargs = ['-I' + incdir.as_posix()]
self.assertEqual(foo_dep.get_compile_args(), cargs)
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
}
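# The expected data corresponds to an option declared roughly as
#   option('list', type : 'array', value : ['foo', 'bar'], description : 'list')
# in the test project's meson_options.txt (illustrative, not copied verbatim).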
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
def opt_has(self, name, value):
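# Assert that the introspected build options contain an option called
# `name` whose current value equals `value`.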
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
self.assertTrue(found, "Array option not found in introspect data.")
def test_free_stringarray_setting(self):
testdir = os.path.join(self.common_test_dir, '44 options')
self.init(testdir)
self.opt_has('free_array_opt', [])
self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
self.opt_has('free_array_opt', ['foo', 'bar'])
self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
self.opt_has('free_array_opt', ['a,b', 'c,d'])
def test_subproject_promotion(self):
testdir = os.path.join(self.unit_test_dir, '12 promote')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
s3dir = os.path.join(spdir, 's3')
scommondir = os.path.join(spdir, 'scommon')
self.assertFalse(os.path.isdir(s3dir))
subprocess.check_call(self.wrap_command + ['promote', 's3'], cwd=workdir)
self.assertTrue(os.path.isdir(s3dir))
self.assertFalse(os.path.isdir(scommondir))
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isdir(scommondir))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
self.assertTrue(os.path.isdir(scommondir))
promoted_wrap = os.path.join(spdir, 'athing.wrap')
self.assertFalse(os.path.isfile(promoted_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
self.assertTrue(os.path.isfile(promoted_wrap))
self.init(workdir)
self.build()
def test_subproject_promotion_wrap(self):
testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isfile(ambiguous_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
self.assertTrue(os.path.isfile(ambiguous_wrap))
def test_warning_location(self):
tdir = os.path.join(self.unit_test_dir, '22 warning location')
out = self.init(tdir)
for expected in [
r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
r'meson.build:6: WARNING: a warning of some sort',
r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
]:
self.assertRegex(out, re.escape(expected))
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
def test_templates(self):
ninja = detect_ninja()
if ninja is None:
raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')
for lang in ('c', 'cpp'):
for target_type in ('executable', 'library'):
with tempfile.TemporaryDirectory() as tmpdir:
self._run(self.meson_command + ['init', '--language', lang, '--type', target_type],
workdir=tmpdir)
self._run(self.setup_command + ['--backend=ninja', 'builddir'],
workdir=tmpdir)
self._run(ninja,
workdir=os.path.join(tmpdir, 'builddir'))
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
f.write('int main() {}')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
# The test uses mocking and thus requires that
# the current process is the one to run the Meson steps.
# If we are using an external test executable (most commonly
# in Debian autopkgtests) then the mocking won't work.
@unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
def test_cross_file_system_paths(self):
if is_windows():
raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
testdir = os.path.join(self.common_test_dir, '1 trivial')
cross_content = textwrap.dedent("""\
[binaries]
c = '/usr/bin/cc'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'x86'
cpu = 'i686'
endian = 'little'
""")
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
os.environ.pop('XDG_DATA_HOME', None)
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
# If XDG_DATA_HOME is set in the environment running the
# tests, this test will fail; so mock the environment, pop
# the variable, then test.
with mock.patch.dict(os.environ):
os.environ.pop('XDG_DATA_HOME', None)
with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '178 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '187 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
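# Acquiring the build directory lock twice from the same process must
# raise a MesonException instead of blocking or silently succeeding.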
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
with BuildDirLock(tdir):
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
@unittest.skipIf(is_osx(), 'Test not applicable to OSX')
def test_check_module_linking(self):
"""
Test that link_with: a shared module issues a warning
https://github.com/mesonbuild/meson/issues/2865
(That an error is raised on OSX is exercised by test failing/78)
"""
tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
out = self.init(tdir)
msg = ('''WARNING: target links against shared modules. This is not
recommended as it is not supported on some platforms''')
self.assertIn(msg, out)
def test_ndebug_if_release_disabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
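# Flow: build and install the library, point LDFLAGS at its install dir,
# build an executable that links it, then rebuild the library with a
# different set of exported symbols and check that the executable target
# is considered out of date.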
# build library
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}:
# static libraries are not linkable with -l with msvc because meson installs them
# as .a files, which unix_args_to_native will not recognize since it expects libraries
# to use .lib as the extension. For a DLL the import library is installed as .lib.
# Thus for msvc this test needs to use shared libraries to exercise the path
# resolution logic in the dependency generation code path.
extra_args = ['--default-library', 'static']
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
# build user of library
self.new_builddir()
# replace() is needed because meson mangles platform paths passed via LDFLAGS
self.init(os.path.join(testdirbase, 'exe'),
override_envvars={"LDFLAGS": '-L{}'.format(libdir.replace('\\', '/'))})
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
# assert that the user of the library will be rebuilt
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
def test_conflicting_d_dash_option(self):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
with self.assertRaises(subprocess.CalledProcessError) as e:
self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
# Just to ensure that we caught the correct error
self.assertIn('passed as both', e.stderr)
def _test_same_option_twice(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir, extra_args=args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice(self):
self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice(self):
self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice(self):
self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])
def _test_same_option_twice_configure(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir)
self.setconf(args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'one', ['-Done=foo', '-Done=bar'])
def test_command_line(self):
testdir = os.path.join(self.unit_test_dir, '34 command line')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'static')
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.assertEqual(obj.user_options['set_sub_opt'].value, True)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'default3')
self.wipe()
# warning_level is special, it's --warnlevel instead of --warning-level
# for historical reasons
self.init(testdir, extra_args=['--warnlevel=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('--warnlevel=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# But when using -D syntax, it should be 'warning_level'
self.init(testdir, extra_args=['-Dwarning_level=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('-Dwarning_level=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# Mixing --option and -Doption is forbidden
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.wipe()
# --default-library should override default value from project()
self.init(testdir, extra_args=['--default-library=both'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'both')
self.setconf('--default-library=shared')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
if self.backend is Backend.ninja:
# reconfigure target works only with ninja backend
self.build('reconfigure')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
self.wipe()
# Should warn on unknown options
out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
self.wipe()
# Should fail on malformed option
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['-Dfoo'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf('-Dfoo')
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.wipe()
# It is not an error to set a wrong option for unknown subprojects or
# languages because we don't have control over which ones will be selected.
self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
self.wipe()
# Test we can set subproject option
self.init(testdir, extra_args=['-Dsubp:subp_opt=foo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'foo')
self.wipe()
# c_args value should be parsed with shlex
self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])
self.setconf('-Dc_args="foo bar" one two')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c_args'].value, ['foo bar', 'one', 'two'])
self.wipe()
# Setting the same option a second time should override the first value
try:
self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
'-Dbuildtype=plain', '-Dbuildtype=release',
'-Db_sanitize=address', '-Db_sanitize=thread',
'-Dc_args=-Dfoo', '-Dc_args=-Dbar'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'bar')
self.assertEqual(obj.builtins['buildtype'].value, 'release')
self.assertEqual(obj.base_options['b_sanitize'].value, 'thread')
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dbar'])
self.setconf(['--bindir=bar', '--bindir=foo',
'-Dbuildtype=release', '-Dbuildtype=plain',
'-Db_sanitize=thread', '-Db_sanitize=address',
'-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'foo')
self.assertEqual(obj.builtins['buildtype'].value, 'plain')
self.assertEqual(obj.base_options['b_sanitize'].value, 'address')
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dfoo'])
self.wipe()
except KeyError:
# Ignore KeyError, it happens on CI for compilers that do not
# support b_sanitize. We have to test with a base option because
# they used to fail this test with Meson 0.46 and earlier versions.
pass
def test_warning_level_0(self):
testdir = os.path.join(self.common_test_dir, '215 warning level 0')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ --warnlevel
self.init(testdir, extra_args=['--warnlevel=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('--warnlevel=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ -Dwarning_level
self.init(testdir, extra_args=['-Dwarning_level=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('-Dwarning_level=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
def test_feature_check_usage_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
out = self.init(testdir)
# Parent project warns correctly
self.assertRegex(out, "WARNING: Project targetting '>=0.45'.*'0.47.0': dict")
# Subprojects warn correctly
self.assertRegex(out, r"\|WARNING: Project targetting '>=0.40'.*'0.44.0': disabler")
self.assertRegex(out, r"\|WARNING: Project targetting '!=0.40'.*'0.44.0': disabler")
# Subproject has a new-enough meson_version, no warning
self.assertNotRegex(out, "WARNING: Project targetting.*Python")
# Ensure a summary is printed in the subproject and the outer project
self.assertRegex(out, r"\|WARNING: Project specifies a minimum meson_version '>=0.40'")
self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}")
self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'")
self.assertRegex(out, " * 0.47.0: {'dict'}")
def test_configure_file_warnings(self):
testdir = os.path.join(self.common_test_dir, "14 configure file")
out = self.init(testdir)
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*")
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in")
# Warnings for configuration files that are overwritten.
self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites")
self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites")
self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites")
self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites")
self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites")
# No warnings about empty configuration data objects passed to files with substitutions
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in")
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in")
with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'')
self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
with tempfile.TemporaryDirectory() as containing:
with tempfile.TemporaryDirectory(dir=containing) as srcdir:
mfile = os.path.join(srcdir, 'meson.build')
of = open(mfile, 'w')
of.write("project('foobar', 'c')\n")
of.close()
pc = subprocess.run(self.setup_command,
cwd=srcdir,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
self.assertIn(b'Must specify at least one directory name', pc.stdout)
with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
subprocess.run(self.setup_command,
check=True,
cwd=builddir,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
result = {}
for i in self.introspect('--buildoptions'):
result[i['name']] = i['value']
return result
def test_buildtype_setting(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.setconf('-Ddebug=false')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'plain')
self.assertEqual(opts['optimization'], '0')
# Setting optimizations to 3 should cause buildtype
# to go to release mode.
self.setconf('-Doptimization=3')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'release')
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['optimization'], '3')
# Going to debug build type should reset debugging
# and optimization
self.setconf('-Dbuildtype=debug')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '0')
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_native_dep_pkgconfig(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = r'{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
def __reconfigure(self, change_minor=False):
# Set an older version to force a reconfigure from scratch
filename = os.path.join(self.privatedir, 'coredata.dat')
with open(filename, 'rb') as f:
obj = pickle.load(f)
if change_minor:
v = mesonbuild.coredata.version.split('.')
obj.version = '.'.join(v[0:2] + [str(int(v[2]) + 1)])
else:
obj.version = '0.47.0'
with open(filename, 'wb') as f:
pickle.dump(obj, f)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure()
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertRegex(out, 'WARNING:.*Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
# Create a file in builddir and verify wipe command removes it
filename = os.path.join(self.builddir, 'something')
open(filename, 'w').close()
self.assertTrue(os.path.exists(filename))
out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
self.assertFalse(os.path.exists(filename))
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 val4')
self.build()
self.run_tests()
def test_wipe_from_builddir(self):
testdir = os.path.join(self.common_test_dir, '162 custom target subdir depend files')
self.init(testdir)
self.__reconfigure()
with Path(self.builddir):
self.init(testdir, extra_args=['--wipe'])
def test_minor_version_does_not_reconfigure_wipe(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure(change_minor=True)
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertNotRegex(out, 'WARNING:.*Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
def test_target_construct_id_from_path(self):
# This id is stable but not guessable.
# The test is supposed to prevent unintentional
# changes to target ID generation.
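# The ID is a short hash of the subdir path combined with the target name
# and suffix, formatted as '<hash>@@<name><suffix>', so moving a target to
# another directory changes its ID while rebuilding in place keeps it stable.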
target_id = Target.construct_id_from_path('some/obscure/subdir',
'target-id', '@suffix')
self.assertEqual('5e002d3@@target-id@suffix', target_id)
target_id = Target.construct_id_from_path('subproject/foo/subdir/bar',
'target2-id', '@other')
self.assertEqual('81d46d1@@target2-id@other', target_id)
def test_introspect_projectinfo_without_configured_build(self):
testfile = os.path.join(self.common_test_dir, '36 run program', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'run command')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '44 options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'options')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '47 subproject options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'suboptions')
self.assertEqual(len(res['subprojects']), 1)
subproject_files = set(f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files'])
self.assertEqual(subproject_files, set(['subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build']))
self.assertEqual(res['subprojects'][0]['name'], 'subproject')
self.assertEqual(res['subprojects'][0]['version'], 'undefined')
self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
testdir = os.path.join(self.common_test_dir, '103 subproject subdir')
self.init(testdir)
res = self.introspect('--projectinfo')
expected = {
'descriptive_name': 'proj',
'version': 'undefined',
'subproject_dir': 'subprojects',
'subprojects': [
{
'descriptive_name': 'sub',
'name': 'sub',
'version': 'undefined'
}
]
}
self.assertDictEqual(res, expected)
def test_introspection_target_subproject(self):
testdir = os.path.join(self.common_test_dir, '46 subproject')
self.init(testdir)
res = self.introspect('--targets')
expected = {
'sublib': 'sublib',
'simpletest': 'sublib',
'user': None
}
for entry in res:
name = entry['name']
self.assertEqual(entry['subproject'], expected[name])
def test_introspect_projectinfo_subproject_dir(self):
testdir = os.path.join(self.common_test_dir, '79 custom subproject dir')
self.init(testdir)
res = self.introspect('--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
def test_introspect_projectinfo_subproject_dir_from_source(self):
testfile = os.path.join(self.common_test_dir, '79 custom subproject dir', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-format is for now only supported on Ninja, not {}'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '53 clang-format')
testfile = os.path.join(testdir, 'prog.c')
badfile = os.path.join(testdir, 'prog_orig_c')
goodfile = os.path.join(testdir, 'prog_expected_c')
testheader = os.path.join(testdir, 'header.h')
badheader = os.path.join(testdir, 'header_orig_h')
goodheader = os.path.join(testdir, 'header_expected_h')
try:
shutil.copyfile(badfile, testfile)
shutil.copyfile(badheader, testheader)
self.init(testdir)
self.assertNotEqual(Path(testfile).read_text(),
Path(goodfile).read_text())
self.assertNotEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
self.run_target('clang-format')
self.assertEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
finally:
if os.path.exists(testfile):
os.unlink(testfile)
if os.path.exists(testheader):
os.unlink(testheader)
def test_introspect_buildoptions_without_configured_build(self):
testdir = os.path.join(self.unit_test_dir, '56 introspect buildoptions')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
self.init(testdir, default_args=False)
res_wb = self.introspect('--buildoptions')
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_json_dump(self):
testdir = os.path.join(self.unit_test_dir, '55 introspection')
self.init(testdir)
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
def assertKeyTypes(key_type_list, obj):
for i in key_type_list:
self.assertIn(i[0], obj)
self.assertIsInstance(obj[i[0]], i[1])
root_keylist = [
('benchmarks', list),
('buildoptions', list),
('buildsystem_files', list),
('dependencies', list),
('installed', dict),
('projectinfo', dict),
('targets', list),
('tests', list),
]
test_keylist = [
('cmd', list),
('env', dict),
('name', str),
('timeout', int),
('suite', list),
('is_parallel', bool),
]
buildoptions_keylist = [
('name', str),
('section', str),
('type', str),
('description', str),
]
buildoptions_typelist = [
('combo', str, [('choices', list)]),
('string', str, []),
('boolean', bool, []),
('integer', int, []),
('array', list, []),
]
dependencies_typelist = [
('name', str),
('compile_args', list),
('link_args', list),
]
targets_typelist = [
('name', str),
('id', str),
('type', str),
('defined_in', str),
('filename', list),
('build_by_default', bool),
('target_sources', list),
('installed', bool),
]
targets_sources_typelist = [
('language', str),
('compiler', list),
('parameters', list),
('sources', list),
('generated_sources', list),
]
# First load all files
res = {}
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res[i[0]] = json.load(fp)
assertKeyTypes(root_keylist, res)
# Check Tests and benchmarks
tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
for i in res['benchmarks'] + res['tests']:
assertKeyTypes(test_keylist, i)
if i['name'] in tests_to_find:
tests_to_find.remove(i['name'])
self.assertListEqual(tests_to_find, [])
# Check buildoptions
buildopts_to_find = {'cpp_std': 'c++11'}
for i in res['buildoptions']:
assertKeyTypes(buildoptions_keylist, i)
valid_type = False
for j in buildoptions_typelist:
if i['type'] == j[0]:
self.assertIsInstance(i['value'], j[1])
assertKeyTypes(j[2], i)
valid_type = True
break
self.assertTrue(valid_type)
if i['name'] in buildopts_to_find:
self.assertEqual(i['value'], buildopts_to_find[i['name']])
buildopts_to_find.pop(i['name'], None)
self.assertDictEqual(buildopts_to_find, {})
# Check buildsystem_files
bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build']
bs_files = [os.path.join(testdir, x) for x in bs_files]
self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))
# Check dependencies
dependencies_to_find = ['threads']
for i in res['dependencies']:
assertKeyTypes(dependencies_typelist, i)
if i['name'] in dependencies_to_find:
dependencies_to_find.remove(i['name'])
self.assertListEqual(dependencies_to_find, [])
# Check projectinfo
self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []})
# Check targets
targets_to_find = {
'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'),
'staticTestLib': ('static library', True, False, 'staticlib/meson.build'),
'test1': ('executable', True, True, 'meson.build'),
'test2': ('executable', True, False, 'meson.build'),
'test3': ('executable', True, False, 'meson.build'),
}
for i in res['targets']:
assertKeyTypes(targets_typelist, i)
if i['name'] in targets_to_find:
tgt = targets_to_find[i['name']]
self.assertEqual(i['type'], tgt[0])
self.assertEqual(i['build_by_default'], tgt[1])
self.assertEqual(i['installed'], tgt[2])
self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
targets_to_find.pop(i['name'], None)
for j in i['target_sources']:
assertKeyTypes(targets_sources_typelist, j)
self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
testdir = os.path.join(self.unit_test_dir, '55 introspection')
self.init(testdir)
res_all = self.introspect('--all')
res_file = {}
root_keylist = [
'benchmarks',
'buildoptions',
'buildsystem_files',
'dependencies',
'installed',
'projectinfo',
'targets',
'tests',
]
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res_file[i] = json.load(fp)
self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
testdir = os.path.join(self.unit_test_dir, '55 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
for i in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']:
self.assertIn(i, res1)
self.assertEqual(res1['error'], False)
self.assertEqual(res1['build_files_updated'], True)
def test_introspect_config_update(self):
testdir = os.path.join(self.unit_test_dir, '55 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
self.setconf('-Dcpp_std=c++14')
self.setconf('-Dbuildtype=release')
for idx, i in enumerate(res1):
if i['name'] == 'cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'buildtype':
res1[idx]['value'] = 'release'
if i['name'] == 'optimization':
res1[idx]['value'] = '3'
if i['name'] == 'debug':
res1[idx]['value'] = False
with open(introfile, 'r') as fp:
res2 = json.load(fp)
self.assertListEqual(res1, res2)
def test_introspect_targets_from_source(self):
testdir = os.path.join(self.unit_test_dir, '55 introspection')
testfile = os.path.join(testdir, 'meson.build')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res_wb = json.load(fp)
res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)
# Account for differences in output
for i in res_wb:
i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']]
if 'install_filename' in i:
del i['install_filename']
sources = []
for j in i['target_sources']:
sources += j['sources']
i['target_sources'] = [{
'language': 'unknown',
'compiler': [],
'parameters': [],
'sources': sources,
'generated_sources': []
}]
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_dependencies_from_source(self):
testdir = os.path.join(self.unit_test_dir, '55 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args)
expected = [
{
'name': 'threads',
'required': True,
'has_fallback': False,
'conditional': False
},
{
'name': 'zlib',
'required': False,
'has_fallback': False,
'conditional': False
},
{
'name': 'bugDep1',
'required': False,
'has_fallback': False,
'conditional': False
},
{
'name': 'somethingthatdoesnotexist',
'required': True,
'has_fallback': False,
'conditional': True
},
{
'name': 'look_i_have_a_fallback',
'required': True,
'has_fallback': True,
'conditional': True
}
]
self.maxDiff = None
self.assertListEqual(res_nb, expected)
def test_unstable_coredata(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
# just test that the command does not fail (e.g. because it throws an exception)
self._run([*self.meson_command, 'unstable-coredata', self.builddir])
@skip_if_no_cmake
def test_cmake_prefix_path(self):
testdir = os.path.join(self.unit_test_dir, '60 cmake_prefix_path')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
@skip_if_no_cmake
def test_cmake_parser(self):
testdir = os.path.join(self.unit_test_dir, '61 cmake parser')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
def test_alias_target(self):
if self.backend is Backend.vs:
# FIXME: This unit test is broken with vs backend, needs investigation
raise unittest.SkipTest('Skipping alias_target test with {} backend'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '62 alias target')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'hello.txt'))
self.run_target('build-all')
self.assertPathExists(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathExists(os.path.join(self.builddir, 'hello.txt'))
class FailureTests(BasePlatformTests):
'''
Tests for failure conditions. Build files here should be dynamically
generated; static tests should go into `test cases/failing*`.
This is useful because there can be many ways in which a particular
function can fail, and creating failing tests for all of them is tedious
and slows down testing.
'''
dnf = "[Dd]ependency.*not found(:.*)?"
nopkg = '[Pp]kg-config.*not found'
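# 'dnf' matches Meson's "Dependency ... not found" errors and 'nopkg'
# matches "pkg-config not found"; individual tests pick whichever applies.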
def setUp(self):
super().setUp()
self.srcdir = os.path.realpath(tempfile.mkdtemp())
self.mbuild = os.path.join(self.srcdir, 'meson.build')
self.moptions = os.path.join(self.srcdir, 'meson_options.txt')
def tearDown(self):
super().tearDown()
windows_proof_rmtree(self.srcdir)
def assertMesonRaises(self, contents, match, *,
extra_args=None,
langs=None,
meson_version=None,
options=None,
override_envvars=None):
'''
Assert that running meson configure on the specified @contents raises
an error message matching regex @match.
'''
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('failure test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
if options is not None:
with open(self.moptions, 'w') as f:
f.write(options)
o = {'MESON_FORCE_BACKTRACE': '1'}
if override_envvars is None:
override_envvars = o
else:
override_envvars.update(o)
# Force tracebacks so we can detect them properly
with self.assertRaisesRegex(MesonException, match, msg=contents):
# Must run in-process or we'll get a generic CalledProcessError
self.init(self.srcdir, extra_args=extra_args,
inprocess=True,
override_envvars = override_envvars)
def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('output test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
# Run in-process for speed and consistency with assertMesonRaises
return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents outputs
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertRegex(out, match)
def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents does not output
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertNotRegex(out, match)
@skipIfNoPkgconfig
def test_dependency(self):
if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
raise unittest.SkipTest('zlib not found with pkg-config')
a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
("dependency('zlib', version : 1)", "[Vv]ersion.*string or list"),
("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
("dependency('zlibfail')", self.dnf),)
for contents, match in a:
self.assertMesonRaises(contents, match)
def test_apple_frameworks_dependency(self):
if not is_osx():
raise unittest.SkipTest('only run on macOS')
self.assertMesonRaises("dependency('appleframeworks')",
"requires at least one module")
def test_extraframework_dependency_method(self):
code = "dependency('python', method : 'extraframework')"
if not is_osx():
self.assertMesonRaises(code, self.dnf)
else:
# Python2 framework is always available on macOS
self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES')
def test_sdl2_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('sdl2-config'):
raise unittest.SkipTest('sdl2-config found')
self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
if shutil.which('pkg-config'):
self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
with no_pkgconfig():
# Look for pkg-config and cache it, then
# use the cached pkg-config without erroring out, then
# use the cached pkg-config to error out.
code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
"dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
"dependency('sdl2', method : 'pkg-config')"
self.assertMesonRaises(code, self.nopkg)
def test_gnustep_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('gnustep-config'):
raise unittest.SkipTest('gnustep-config found')
self.assertMesonRaises("dependency('gnustep')",
"(requires a Objc compiler|{})".format(self.dnf),
langs = ['objc'])
def test_wx_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
self.assertMesonOutputs("dependency('wxwidgets', required : false)",
"Run-time dependency .*WxWidgets.* found: .*NO.*")
def test_wx_dependency(self):
if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
"module argument is not a string")
def test_llvm_dependency(self):
self.assertMesonRaises("dependency('llvm', modules : 'fail')",
"(required.*fail|{})".format(self.dnf))
def test_boost_notfound_dependency(self):
# Can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost', modules : 1)",
"module.*not a string")
self.assertMesonRaises("dependency('boost', modules : 'fail')",
"(fail.*not found|{})".format(self.dnf))
def test_boost_BOOST_ROOT_dependency(self):
# Test BOOST_ROOT; can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost')",
"(BOOST_ROOT.*absolute|{})".format(self.dnf),
override_envvars = {'BOOST_ROOT': 'relative/path'})
def test_dependency_invalid_method(self):
code = '''zlib_dep = dependency('zlib', required : false)
zlib_dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, ".* is not a config-tool dependency")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_pkgconfig_variable('foo')
'''
self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
def test_objc_cpp_detection(self):
'''
Test that when we can't detect objc or objcpp, we fail gracefully.
'''
env = get_fake_env()
try:
env.detect_objc_compiler(MachineChoice.HOST)
env.detect_objcpp_compiler(MachineChoice.HOST)
except EnvironmentException:
code = "add_languages('objc')\nadd_languages('objcpp')"
self.assertMesonRaises(code, "Unknown compiler")
return
raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
def test_subproject_variables(self):
'''
Test that:
1. The correct message is output when a not-required dep is not
found and the fallback subproject is also not found.
2. A not-required fallback dependency is not found because the
subproject failed to parse.
3. A not-found, not-required dep with a fallback subproject outputs the
correct message when the fallback subproject is found but the
variable inside it is not.
4. A fallback dependency is found from the subproject parsed in (3).
5. The correct message is output when the .wrap file is missing for
a sub-subproject.
'''
tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
out = self.init(tdir, inprocess=True)
self.assertRegex(out, r"Subproject directory not found and .*nosubproj.wrap.* file not found")
self.assertRegex(out, r'Function does not take positional arguments.')
self.assertRegex(out, r'WARNING:.* Dependency .*subsubproject.* not found but it is available in a sub-subproject.')
self.assertRegex(out, r'Subproject directory not found and .*subsubproject.wrap.* file not found')
self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
def test_exception_exit_status(self):
'''
Test exit status on python exception
'''
tdir = os.path.join(self.unit_test_dir, '21 exit status')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1'})
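        # An unhandled Python exception is expected to make the meson process exit with status 2.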
self.assertEqual(cm.exception.returncode, 2)
self.wipe()
def test_dict_requires_key_value_pairs(self):
self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
'Only key:value pairs are valid in dict construction.')
self.assertMesonRaises("{'foo': 'bar', 3}",
'Only key:value pairs are valid in dict construction.')
def test_dict_forbids_duplicate_keys(self):
self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
'Duplicate dictionary key: a.*')
def test_dict_forbids_integer_key(self):
self.assertMesonRaises("dict = {3: 'foo'}",
'Key must be a string.*')
def test_using_too_recent_feature(self):
# Here we use a dict, which was introduced in 0.47.0
self.assertMesonOutputs("dict = {}",
".*WARNING.*Project targetting.*but.*",
meson_version='>= 0.46.0')
def test_using_recent_feature(self):
# Same as above, except the meson version is now appropriate
self.assertMesonDoesNotOutput("dict = {}",
".*WARNING.*Project targetting.*but.*",
meson_version='>= 0.47')
def test_using_too_recent_feature_dependency(self):
self.assertMesonOutputs("dependency('pcap', required: false)",
".*WARNING.*Project targetting.*but.*",
meson_version='>= 0.41.0')
def test_vcs_tag_featurenew_build_always_stale(self):
'https://github.com/mesonbuild/meson/issues/3904'
vcs_tag = '''version_data = configuration_data()
version_data.set('PROJVER', '@VCS_TAG@')
vf = configure_file(output : 'version.h.in', configuration: version_data)
f = vcs_tag(input : vf, output : 'version.h')
'''
msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
def test_missing_subproject_not_required_and_required(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub2 = subproject('not-found-subproject', required: true)",
""".*Subproject "subprojects/not-found-subproject" required but not found.*""")
def test_get_variable_on_not_found_project(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub1.get_variable('naaa')",
"""Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
def test_version_checked_before_parsing_options(self):
'''
https://github.com/mesonbuild/meson/issues/5281
'''
options = "option('some-option', type: 'foo', value: '')"
match = 'Meson version is.*but project requires >=2000'
self.assertMesonRaises("", match, meson_version='>=2000', options=options)
@unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)")
class WindowsTests(BasePlatformTests):
'''
Tests that should run on Cygwin, MinGW, and MSVC
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
@unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
def test_find_program(self):
'''
Test that Windows-specific edge-cases in find_program are functioning
correctly. Cannot be an ordinary test because it involves manipulating
PATH to point to a directory with Python scripts.
'''
testdir = os.path.join(self.platform_test_dir, '8 find program')
# Find `cmd` and `cmd.exe`
prog1 = ExternalProgram('cmd')
self.assertTrue(prog1.found(), msg='cmd not found')
prog2 = ExternalProgram('cmd.exe')
self.assertTrue(prog2.found(), msg='cmd.exe not found')
self.assertPathEqual(prog1.get_path(), prog2.get_path())
# Find cmd with an absolute path that's missing the extension
cmd_path = prog2.get_path()[:-4]
prog = ExternalProgram(cmd_path)
self.assertTrue(prog.found(), msg='{!r} not found'.format(cmd_path))
# Finding a script with no extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script'))
self.assertTrue(prog.found(), msg='test-script not found')
# Finding a script with an extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
self.assertTrue(prog.found(), msg='test-script-ext.py not found')
# Finding a script in PATH
os.environ['PATH'] += os.pathsep + testdir
# Finding a script in PATH w/o extension works and adds the interpreter
# (check only if `.PY` is in PATHEXT)
if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]:
prog = ExternalProgram('test-script-ext')
self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Finding a script in PATH with extension works and adds the interpreter
prog = ExternalProgram('test-script-ext.py')
self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
def test_ignore_libs(self):
'''
Test that find_library on libs that are to be ignored returns an empty
array of arguments. Must be a unit test because we cannot inspect
ExternalLibraryHolder from build files.
'''
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Not using MSVC')
        # Compare against the full list so that any change to ignore_libs forces this test to be updated.
self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'})
for l in cc.ignore_libs:
self.assertEqual(cc.find_library(l, env, []), [])
def test_rc_depends_files(self):
testdir = os.path.join(self.platform_test_dir, '5 resources')
# resource compiler depfile generation is not yet implemented for msvc
env = get_fake_env(testdir, self.builddir, self.prefix)
depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Test compile_resources(depend_file:)
# Changing mtime of sample.ico should rebuild prog
self.utime(os.path.join(testdir, 'res', 'sample.ico'))
self.assertRebuiltTarget('prog')
# Test depfile generation by compile_resources
# Changing mtime of resource.h should rebuild myres.rc and then prog
if depfile_works:
self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
self.assertRebuiltTarget('prog')
self.wipe()
if depfile_works:
testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
self.utime(os.path.join(testdir, 'res', 'resource.h'))
self.assertRebuiltTarget('prog_1')
def test_msvc_cpp17(self):
testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
try:
self.init(testdir)
except subprocess.CalledProcessError:
# According to Python docs, output is only stored when
# using check_output. We don't use it, so we can't check
# that the output is correct (i.e. that it failed due
# to the right reason).
return
self.build()
@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
'''
Tests that should run on macOS
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')
def test_apple_bitcode(self):
'''
Test that -fembed-bitcode is correctly added while compiling and
-bitcode_bundle is added while linking when b_bitcode is true and not
when it is false. This can't be an ordinary test case because we need
to inspect the compiler database.
'''
testdir = os.path.join(self.platform_test_dir, '7 bitcode')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.id != 'clang':
raise unittest.SkipTest('Not using Clang on OSX')
# Try with bitcode enabled
out = self.init(testdir, extra_args='-Db_bitcode=true')
# Warning was printed
self.assertRegex(out, 'WARNING:.*b_bitcode')
# Compiler options were added
for compdb in self.get_compdb():
if 'module' in compdb['file']:
self.assertNotIn('-fembed-bitcode', compdb['command'])
else:
self.assertIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
# Linker options were added
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNotNone(m, msg=contents)
# Try with bitcode disabled
self.setconf('-Db_bitcode=false')
# Regenerate build
self.build()
for compdb in self.get_compdb():
self.assertNotIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNone(m, msg=contents)
def test_apple_bitcode_modules(self):
'''
Same as above, just for shared_module()
'''
testdir = os.path.join(self.common_test_dir, '153 shared module resolving symbol in executable')
# Ensure that it builds even with bitcode enabled
self.init(testdir, extra_args='-Db_bitcode=true')
self.build()
self.run_tests()
def _get_darwin_versions(self, fname):
fname = os.path.join(self.builddir, fname)
out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
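        # The second line of `otool -L` output ends with
        # "(compatibility version X.Y.Z, current version A.B.C)".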
m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
self.assertIsNotNone(m, msg=out)
return m.groups()
@skipIfNoPkgconfig
def test_library_versioning(self):
'''
Ensure that compatibility_version and current_version are set correctly
'''
testdir = os.path.join(self.platform_test_dir, '2 library versions')
self.init(testdir)
self.build()
targets = {}
for t in self.introspect('--targets'):
targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))
def test_duplicate_rpath(self):
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
# We purposely pass a duplicate rpath to Meson, in order
# to ascertain that Meson does not call install_name_tool
# with duplicate -delete_rpath arguments, which would
# lead to erroring out on installation
env = {"LDFLAGS": "-Wl,-rpath,/foo/bar"}
self.init(testdir, override_envvars=env)
self.build()
self.install()
@unittest.skipUnless(not is_windows(), "requires something Unix-like")
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
'''
Test that the soname is set correctly for shared libraries. This can't
be an ordinary test case because we need to run `readelf` and actually
check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '4 shared')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'libmylib.so')
soname = get_soname(lib1)
self.assertEqual(soname, 'libmylib.so')
def test_custom_soname(self):
'''
Test that the soname is set correctly for shared libraries when
a custom prefix and/or suffix is used. This can't be an ordinary test
case because we need to run `readelf` and actually check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '25 library versions')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix')
soname = get_soname(lib1)
self.assertEqual(soname, 'prefixsomelib.suffix')
def test_pic(self):
'''
Test that -fPIC is correctly added to static libraries when b_staticpic
is true and not when it is false. This can't be an ordinary test case
because we need to inspect the compiler database.
'''
if is_windows() or is_cygwin() or is_osx():
raise unittest.SkipTest('PIC not relevant')
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir)
compdb = self.get_compdb()
self.assertIn('-fPIC', compdb[0]['command'])
self.setconf('-Db_staticpic=false')
# Regenerate build
self.build()
compdb = self.get_compdb()
self.assertNotIn('-fPIC', compdb[0]['command'])
def test_pkgconfig_gen(self):
'''
Test that generated pkg-config files can be found and have the correct
version and link args. This can't be an ordinary test case because we
need to run pkg-config outside of a Meson build file.
https://github.com/mesonbuild/meson/issues/889
'''
testdir = os.path.join(self.common_test_dir, '48 pkgconfig-gen')
self.init(testdir)
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
self.assertTrue(foo_dep.found())
self.assertEqual(foo_dep.get_version(), '1.0')
self.assertIn('-lfoo', foo_dep.get_link_args())
self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
def test_pkgconfig_gen_deps(self):
'''
Test that generated pkg-config files correctly handle dependencies
'''
testdir = os.path.join(self.common_test_dir, '48 pkgconfig-gen')
self.init(testdir)
privatedir1 = self.privatedir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '48 pkgconfig-gen', 'dependencies')
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': privatedir1})
privatedir2 = self.privatedir
env = {'PKG_CONFIG_LIBDIR': os.pathsep.join([privatedir1, privatedir2])}
self._run(['pkg-config', 'dependency-test', '--validate'], override_envvars=env)
# pkg-config strips some duplicated flags so we have to parse the
        # generated file ourselves.
expected = {
'Requires': 'libexposed',
'Requires.private': 'libfoo >= 1.0',
'Libs': '-L${libdir} -llibmain -pthread -lcustom',
'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
'Cflags': '-I${includedir} -pthread -DCUSTOM',
}
if is_osx() or is_haiku():
expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
matched_lines = 0
for line in f:
parts = line.split(':', 1)
if parts[0] in expected:
key = parts[0]
val = parts[1].strip()
expected_val = expected[key]
self.assertEqual(expected_val, val)
matched_lines += 1
self.assertEqual(len(expected), matched_lines)
cmd = ['pkg-config', 'requires-test']
out = self._run(cmd + ['--print-requires'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'requires-private-test']
out = self._run(cmd + ['--print-requires-private'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
def test_pkg_unfound(self):
testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
self.init(testdir)
with open(os.path.join(self.privatedir, 'somename.pc')) as f:
pcfile = f.read()
        self.assertNotIn('blub_blob_blib', pcfile)
def test_vala_c_warnings(self):
'''
Test that no warnings are emitted for C code generated by Vala. This
can't be an ordinary test case because we need to inspect the compiler
database.
https://github.com/mesonbuild/meson/issues/864
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '5 target glib')
self.init(testdir)
compdb = self.get_compdb()
vala_command = None
c_command = None
for each in compdb:
if each['file'].endswith('GLib.Thread.c'):
vala_command = each['command']
elif each['file'].endswith('GLib.Thread.vala'):
continue
elif each['file'].endswith('retcode.c'):
c_command = each['command']
else:
m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
raise AssertionError(m)
self.assertIsNotNone(vala_command)
self.assertIsNotNone(c_command)
# -w suppresses all warnings, should be there in Vala but not in C
self.assertIn(" -w ", vala_command)
self.assertNotIn(" -w ", c_command)
# -Wall enables all warnings, should be there in C but not in Vala
self.assertNotIn(" -Wall ", vala_command)
self.assertIn(" -Wall ", c_command)
# -Werror converts warnings to errors, should always be there since it's
# injected by an unrelated piece of code and the project has werror=true
self.assertIn(" -Werror ", vala_command)
self.assertIn(" -Werror ", c_command)
@skipIfNoPkgconfig
def test_qtdependency_pkgconfig_detection(self):
'''
Test that qt4 and qt5 detection with pkgconfig works.
'''
# Verify Qt4 or Qt5 can be found with pkg-config
qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore'])
qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core'])
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=pkg-config'])
# Confirm that the dependency was found with pkg-config
mesonlog = self.get_meson_log()
if qt4 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)\n')
if qt5 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)\n')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
def test_qt5dependency_qmake_detection(self):
'''
Test that qt5 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt5
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES .* \((qmake|qmake-qt5)\)\n')
def _test_soname_impl(self, libpath, install):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
testdir = os.path.join(self.unit_test_dir, '1 soname')
self.init(testdir)
self.build()
if install:
self.install()
# File without aliases set.
nover = os.path.join(libpath, 'libnover.so')
self.assertPathExists(nover)
self.assertFalse(os.path.islink(nover))
self.assertEqual(get_soname(nover), 'libnover.so')
self.assertEqual(len(glob(nover[:-3] + '*')), 1)
# File with version set
verset = os.path.join(libpath, 'libverset.so')
self.assertPathExists(verset + '.4.5.6')
self.assertEqual(os.readlink(verset), 'libverset.so.4')
self.assertEqual(get_soname(verset), 'libverset.so.4')
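        # Expect three files: libverset.so -> libverset.so.4 -> libverset.so.4.5.6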
self.assertEqual(len(glob(verset[:-3] + '*')), 3)
# File with soversion set
soverset = os.path.join(libpath, 'libsoverset.so')
self.assertPathExists(soverset + '.1.2.3')
self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(len(glob(soverset[:-3] + '*')), 2)
# File with version and soversion set to same values
settosame = os.path.join(libpath, 'libsettosame.so')
self.assertPathExists(settosame + '.7.8.9')
self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(len(glob(settosame[:-3] + '*')), 2)
# File with version and soversion set to different values
bothset = os.path.join(libpath, 'libbothset.so')
self.assertPathExists(bothset + '.1.2.3')
self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
self.assertEqual(len(glob(bothset[:-3] + '*')), 3)
def test_soname(self):
self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
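        # self.prefix is an absolute path, so append it to installdir with string
        # concatenation (os.path.join would discard installdir) to get the DESTDIR-style layout.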
libdir = self.installdir + os.path.join(self.prefix, self.libdir)
self._test_soname_impl(libdir, True)
def test_compiler_check_flags_order(self):
'''
Test that compiler check flags override all other flags. This can't be
an ordinary test case because it needs the environment to be set.
'''
testdir = os.path.join(self.common_test_dir, '40 has function')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
Oflag = '-O3'
OflagCPP = Oflag
if cpp.get_id() in ('clang', 'gcc'):
# prevent developers from adding "int main(int argc, char **argv)"
# to small Meson checks unless these parameters are actually used
OflagCPP += ' -Werror=unused-parameter'
env = {'CFLAGS': Oflag,
'CXXFLAGS': OflagCPP}
self.init(testdir, override_envvars=env)
cmds = self.get_meson_log_compiler_checks()
for cmd in cmds:
if cmd[0] == 'ccache':
cmd = cmd[1:]
# Verify that -I flags from the `args` kwarg are first
# This is set in the '40 has function' test case
self.assertEqual(cmd[1], '-I/tmp')
# Verify that -O3 set via the environment is overridden by -O0
Oargs = [arg for arg in cmd if arg.startswith('-O')]
self.assertEqual(Oargs, [Oflag, '-O0'])
def _test_stds_impl(self, testdir, compiler, p: str):
lang_std = p + '_std'
has_cpp17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=5.0.0', '>=9.1') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=5.0.0'))
has_cpp2a_c17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=6.0.0', '>=10.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
has_c18 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=8.0.0', '>=11.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
# Check that all the listed -std=xxx options for this compiler work just fine when used
# https://en.wikipedia.org/wiki/Xcode#Latest_versions
# https://www.gnu.org/software/gcc/projects/cxx-status.html
for v in compiler.get_options()[lang_std].choices:
# we do it like this to handle gnu++17,c++17 and gnu17,c17 cleanly
# thus, C++ first
if '++17' in v and not has_cpp17:
continue
elif '++2a' in v and not has_cpp2a_c17: # https://en.cppreference.com/w/cpp/compiler_support
continue
# now C
elif '17' in v and not has_cpp2a_c17:
continue
elif '18' in v and not has_c18:
continue
std_opt = '{}={}'.format(lang_std, v)
self.init(testdir, extra_args=['-D' + std_opt])
cmd = self.get_compdb()[0]['command']
# c++03 and gnu++03 are not understood by ICC, don't try to look for them
skiplist = frozenset([
('intel', 'c++03'),
('intel', 'gnu++03')])
            if v != 'none' and (compiler.get_id(), v) not in skiplist:
cmd_std = " -std={} ".format(v)
self.assertIn(cmd_std, cmd)
try:
self.build()
except Exception:
print('{} was {!r}'.format(lang_std, v))
raise
self.wipe()
# Check that an invalid std option in CFLAGS/CPPFLAGS fails
# Needed because by default ICC ignores invalid options
cmd_std = '-std=FAIL'
if p == 'c':
env_flag_name = 'CFLAGS'
elif p == 'cpp':
env_flag_name = 'CXXFLAGS'
else:
raise NotImplementedError('Language {} not defined.'.format(p))
env = {}
env[env_flag_name] = cmd_std
with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException),
msg='C compiler should have failed with -std=FAIL'):
self.init(testdir, override_envvars = env)
# ICC won't fail in the above because additional flags are needed to
# make unknown -std=... options errors.
self.build()
def test_compiler_c_stds(self):
'''
Test that C stds specified for this compiler can all be used. Can't be
an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cc, 'c')
def test_compiler_cpp_stds(self):
'''
Test that C++ stds specified for this compiler can all be used. Can't
be an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '2 cpp')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cpp, 'cpp')
def test_unity_subproj(self):
testdir = os.path.join(self.common_test_dir, '46 subproject')
self.init(testdir, extra_args='--unity=subprojects')
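        # Unity sources are written into each target's private directory, whose name is the id
        # that construct_id_from_path() derives from the subproject path and target name.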
simpletest_id = Target.construct_id_from_path('subprojects/sublib', 'simpletest', '@exe')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', simpletest_id, 'simpletest-unity.c'))
sublib_id = Target.construct_id_from_path('subprojects/sublib', 'sublib', '@sha')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', sublib_id, 'sublib-unity.c'))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
self.build()
def test_installed_modes(self):
'''
Test that files installed by these tests have the correct permissions.
Can't be an ordinary test because our installed_files.txt is very basic.
'''
# Test file modes
testdir = os.path.join(self.common_test_dir, '12 data')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
found_mode = stat.filemode(os.stat(f).st_mode)
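        # stat.filemode() prefixes the file-type character ('-', 'd', ...); strip it with [1:] before comparing.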
want_mode = 'rw------T'
self.assertEqual(want_mode, found_mode[1:])
f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-sr-x'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
self.assertEqual(0, statf.st_gid)
f = os.path.join(self.installdir, 'usr', 'share', 'progname',
'fileobject_datafile.dat')
orig = os.path.join(testdir, 'fileobject_datafile.dat')
statf = os.stat(f)
statorig = os.stat(orig)
found_mode = stat.filemode(statf.st_mode)
orig_mode = stat.filemode(statorig.st_mode)
self.assertEqual(orig_mode[1:], found_mode[1:])
self.assertEqual(os.getuid(), statf.st_uid)
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_gid)
self.wipe()
# Test directory modes
testdir = os.path.join(self.common_test_dir, '63 install subdir')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-x--t'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
def test_installed_modes_extended(self):
'''
Test that files are installed with correct permissions using install_mode.
'''
testdir = os.path.join(self.common_test_dir, '196 install_mode')
self.init(testdir)
self.build()
self.install()
for fsobj, want_mode in [
('bin', 'drwxr-x---'),
('bin/runscript.sh', '-rwxr-sr-x'),
('bin/trivialprog', '-rwxr-sr-x'),
('include', 'drwxr-x---'),
('include/config.h', '-rw-rwSr--'),
('include/rootdir.h', '-r--r--r-T'),
('lib', 'drwxr-x---'),
('lib/libstat.a', '-rw---Sr--'),
('share', 'drwxr-x---'),
('share/man', 'drwxr-x---'),
('share/man/man1', 'drwxr-x---'),
('share/man/man1/foo.1', '-r--r--r-T'),
('share/sub1', 'drwxr-x---'),
('share/sub1/second.dat', '-rwxr-x--t'),
('subdir', 'drwxr-x---'),
('subdir/data.dat', '-rw-rwSr--'),
]:
f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(fsobj, want_mode, found_mode)))
# Ensure that introspect --installed works on all types of files
# FIXME: also verify the files list
self.introspect('--installed')
def test_install_umask(self):
'''
Test that files are installed with correct permissions using default
install umask of 022, regardless of the umask at time the worktree
was checked out or the build was executed.
'''
# Copy source tree to a temporary directory and change permissions
# there to simulate a checkout with umask 002.
orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
# Create a new testdir under tmpdir.
tmpdir = os.path.realpath(tempfile.mkdtemp())
self.addCleanup(windows_proof_rmtree, tmpdir)
testdir = os.path.join(tmpdir, '26 install umask')
# Copy the tree using shutil.copyfile, which will use the current umask
# instead of preserving permissions of the old tree.
save_umask = os.umask(0o002)
self.addCleanup(os.umask, save_umask)
shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
# Preserve the executable status of subdir/sayhello though.
os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
self.init(testdir)
# Run the build under a 027 umask now.
os.umask(0o027)
self.build()
# And keep umask 027 for the install step too.
self.install()
for executable in [
'bin/prog',
'share/subdir/sayhello',
]:
f = os.path.join(self.installdir, 'usr', *executable.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(executable, want_mode, found_mode)))
for directory in [
'usr',
'usr/bin',
'usr/include',
'usr/share',
'usr/share/man',
'usr/share/man/man1',
'usr/share/subdir',
]:
f = os.path.join(self.installdir, *directory.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'drwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected directory %s to have mode %s but found %s instead.' %
(directory, want_mode, found_mode)))
for datafile in [
'include/sample.h',
'share/datafile.cat',
'share/file.dat',
'share/man/man1/prog.1',
'share/subdir/datafile.dog',
]:
f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rw-r--r--'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(datafile, want_mode, found_mode)))
def test_cpp_std_override(self):
testdir = os.path.join(self.unit_test_dir, '6 std override')
self.init(testdir)
compdb = self.get_compdb()
# Don't try to use -std=c++03 as a check for the
# presence of a compiler flag, as ICC does not
# support it.
for i in compdb:
if 'prog98' in i['file']:
c98_comp = i['command']
if 'prog11' in i['file']:
c11_comp = i['command']
if 'progp' in i['file']:
plain_comp = i['command']
self.assertNotEqual(len(plain_comp), 0)
self.assertIn('-std=c++98', c98_comp)
self.assertNotIn('-std=c++11', c98_comp)
self.assertIn('-std=c++11', c11_comp)
self.assertNotIn('-std=c++98', c11_comp)
self.assertNotIn('-std=c++98', plain_comp)
self.assertNotIn('-std=c++11', plain_comp)
# Now werror
self.assertIn('-Werror', plain_comp)
self.assertNotIn('-Werror', c98_comp)
def test_run_installed(self):
if is_cygwin() or is_osx():
raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
testdir = os.path.join(self.unit_test_dir, '7 run installed')
self.init(testdir)
self.build()
self.install()
installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
installed_libdir = os.path.join(self.installdir, 'usr/foo')
installed_lib = os.path.join(installed_libdir, 'libfoo.so')
self.assertTrue(os.path.isfile(installed_exe))
self.assertTrue(os.path.isdir(installed_libdir))
self.assertTrue(os.path.isfile(installed_lib))
# Must fail when run without LD_LIBRARY_PATH to ensure that
# rpath has been properly stripped rather than pointing to the builddir.
self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
# When LD_LIBRARY_PATH is set it should start working.
# For some reason setting LD_LIBRARY_PATH in os.environ fails
# when all tests are run (but works when only this test is run),
# but doing this explicitly works.
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
self.assertEqual(subprocess.call(installed_exe, env=env), 0)
# Ensure that introspect --installed works
installed = self.introspect('--installed')
for v in installed.values():
self.assertTrue('prog' in v or 'foo' in v)
@skipIfNoPkgconfig
def test_order_of_l_arguments(self):
testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
# NOTE: .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders
# the flags before returning them to -Lfoo -Lbar -lfoo -lbar
# but pkgconf seems to not do that. Sigh. Support both.
expected_order = [('-L/me/first', '-lfoo1'),
('-L/me/second', '-lfoo2'),
('-L/me/first', '-L/me/second'),
('-lfoo1', '-lfoo2'),
('-L/me/second', '-L/me/third'),
('-L/me/third', '-L/me/fourth',),
('-L/me/third', '-lfoo3'),
('-L/me/fourth', '-lfoo4'),
('-lfoo3', '-lfoo4'),
]
with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
for line in ifile:
if expected_order[0][0] in line:
for first, second in expected_order:
self.assertLess(line.index(first), line.index(second))
return
raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
'''
Tests that mesonintrospect --dependencies returns expected output.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir)
glib_found = False
gobject_found = False
deps = self.introspect('--dependencies')
self.assertIsInstance(deps, list)
for dep in deps:
self.assertIsInstance(dep, dict)
self.assertIn('name', dep)
self.assertIn('compile_args', dep)
self.assertIn('link_args', dep)
if dep['name'] == 'glib-2.0':
glib_found = True
elif dep['name'] == 'gobject-2.0':
gobject_found = True
self.assertTrue(glib_found)
self.assertTrue(gobject_found)
if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
targets = self.introspect('--targets')
docbook_target = None
for t in targets:
if t['name'] == 'generated-gdbus-docbook':
docbook_target = t
break
self.assertIsInstance(docbook_target, dict)
        self.assertEqual(os.path.basename(docbook_target['filename'][0]), 'generated-gdbus-doc-' + os.path.basename(docbook_target['target_sources'][0]['sources'][0]))
def test_build_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
self.init(testdir)
self.build()
# C program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz')
# C++ program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz')
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.common_test_dir, '13 pch')
self.init(testdir, extra_args=['-Db_sanitize=address'])
self.build()
compdb = self.get_compdb()
for i in compdb:
self.assertIn("-fsanitize=address", i["command"])
def test_coverage(self):
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found')
if not shutil.which('genhtml') and not gcovr_new_rootdir:
raise unittest.SkipTest('genhtml not found and gcovr is too old')
if 'clang' in os.environ.get('CC', ''):
# We need to use llvm-cov instead of gcovr with clang
raise unittest.SkipTest('Coverage does not work with clang right now, help wanted!')
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-html')
def test_cross_find_program(self):
testdir = os.path.join(self.unit_test_dir, '11 cross prog')
crossfile = tempfile.NamedTemporaryFile(mode='w')
print(os.path.join(testdir, 'some_cross_tool.py'))
crossfile.write('''[binaries]
c = '/usr/bin/cc'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
sometool.py = ['{0}']
someothertool.py = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7' # Not sure if correct.
endian = 'little'
'''.format(os.path.join(testdir, 'some_cross_tool.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
self.init(testdir)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '13 reconfigure')
self.init(testdir, extra_args=['-Db_coverage=true'], default_args=False)
self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
'''
Test that valac outputs generated C files in the expected location when
the builddir is a subdir of the source tree.
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '8 generated sources')
newdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, newdir)
testdir = newdir
# New builddir
builddir = os.path.join(testdir, 'subdir/_build')
os.makedirs(builddir, exist_ok=True)
self.change_builddir(builddir)
self.init(testdir)
self.build()
def test_old_gnome_module_codepaths(self):
'''
A lot of code in the GNOME module is conditional on the version of the
glib tools that are installed, and breakages in the old code can slip
by once the CI has a newer glib version. So we force the GNOME module
to pretend that it's running on an ancient glib so the fallback code is
also tested.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
mesonbuild.modules.gnome.native_glib_version = '2.20'
env = {'MESON_UNIT_TEST_PRETEND_GLIB_OLD': "1"}
try:
self.init(testdir,
inprocess=True,
override_envvars=env)
self.build(override_envvars=env)
finally:
mesonbuild.modules.gnome.native_glib_version = None
@skipIfNoPkgconfig
def test_pkgconfig_usage(self):
testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
raise unittest.SkipTest('Glib 2.0 dependency not available.')
with tempfile.TemporaryDirectory() as tempdirname:
self.init(testdir1, extra_args=['--prefix=' + tempdirname, '--libdir=lib'], default_args=False)
self.install(use_destdir=False)
shutil.rmtree(self.builddir)
os.mkdir(self.builddir)
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
lib_dir = os.path.join(tempdirname, 'lib')
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = pkg_dir
# Private internal libraries must not leak out.
pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'], env=myenv)
            self.assertNotIn(b'libpkgdep-int', pkg_out, 'Internal library leaked out.')
# Dependencies must not leak to cflags when building only a shared library.
pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'], env=myenv)
            self.assertNotIn(b'glib', pkg_out, 'Internal dependency leaked to headers.')
# Test that the result is usable.
self.init(testdir2, override_envvars=myenv)
self.build(override_envvars=myenv)
myenv = os.environ.copy()
myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
if is_cygwin():
bin_dir = os.path.join(tempdirname, 'bin')
myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
self.assertTrue(os.path.isdir(lib_dir))
test_exe = os.path.join(self.builddir, 'pkguser')
self.assertTrue(os.path.isfile(test_exe))
subprocess.check_call(test_exe, env=myenv)
@skipIfNoPkgconfig
def test_pkgconfig_relative_paths(self):
testdir = os.path.join(self.unit_test_dir, '58 pkgconfig relative paths')
pkg_dir = os.path.join(testdir, 'pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc')))
env = get_fake_env(testdir, self.builddir, self.prefix)
env.coredata.set_options({'pkg_config_path': pkg_dir}, '')
kwargs = {'required': True, 'silent': True}
relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs)
self.assertTrue(relative_path_dep.found())
# Ensure link_args are properly quoted
libpath = Path(self.builddir) / '../relativepath/lib'
link_args = ['-L' + libpath.as_posix(), '-lrelativepath']
self.assertEqual(relative_path_dep.get_link_args(), link_args)
@skipIfNoPkgconfig
def test_pkgconfig_internal_libraries(self):
        '''
        Install a static library, then check that a project consuming it purely
        through the generated pkg-config file configures and builds.
        '''
with tempfile.TemporaryDirectory() as tempdirname:
# build library
testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
testdirlib = os.path.join(testdirbase, 'lib')
self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
'--libdir=lib',
'--default-library=static'], default_args=False)
self.build()
self.install(use_destdir=False)
# build user of library
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
if is_windows() or is_cygwin() or is_osx() or is_openbsd():
# On Windows, libintl is a separate library
deps.append(b'-lintl')
self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
'''
Test that libraries are listed before their dependencies.
'''
testdir = os.path.join(self.unit_test_dir, '52 pkgconfig static link order')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
deps = stdo.split()
        self.assertLess(deps.index(b'-lsomething'), deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
'''
Test that the dependencies are always listed in a deterministic order.
'''
testdir = os.path.join(self.unit_test_dir, '43 dep order')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'build myexe:' in line or 'build myexe.exe:' in line:
self.assertIn('liblib1.a liblib2.a', line)
return
raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
'''
Test that the rpaths are always listed in a deterministic order.
'''
if is_cygwin():
raise unittest.SkipTest('rpath are not used on Cygwin')
testdir = os.path.join(self.unit_test_dir, '42 rpath order')
self.init(testdir)
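        # build.ninja escapes a literal '$' as '$$', hence the doubled dollar in the ELF pattern below.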
if is_osx():
rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
else:
rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if '-rpath' in line:
self.assertRegex(line, rpathre)
return
raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
'''
Test that we produce the correct dependencies when a program is overridden with an executable.
'''
testdir = os.path.join(self.common_test_dir, '202 override with exe')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'main1.c:' in line or 'main2.c:' in line:
self.assertIn('| subprojects/sub/foobar', line)
@skipIfNoPkgconfig
def test_usage_external_library(self):
'''
Test that uninstalled usage of an external library (from the system or
PkgConfigDependency) works. On macOS, this workflow works out of the
box. On Linux, BSDs, Windows, etc, you need to set extra arguments such
as LD_LIBRARY_PATH, etc, so this test is skipped.
The system library is found with cc.find_library() and pkg-config deps.
'''
oldprefix = self.prefix
# Install external library so we can find it
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
# install into installdir without using DESTDIR
installdir = self.installdir
self.prefix = installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
self.install(use_destdir=False)
## New builddir for the consumer
self.new_builddir()
env = {'LIBRARY_PATH': os.path.join(installdir, self.libdir),
'PKG_CONFIG_PATH': os.path.join(installdir, self.libdir, 'pkgconfig')}
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
# install into installdir without using DESTDIR
self.prefix = self.installdir
self.init(testdir, override_envvars=env)
self.prefix = oldprefix
self.build(override_envvars=env)
# test uninstalled
self.run_tests(override_envvars=env)
if not is_osx():
# Rest of the workflow only works on macOS
return
# test running after installation
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'prog')
self._run([prog])
out = self._run(['otool', '-L', prog])
self.assertNotIn('@rpath', out)
## New builddir for testing that DESTDIR is not added to install_name
self.new_builddir()
# install into installdir with DESTDIR
self.init(testdir, override_envvars=env)
self.build(override_envvars=env)
# test running after installation
self.install(override_envvars=env)
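        # DESTDIR-style layout again: installed files live at installdir + prefix, and prefix is
        # absolute, so use string concatenation rather than os.path.join.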
prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
for f in prog, lib:
out = self._run(['otool', '-L', f])
# Ensure that the otool output does not contain self.installdir
self.assertNotRegex(out, self.installdir + '.*dylib ')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
'''
Test that installation of broken symlinks works fine.
https://github.com/mesonbuild/meson/issues/3914
'''
testdir = os.path.join(self.common_test_dir, testdir)
subdir = os.path.join(testdir, subdir_path)
curdir = os.getcwd()
os.chdir(subdir)
# Can't distribute broken symlinks in the source tree because it breaks
# the creation of zipapps. Create it dynamically and run the test by
# hand.
src = '../../nonexistent.txt'
os.symlink(src, 'invalid-symlink.txt')
try:
self.init(testdir)
self.build()
self.install()
install_path = subdir_path.split(os.path.sep)[-1]
link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
self.assertTrue(os.path.islink(link), msg=link)
self.assertEqual(src, os.readlink(link))
self.assertFalse(os.path.isfile(link), msg=link)
finally:
os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
os.chdir(curdir)
def test_install_subdir_symlinks(self):
self.install_subdir_invalid_symlinks('63 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
self.install_subdir_invalid_symlinks('196 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
self.install_subdir_invalid_symlinks('196 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
testdir = os.path.join(self.unit_test_dir, '51 ldflagdedup')
if is_cygwin() or is_osx():
raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
max_count = 0
search_term = '-Wl,--export-dynamic'
with open(build_ninja, 'r', encoding='utf-8') as f:
for line in f:
max_count = max(max_count, line.count(search_term))
self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
def test_compiler_libs_static_dedup(self):
testdir = os.path.join(self.unit_test_dir, '55 dedup compiler libs')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
lines = f.readlines()
for lib in ('-ldl', '-lm', '-lc', '-lrt'):
for line in lines:
if lib not in line:
continue
                # Assert that the library flag occurs exactly once in this line.
self.assertEqual(len(line.split(lib)), 2, msg=(lib, line))
@skipIfNoPkgconfig
def test_pkg_config_option(self):
testdir = os.path.join(self.unit_test_dir, '55 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
def test_std_remains(self):
# C_std defined in project options must be in effect also when native compiling.
testdir = os.path.join(self.unit_test_dir, '50 std remains')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-std=c99')
self.build()
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '58 identity cross')
crossfile = tempfile.NamedTemporaryFile(mode='w')
env = {'CC': '"' + os.path.join(testdir, 'build_wrapper.py') + '"'}
crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir, override_envvars=env)
def should_run_cross_arm_tests():
return shutil.which('arm-linux-gnueabihf-gcc') and not platform.machine().lower().startswith('arm')
@unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM")
class LinuxCrossArmTests(BasePlatformTests):
'''
Tests that cross-compilation to Linux/ARM works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')
def test_cflags_cross_environment_pollution(self):
'''
Test that the CFLAGS environment variable does not pollute the cross
environment. This can't be an ordinary test case because we need to
inspect the compiler database.
'''
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir, override_envvars={'CFLAGS': '-DBUILD_ENVIRONMENT_ONLY'})
compdb = self.get_compdb()
self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])
def test_cross_file_overrides_always_args(self):
'''
Test that $lang_args in cross files always override get_always_args().
Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
architectures such as some Android versions and Raspbian.
https://github.com/mesonbuild/meson/issues/3049
https://github.com/mesonbuild/meson/issues/3089
'''
testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
self.init(testdir)
compdb = self.get_compdb()
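        # The cross file's -U_FILE_OFFSET_BITS must come after the built-in
        # -D_FILE_OFFSET_BITS=64 so that it takes effect.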
self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
self.build()
def test_cross_libdir(self):
# When cross compiling "libdir" should default to "lib"
# rather than "lib/x86_64-linux-gnu" or something like that.
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'lib')
return
        self.fail('Option libdir not in introspect data.')
def test_std_remains(self):
# C_std defined in project options must be in effect also when cross compiling.
testdir = os.path.join(self.unit_test_dir, '50 std remains')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-std=c99')
self.build()
def should_run_cross_mingw_tests():
return shutil.which('x86_64-w64-mingw32-gcc') and not (is_windows() or is_cygwin())
@unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW")
class LinuxCrossMingwTests(BasePlatformTests):
'''
Tests that cross-compilation to Windows/MinGW works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')
def test_exe_wrapper_behaviour(self):
'''
Test that an exe wrapper that isn't found doesn't cause compiler sanity
checks and compiler checks to fail, but causes configure to fail if it
requires running a cross-built executable (custom_target or run_target)
and causes the tests to be skipped if they are run.
'''
testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
# Configures, builds, and tests fine by default
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
os.mkdir(self.builddir)
# Change cross file to use a non-existing exe_wrapper and it should fail
self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
# Force tracebacks so we can detect them properly
env = {'MESON_FORCE_BACKTRACE': '1'}
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*target.*use-exe-wrapper'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Drun-target=false',
inprocess=True,
override_envvars=env)
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*run target.*run-prog'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Dcustom-target=false',
inprocess=True,
override_envvars=env)
self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'],
override_envvars=env)
self.build()
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*PATH'):
# Must run in-process or we'll get a generic CalledProcessError
self.run_tests(inprocess=True, override_envvars=env)
@skipIfNoPkgconfig
def test_cross_pkg_config_option(self):
testdir = os.path.join(self.unit_test_dir, '55 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
class PythonTests(BasePlatformTests):
'''
Tests that verify compilation of python extension modules
'''
def test_versions(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Skipping python tests with {} backend'.format(self.backend.name))
testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')
# No python version specified, this will use meson's python
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
# When specifying a known name, (python2 / python3) the module
# will also try 'python' as a fallback and use it if the major
# version matches
try:
self.init(testdir, extra_args=['-Dpython=python2'])
self.build()
self.run_tests()
except unittest.SkipTest:
# python2 is not necessarily installed on the test machine. If it is
# not, or the python headers can't be found, the test will raise
# MESON_SKIP_TEST. We could check beforehand what version of python is
# available, but that is the module's job and a bit of a chicken-and-egg
# problem, so we just ask for forgiveness rather than permission.
pass
self.wipe()
for py in ('pypy', 'pypy3'):
try:
self.init(testdir, extra_args=['-Dpython=%s' % py])
except unittest.SkipTest:
# Same as above, pypy2 and pypy3 are not expected to be present
# on the test system, the test project only raises in these cases
continue
# We have a pypy, this is expected to work
self.build()
self.run_tests()
self.wipe()
# The test is configured to error out with MESON_SKIP_TEST
# in case it could not find python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=not-python'])
self.wipe()
# While dir is an external command on both Windows and Linux,
# it certainly isn't python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=dir'])
self.wipe()
class RewriterTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.maxDiff = None
def prime(self, dirname):
copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir)
def rewrite_raw(self, directory, args):
if isinstance(args, str):
args = [args]
command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args
p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, timeout=60)
print('STDOUT:')
print(p.stdout)
print('STDERR:')
print(p.stderr)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
if not p.stderr:
return {}
return json.loads(p.stderr)
def rewrite(self, directory, args):
if isinstance(args, str):
args = [args]
return self.rewrite_raw(directory, ['command'] + args)
def test_target_source_list(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_add_sources_abs(self):
self.prime('1 basic')
abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']]
add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}])
inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}])
self.rewrite(self.builddir, add)
out = self.rewrite(self.builddir, inf)
expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}}
self.assertDictEqual(out, expected)
def test_target_remove_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_subdir(self):
self.prime('2 subdirs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']}
self.assertDictEqual(list(out['target'].values())[0], expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(list(out['target'].values())[0], expected)
def test_target_remove(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_remove_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, {})
def test_target_add_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c']}
self.assertDictEqual(out['target']['94b671c@@something@exe'], expected)
def test_target_source_sorting(self):
self.prime('5 sorting')
add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}])
inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}])
out = self.rewrite(self.builddir, add_json)
out = self.rewrite(self.builddir, inf_json)
expected = {
'target': {
'exe1@exe': {
'name': 'exe1',
'sources': [
'aaa/a/a1.c',
'aaa/b/b1.c',
'aaa/b/b2.c',
'aaa/f1.c',
'aaa/f2.c',
'aaa/f3.c',
'bbb/a/b1.c',
'bbb/b/b2.c',
'bbb/c1/b5.c',
'bbb/c2/b7.c',
'bbb/c10/b6.c',
'bbb/a4.c',
'bbb/b3.c',
'bbb/b4.c',
'bbb/b5.c',
'a1.c',
'a2.c',
'a3.c',
'a10.c',
'a20.c',
'a30.c',
'a100.c',
'a101.c',
'a110.c',
'a210.c',
'a666.c',
'b1.c',
'c2.c'
]
}
}
}
self.assertDictEqual(out, expected)
def test_target_same_name_skip(self):
self.prime('4 same name targets')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'myExe', 'sources': ['main.cpp']}
self.assertEqual(len(out['target']), 2)
for val in out['target'].values():
self.assertDictEqual(expected, val)
def test_kwargs_info(self):
self.prime('3 kwargs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']},
'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'},
'dependency#dep1': {'required': True, 'method': 'cmake'}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_add(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': 'GPL'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove_regex(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {},
'target#tgt1': {},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
class NativeFileTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.testcase = os.path.join(self.unit_test_dir, '47 native file binary')
self.current_config = 0
self.current_wrapper = 0
def helper_create_native_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of
{section: {key: value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
f.write("{}='{}'\n".format(k, v))
return filename
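# Illustrative note (added, not part of the original suite): for a values dict
# like {'binaries': {'bash': '/usr/bin/bash'}}, the helper above writes an
# INI-style machine file along these lines:
#
#   [binaries]
#   bash='/usr/bin/bash'
#
# which the tests then pass to meson via --native-file <path>.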
def helper_create_binary_wrapper(self, binary, dir_=None, **kwargs):
"""Creates a wrapper around a binary that overrides specific values."""
filename = os.path.join(dir_ or self.builddir, 'binary_wrapper{}.py'.format(self.current_wrapper))
self.current_wrapper += 1
if is_haiku():
chbang = '#!/bin/env python3'
else:
chbang = '#!/usr/bin/env python3'
with open(filename, 'wt') as f:
f.write(textwrap.dedent('''\
{}
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
'''.format(chbang)))
for name in kwargs:
f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
f.write(' args, extra_args = parser.parse_known_args()\n')
for name, value in kwargs.items():
f.write(' if args.{}:\n'.format(name))
f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
f.write(' sys.exit(0)\n')
f.write(textwrap.dedent('''
ret = subprocess.run(
["{}"] + extra_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(ret.stdout.decode('utf-8'))
print(ret.stderr.decode('utf-8'), file=sys.stderr)
sys.exit(ret.returncode)
if __name__ == '__main__':
main()
'''.format(binary)))
if not is_windows():
os.chmod(filename, 0o755)
return filename
# On windows we need yet another level of indirection, as cmd cannot
# invoke python files itself, so instead we generate a .bat file, which
# invokes our python wrapper
batfile = os.path.join(self.builddir, 'binary_wrapper{}.bat'.format(self.current_wrapper))
with open(batfile, 'wt') as f:
if mesonbuild.environment.detect_msys2_arch():
f.write(r'@python3 {} %*'.format(filename))
else:
f.write('@{} {} %*'.format(sys.executable, filename))
return batfile
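# Added usage sketch (hedged, not from the original file): a call such as
# helper_create_binary_wrapper('bash', version='12345') yields a small python
# script that prints "12345" when invoked with --version and forwards any other
# arguments to the real 'bash'; on Windows the returned .bat file adds one more
# layer of indirection so cmd can execute the python wrapper.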
def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST):
"""Helper for generating tests for overriding compilers for langaugages
with more than one implementation, such as C, C++, ObjC, ObjC++, and D.
"""
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, for_machine)
cc = getter()
binary, newid = cb(cc)
env.binaries[for_machine].binaries[lang] = binary
compiler = getter()
self.assertEqual(compiler.id, newid)
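# Added summary note: the cb callback receives the currently detected compiler
# and returns a (binary_name, expected_compiler_id) pair; the helper then
# overrides the binary table for that language and asserts that re-detection
# picks up the override, which is what the per-language tests below rely on.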
def test_multiple_native_files_override(self):
wrapper = self.helper_create_binary_wrapper('bash', version='foo')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
# This test hangs on cygwin.
@unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non Unix OSes.')
def test_native_file_is_pipe(self):
fifo = os.path.join(self.builddir, 'native.file')
os.mkfifo(fifo)
with tempfile.TemporaryDirectory() as d:
wrapper = self.helper_create_binary_wrapper('bash', d, version='12345')
def filler():
with open(fifo, 'w') as f:
f.write('[binaries]\n')
f.write("bash = '{}'\n".format(wrapper))
thread = threading.Thread(target=filler)
thread.start()
self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program'])
thread.join()
os.unlink(fifo)
self.init(self.testcase, extra_args=['--wipe'])
def test_multiple_native_files(self):
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('python')
config2 = self.helper_create_native_file({'binaries': {'python': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
def _simple_test(self, case, binary):
wrapper = self.helper_create_binary_wrapper(binary, version='12345')
config = self.helper_create_native_file({'binaries': {binary: wrapper}})
self.init(self.testcase, extra_args=['--native-file', config, '-Dcase={}'.format(case)])
def test_find_program(self):
self._simple_test('find_program', 'bash')
def test_config_tool_dep(self):
# Do the skip at this level to avoid screwing up the cache
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
if not shutil.which('llvm-config'):
raise unittest.SkipTest('llvm-config not installed, cannot test')
self._simple_test('config_dep', 'llvm-config')
def test_python3_module(self):
self._simple_test('python3', 'python3')
def test_python_module(self):
if is_windows():
# Bat adds extra crap to stdout, so the version check logic in the
# python module breaks. This is fine on other OSes because they
# don't need the extra indirection.
raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
self._simple_test('python', 'python')
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CC')
def test_c_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('c', cb)
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CXX')
def test_cpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('cpp', cb)
@skip_if_not_language('objc')
@skip_if_env_set('OBJC')
def test_objc_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('objc', cb)
@skip_if_not_language('objcpp')
@skip_if_env_set('OBJCXX')
def test_objcpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('objcpp', cb)
@skip_if_not_language('d')
@skip_if_env_set('DC')
def test_d_compiler(self):
def cb(comp):
if comp.id == 'dmd':
if shutil.which('ldc'):
return 'ldc', 'ldc'
elif shutil.which('gdc'):
return 'gdc', 'gdc'
else:
raise unittest.SkipTest('No alternative dlang compiler found.')
if shutil.which('dmd'):
return 'dmd', 'dmd'
raise unittest.SkipTest('No alternative dlang compiler found.')
self.helper_for_compiler('d', cb)
@skip_if_not_language('cs')
@skip_if_env_set('CSC')
def test_cs_compiler(self):
def cb(comp):
if comp.id == 'csc':
if not shutil.which('mcs'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'mcs', 'mcs'
if not shutil.which('csc'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'csc', 'csc'
self.helper_for_compiler('cs', cb)
@skip_if_not_language('fortran')
@skip_if_env_set('FC')
def test_fortran_compiler(self):
def cb(comp):
if comp.id == 'lcc':
if shutil.which('lfortran'):
return 'lfortran', 'lcc'
raise unittest.SkipTest('No alternate Fortran implementation.')
elif comp.id == 'gcc':
if shutil.which('ifort'):
# There is an ICC for windows (windows build, linux host),
# but we don't support that ATM, so let's not worry about it.
if is_windows():
return 'ifort', 'intel-cl'
return 'ifort', 'intel'
elif shutil.which('flang'):
return 'flang', 'flang'
elif shutil.which('pgfortran'):
return 'pgfortran', 'pgi'
# XXX: there are several other fortran compilers meson
# supports, but I don't have any of them to test with
raise unittest.SkipTest('No alternate Fortran implementation.')
if not shutil.which('gfortran'):
raise unittest.SkipTest('No alternate Fortran implementation.')
return 'gfortran', 'gcc'
self.helper_for_compiler('fortran', cb)
def _single_implementation_compiler(self, lang, binary, version_str, version):
"""Helper for languages with a single (supported) implementation.
Builds a wrapper around the compiler to override the version.
"""
wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, MachineChoice.HOST)
env.binaries.host.binaries[lang] = wrapper
compiler = getter()
self.assertEqual(compiler.version, version)
@skip_if_not_language('vala')
@skip_if_env_set('VALAC')
def test_vala_compiler(self):
self._single_implementation_compiler(
'vala', 'valac', 'Vala 1.2345', '1.2345')
@skip_if_not_language('rust')
@skip_if_env_set('RUSTC')
def test_rust_compiler(self):
self._single_implementation_compiler(
'rust', 'rustc', 'rustc 1.2345', '1.2345')
@skip_if_not_language('java')
def test_java_compiler(self):
self._single_implementation_compiler(
'java', 'javac', 'javac 9.99.77', '9.99.77')
@skip_if_not_language('swift')
def test_swift_compiler(self):
wrapper = self.helper_create_binary_wrapper(
'swiftc', version='Swift 1.2345', outfile='stderr')
env = get_fake_env()
env.binaries.host.binaries['swift'] = wrapper
compiler = env.detect_swift_compiler(MachineChoice.HOST)
self.assertEqual(compiler.version, '1.2345')
def test_native_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '57 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile')])
def test_native_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '57 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib'])
def test_compile_sys_path(self):
"""Compiling with a native file stored in a system path works.
There was a bug which caused the paths to be stored incorrectly and
would result in ninja invoking meson in an infinite loop. This tests
for that by actually invoking ninja.
"""
testcase = os.path.join(self.common_test_dir, '1 trivial')
# It really doesn't matter what's in the native file, just that it exists
config = self.helper_create_native_file({'binaries': {'bash': 'false'}})
self.init(testcase, extra_args=['--native-file', config])
self.build()
class CrossFileTests(BasePlatformTests):
"""Tests for cross file functioality not directly related to
cross compiling.
This is mainly aimed to testing overrides from cross files.
"""
def test_cross_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '57 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '57 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib',
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_chain(self):
# crossfile2 overrides crossfile overrides nativefile
testcase = os.path.join(self.unit_test_dir, '57 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'--cross-file', os.path.join(testcase, 'crossfile2'),
'-Ddef_bindir=binbar2',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
class TAPParserTests(unittest.TestCase):
def assert_test(self, events, **kwargs):
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Test(**kwargs))
def assert_plan(self, events, **kwargs):
if 'skipped' not in kwargs:
kwargs['skipped'] = False
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Plan(**kwargs))
def assert_version(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Version(**kwargs))
def assert_error(self, events):
self.assertEqual(type(next(events)), TAPParser.Error)
def assert_bailout(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
def assert_last(self, events):
with self.assertRaises(StopIteration):
next(events)
def parse_tap(self, s):
parser = TAPParser(io.StringIO(s))
return iter(parser.parse())
def parse_tap_v13(self, s):
events = self.parse_tap('TAP version 13\n' + s)
self.assert_version(events, version=13)
return events
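# Added illustration of the input format: a minimal TAP stream such as
#
#   1..2
#   ok 1 first
#   not ok 2 second
#
# is parsed into a Plan(count=2) event followed by two Test events (OK, then
# FAIL), which is exactly what the assert_* helpers above are checked against.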
def test_empty(self):
events = self.parse_tap('')
self.assert_last(events)
def test_empty_plan(self):
events = self.parse_tap('1..0')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_last(events)
def test_plan_directive(self):
events = self.parse_tap('1..0 # skipped for some reason')
self.assert_plan(events, count=0, late=False, skipped=True,
explanation='for some reason')
self.assert_last(events)
events = self.parse_tap('1..1 # skipped for some reason\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=True,
explanation='for some reason')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('1..1 # todo not supported here\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=False,
explanation='not supported here')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_ok(self):
events = self.parse_tap('ok')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_number(self):
events = self.parse_tap('ok 1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_name(self):
events = self.parse_tap('ok 1 abc')
self.assert_test(events, number=1, name='abc', result=TestResult.OK)
self.assert_last(events)
def test_one_test_not_ok(self):
events = self.parse_tap('not ok')
self.assert_test(events, number=1, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_one_test_todo(self):
events = self.parse_tap('not ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_one_test_skip(self):
events = self.parse_tap('ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
def test_one_test_skip_failure(self):
events = self.parse_tap('not ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
self.assert_last(events)
def test_many_early_plan(self):
events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
self.assert_plan(events, count=4, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_many_late_plan(self):
events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_plan(events, count=4, late=True)
self.assert_last(events)
def test_directive_case(self):
events = self.parse_tap('ok 1 abc # skip')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_directive_explanation(self):
events = self.parse_tap('ok 1 abc # skip why')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
explanation='why')
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo Because')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
explanation='Because')
self.assert_last(events)
def test_one_test_early_plan(self):
events = self.parse_tap('1..1\nok')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_late_plan(self):
events = self.parse_tap('ok\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_out_of_order(self):
events = self.parse_tap('ok 2')
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_middle_plan(self):
events = self.parse_tap('ok 1\n1..2\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=2, late=True)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many_plans(self):
events = self.parse_tap('1..1\n1..2\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=1, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..1\nok 1\nnot ok 2')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..3')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=3, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..3\nok 1\nnot ok 2')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few_bailout(self):
events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_bailout(events, message='no third test')
self.assert_last(events)
def test_diagnostics(self):
events = self.parse_tap('1..1\n# ignored\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_unexpected(self):
events = self.parse_tap('1..1\ninvalid\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_version(self):
events = self.parse_tap('TAP version 13\n')
self.assert_version(events, version=13)
self.assert_last(events)
events = self.parse_tap('TAP version 12\n')
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..0\nTAP version 13\n')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_error(events)
self.assert_last(events)
def test_yaml(self):
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_last(events)
def _clang_at_least(compiler, minver: str, apple_minver: str) -> bool:
"""
Check that the Clang compiler is at least a specified version, whether AppleClang or regular Clang.
Parameters
----------
compiler:
Meson compiler object
minver: str
Clang minimum version
apple_minver: str
AppleClang minimum version
Returns
-------
at_least: bool
Clang is at least the specified version
"""
if compiler.compiler_type == mesonbuild.compilers.CompilerType.CLANG_OSX:
return version_compare(compiler.version, apple_minver)
return version_compare(compiler.version, minver)
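# Minimal usage sketch (assumed, not taken from the original file): guard a
# test on a sufficiently new Clang, e.g.
#
#   cc = env.detect_c_compiler(MachineChoice.HOST)
#   if not _clang_at_least(cc, '>=5.0.0', '>=9.0'):
#       raise unittest.SkipTest('Clang too old for this test')
#
# where the two version strings follow meson's version_compare() syntax and
# the second one applies when the detected compiler is AppleClang.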
def unset_envs():
# For unit tests we must fully control all command lines
# so that there are no unexpected changes coming from the
# environment, for example when doing a package build.
varnames = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.cflags_mapping.values())
for v in varnames:
if v in os.environ:
del os.environ[v]
def main():
unset_envs()
pytest_args = ['-n', 'auto', './run_unittests.py']
if shutil.which('pytest-3'):
return subprocess.run(['pytest-3'] + pytest_args).returncode
elif shutil.which('pytest'):
return subprocess.run(['pytest'] + pytest_args).returncode
try:
import pytest # noqa: F401
return subprocess.run(python_command + ['-m', 'pytest'] + pytest_args).returncode
except ImportError:
pass
# All attempts at locating pytest failed, fall back to plain unittest.
cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests',
'TAPParserTests',
'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests',
'WindowsTests', 'DarwinTests']
return unittest.main(defaultTest=cases, buffer=True)
if __name__ == '__main__':
sys.exit(main())
|
resource_owner_grant.py
|
import json
import logging
import os
import signal
import sys
import urllib2
from multiprocessing.process import Process
from urllib2 import HTTPError
from wsgiref.simple_server import make_server
sys.path.insert(0, os.path.abspath(os.path.realpath(__file__) + '/../../../'))
from oauth2.compatibility import parse_qs, urlencode
from oauth2 import Provider
from oauth2.error import UserNotAuthenticated
from oauth2.store.memory import ClientStore, TokenStore
from oauth2.tokengenerator import Uuid4
from oauth2.web import ResourceOwnerGrantSiteAdapter
from oauth2.web.wsgi import Application
from oauth2.grant import ResourceOwnerGrant
logging.basicConfig(level=logging.DEBUG)
class ClientApplication(object):
"""
Very basic application that simulates calls to the API of the
python-oauth2 app.
"""
client_id = "abc"
client_secret = "xyz"
token_endpoint = "http://localhost:8080/token"
LOGIN_TEMPLATE = """<html>
<body>
<h1>Test Login</h1>
<div style="color: red;">
{failed_message}
</div>
<form method="POST" name="confirmation_form" action="/request_token">
<div>
Username (foo): <input name="username" type="text" />
</div>
<div>
Password (bar): <input name="password" type="password" />
</div>
<div>
<input type="submit" value="submit" />
</div>
</form>
</body>
</html>"""
SERVER_ERROR_TEMPLATE = """<html>
<body>
<h1>OAuth2 server responded with an error</h1>
Error type: {error_type}
Error description: {error_description}
</body>
</html>"""
TOKEN_TEMPLATE = """<html>
<body>
<div>Access token: {access_token}</div>
<div>
<a href="/reset">Reset</a>
</div>
</body>
</html>"""
def __init__(self):
self.token = None
self.token_type = ""
def __call__(self, env, start_response):
if env["PATH_INFO"] == "/login":
status, body, headers = self._login(failed=env["QUERY_STRING"] == "failed=1")
elif env["PATH_INFO"] == "/":
status, body, headers = self._display_token()
elif env["PATH_INFO"] == "/request_token":
status, body, headers = self._request_token(env)
elif env["PATH_INFO"] == "/reset":
status, body, headers = self._reset()
else:
status = "301 Moved"
body = ""
headers = {"Location": "/"}
start_response(status,
[(header, val) for header,val in headers.iteritems()])
return body
def _display_token(self):
"""
Display token information or redirect to login prompt if none is
available.
"""
if self.token is None:
return "301 Moved", "", {"Location": "/login"}
return ("200 OK",
self.TOKEN_TEMPLATE.format(
access_token=self.token["access_token"]),
{"Content-Type": "text/html"})
def _login(self, failed=False):
"""
Login prompt
"""
if failed:
content = self.LOGIN_TEMPLATE.format(failed_message="Login failed")
else:
content = self.LOGIN_TEMPLATE.format(failed_message="")
return "200 OK", content, {"Content-Type": "text/html"}
def _request_token(self, env):
"""
Retrieves a new access token from the OAuth2 server.
"""
params = {}
content = env['wsgi.input'].read(int(env['CONTENT_LENGTH']))
post_params = parse_qs(content)
# Convert to dict for easier access
for param, value in post_params.items():
decoded_param = param.decode('utf-8')
decoded_value = value[0].decode('utf-8')
if decoded_param == "username" or decoded_param == "password":
params[decoded_param] = decoded_value
params["grant_type"] = "password"
params["client_id"] = self.client_id
params["client_secret"] = self.client_secret
# Request an access token by POSTing a request to the auth server.
try:
response = urllib2.urlopen(self.token_endpoint, urlencode(params))
except HTTPError as he:
if he.code == 400:
error_body = json.loads(he.read())
body = self.SERVER_ERROR_TEMPLATE\
.format(error_type=error_body["error"],
error_description=error_body["error_description"])
return "400 Bad Request", body, {"Content-Type": "text/html"}
if he.code == 401:
return "302 Found", "", {"Location": "/login?failed=1"}
self.token = json.load(response)
return "301 Moved", "", {"Location": "/"}
def _reset(self):
self.token = None
return "302 Found", "", {"Location": "/login"}
class TestSiteAdapter(ResourceOwnerGrantSiteAdapter):
def authenticate(self, request, environ, scopes, client):
username = request.post_param("username")
password = request.post_param("password")
# A real world application could connect to a database, try to
# retrieve username and password and compare them against the input
if username == "foo" and password == "bar":
return
raise UserNotAuthenticated
def run_app_server():
app = ClientApplication()
try:
httpd = make_server('', 8081, app)
print("Starting Client app on http://localhost:8081/...")
httpd.serve_forever()
except KeyboardInterrupt:
httpd.server_close()
def run_auth_server():
try:
client_store = ClientStore()
client_store.add_client(client_id="abc", client_secret="xyz",
redirect_uris=[])
token_store = TokenStore()
provider = Provider(
access_token_store=token_store,
auth_code_store=token_store,
client_store=client_store,
token_generator=Uuid4())
provider.add_grant(
ResourceOwnerGrant(site_adapter=TestSiteAdapter())
)
app = Application(provider=provider)
httpd = make_server('', 8080, app)
print("Starting OAuth2 server on http://localhost:8080/...")
httpd.serve_forever()
except KeyboardInterrupt:
httpd.server_close()
def main():
auth_server = Process(target=run_auth_server)
auth_server.start()
app_server = Process(target=run_app_server)
app_server.start()
print("Visit http://localhost:8081/ in your browser")
def sigint_handler(signal, frame):
print("Terminating servers...")
auth_server.terminate()
auth_server.join()
app_server.terminate()
app_server.join()
signal.signal(signal.SIGINT, sigint_handler)
if __name__ == "__main__":
main()
|
agent_a3c_hg.py
|
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import cv2
import tensorflow as tf
import threading
import sys
import time
import os
def MakeDir(path):
try:
os.makedirs(path)
except OSError:
# Ignore the error if the directory already exists.
pass
lab = False
load_model = False
train = True
test_display = False
test_write_video = True
path_work_dir = "rl_3d/"
vizdoom_path = "ViZDoom/"
vizdoom_scenario = vizdoom_path + "scenarios/health_gathering.wad"
if (lab):
from env_lab import EnvLab
model_path = path_work_dir + "model_lab_a3c/"
else:
from env_vizdoom_hgg import EnvVizDoom
model_path = path_work_dir + "model_vizdoom_a3c/"
learning_rate = 0.00025
device = "/cpu:0"
num_workers = 3
t_max = 30
frame_repeat = 4 #10 # 4
gamma = 0.99
step_num = int(5e5)
save_each = 0.01 * step_num
step_load = 100
entropy_beta = 0.01
grad_norm_clip = 40.0
global_scope_name = "global"
step = 0
train_scores = []
lock = threading.Lock()
start_time = 0
# Global.
env = None
MakeDir(model_path)
model_name = model_path + "a3c"
def PrintStat(elapsed_time, step, step_num, train_scores):
steps_per_s = 1.0 * step / elapsed_time
steps_per_m = 60.0 * step / elapsed_time
steps_per_h = 3600.0 * step / elapsed_time
steps_remain = step_num - step
remain_h = int(steps_remain / steps_per_h)
remain_m = int((steps_remain - remain_h * steps_per_h) / steps_per_m)
remain_s = int((steps_remain - remain_h * steps_per_h - remain_m * steps_per_m) / steps_per_s)
elapsed_h = int(elapsed_time / 3600)
elapsed_m = int((elapsed_time - elapsed_h * 3600) / 60)
elapsed_s = int((elapsed_time - elapsed_h * 3600 - elapsed_m * 60))
print("{}% | Steps: {}/{}, {:.2f}M step/h, {:02}:{:02}:{:02}/{:02}:{:02}:{:02}".format(
100.0 * step / step_num, step, step_num, steps_per_h / 1e6,
elapsed_h, elapsed_m, elapsed_s, remain_h, remain_m, remain_s), file=sys.stderr)
mean_train = 0
std_train = 0
min_train = 0
max_train = 0
if (len(train_scores) > 0):
train_scores = np.array(train_scores)
mean_train = train_scores.mean()
std_train = train_scores.std()
min_train = train_scores.min()
max_train = train_scores.max()
print("Episodes: {} Rewards: mean: {:.2f}, std: {:.2f}, min: {:.2f}, max: {:.2f}".format(
len(train_scores), mean_train, std_train, min_train, max_train), file=sys.stderr)
channels = 3
resolution = (40, 40, channels)
def Preprocess(frame):
if (channels == 1):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = cv2.resize(frame, (resolution[1], resolution[0]))
return np.reshape(frame, resolution)
class ACNet(object):
def __init__(self, num_actions, scope, trainer):
with tf.variable_scope(scope):
self.inputs = tf.placeholder(shape=[None] + list(resolution), dtype=tf.float32)
conv1 = tf.contrib.layers.conv2d(self.inputs, num_outputs=16, kernel_size=[3, 3], stride=[2, 2])
conv2 = tf.contrib.layers.conv2d(conv1, num_outputs=32, kernel_size=[3, 3], stride=[2, 2])
conv2_flat = tf.contrib.layers.flatten(conv2)
hidden = tf.contrib.layers.fully_connected(conv2_flat, 256)
# Recurrent network for temporal dependencies
# Introduce a "fake" batch dimension of 1 after flatten so that we can do LSTM over time dim
rnn_in = tf.expand_dims(hidden, [0])
lstm_size = 256
lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_size, state_is_tuple=True)
step_size = tf.shape(self.inputs)[:1]
c_init = np.zeros((1, lstm_cell.state_size.c), dtype=np.float32)
h_init = np.zeros((1, lstm_cell.state_size.h), dtype=np.float32)
self.state_init = [c_init, h_init]
self.rnn_state = self.state_init
c_in = tf.placeholder(shape=[1, lstm_cell.state_size.c], dtype=tf.float32)
h_in = tf.placeholder(shape=[1, lstm_cell.state_size.h], dtype=tf.float32)
self.state_in = (c_in, h_in)
state_in = tf.contrib.rnn.LSTMStateTuple(c_in, h_in)
lstm_outputs, lstm_state = tf.nn.dynamic_rnn(lstm_cell, rnn_in, initial_state=state_in,
sequence_length=step_size, time_major=False)
lstm_c, lstm_h = lstm_state
rnn_out = tf.reshape(lstm_outputs, [-1, lstm_size])
self.state_out = (lstm_c[:1, :], lstm_h[:1, :])
# Output layers for policy and value estimations
self.policy = tf.contrib.layers.fully_connected(rnn_out, num_actions, activation_fn=tf.nn.softmax,
weights_initializer=self.normalized_columns_initializer(0.01),
biases_initializer=None)
self.value = tf.contrib.layers.fully_connected(rnn_out, 1, activation_fn=None,
weights_initializer=self.normalized_columns_initializer(1.0),
biases_initializer=None)
# Only the worker networks need ops for loss functions and gradient updating.
if (scope != global_scope_name):
self.actions = tf.placeholder(shape=[None], dtype=tf.int32)
actions_onehot = tf.one_hot(self.actions, num_actions, dtype=tf.float32)
self.target_v = tf.placeholder(shape=[None], dtype=tf.float32)
self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
responsible_outputs = tf.reduce_sum(self.policy * actions_onehot, [1])
# Loss functions
value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value, [-1])))
entropy = -tf.reduce_sum(self.policy * tf.log(self.policy))
policy_loss = -tf.reduce_sum(tf.log(responsible_outputs) * self.advantages)
self.loss = 0.5 * value_loss + policy_loss - entropy * entropy_beta
# Get gradients from local network using local losses
local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
self.gradients = tf.gradients(self.loss, local_vars)
if (grad_norm_clip != None):
grads, _ = tf.clip_by_global_norm(self.gradients, grad_norm_clip)
else:
grads = self.gradients
# Apply local gradients to global network
global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, global_scope_name)
self.apply_grads = trainer.apply_gradients(zip(grads, global_vars))
# Used to initialize weights for policy and value output layers
def normalized_columns_initializer(self, std = 1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
def Train(self, sess, discounted_rewards, states, actions, advantages):
states = states / 255.0
self.ResetLstm()
feed_dict = {self.target_v : discounted_rewards,
self.inputs : np.stack(states, axis=0),
self.actions : actions,
self.advantages : advantages,
self.state_in[0] : self.rnn_state[0],
self.state_in[1] : self.rnn_state[1]}
_ = sess.run([self.apply_grads], feed_dict=feed_dict)
def ResetLstm(self):
self.rnn_state = self.state_init
def GetAction(self, sess, state):
state = state / 255.0
a_dist, v, self.rnn_state = sess.run([self.policy, self.value, self.state_out],
feed_dict={self.inputs: [state],
self.state_in[0]: self.rnn_state[0],
self.state_in[1]: self.rnn_state[1]})
a = np.random.choice(a_dist[0], p=a_dist[0])
a = np.argmax(a_dist == a)
return a, v[0, 0]
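# Added note on the sampling idiom above: np.random.choice draws one of the
# probability values from the policy distribution, and np.argmax(a_dist == a)
# maps that value back to its action index, so actions are sampled in
# proportion to the policy's probabilities.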
def GetValue(self, sess, state):
state = state / 255.0
v = sess.run([self.value],
feed_dict={self.inputs: [state],
self.state_in[0]: self.rnn_state[0],
self.state_in[1]: self.rnn_state[1]})
return v[0][0, 0]
class Worker(object):
def __init__(self, number, num_actions, trainer, model_name):
self.name = "worker_" + str(number)
self.number = number
self.model_name = model_name
# Create the local copy of the network and the tensorflow op to copy global parameters to the local network
self.local_ac = ACNet(num_actions, self.name, trainer)
self.update_target_graph = self.update_target(global_scope_name, self.name)
if (lab):
self.env = EnvLab(80, 80, 60, "seekavoid_arena_01")
else:
self.env = EnvVizDoom(vizdoom_scenario)
# Copies one set of variables to another.
# Used to set worker network parameters to those of global network.
def update_target(self, from_scope, to_scope):
from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)
to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)
op_holder = []
for from_var, to_var in zip(from_vars, to_vars):
op_holder.append(to_var.assign(from_var))
return op_holder
# Calculate discounted returns.
def Discount(self, x, gamma):
for idx in reversed(range(len(x) - 1)):
x[idx] += x[idx + 1] * gamma
return x
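# Worked example (added for clarity): with gamma = 0.99,
# Discount([1.0, 1.0, 1.0], 0.99) returns [2.9701, 1.99, 1.0], i.e. each entry
# becomes the discounted sum of itself and all later rewards in the list.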
def Start(self, session, saver, coord):
worker_process = lambda: self.Process(session, saver, coord)
thread = threading.Thread(target=worker_process)
thread.start()
global start_time
start_time = time.time()
return thread
def Train(self, episode_buffer, sess, bootstrap_value):
episode_buffer = np.array(episode_buffer)
states = episode_buffer[:, 0]
actions = episode_buffer[:, 1]
rewards = episode_buffer[:, 2]
values = episode_buffer[:, 3]
# Here we take the rewards and values from the episode_buffer, and use them to
# generate the advantage and discounted returns.
# The advantage function uses "Generalized Advantage Estimation"
rewards_plus = np.asarray(rewards.tolist() + [bootstrap_value])
discounted_rewards = self.Discount(rewards_plus, gamma)[:-1]
value_plus = np.asarray(values.tolist() + [bootstrap_value])
advantages = rewards + gamma * value_plus[1:] - value_plus[:-1]
advantages = self.Discount(advantages, gamma)
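# Added explanatory note: the two Discount() calls above build the usual A3C
# targets. discounted_rewards[t] is the bootstrapped n-step return
# r_t + gamma*r_{t+1} + ... + gamma^k*bootstrap_value, while advantages starts
# from the one-step TD errors delta_t = r_t + gamma*V(s_{t+1}) - V(s_t) and
# discounts them, which corresponds to Generalized Advantage Estimation with
# lambda = 1.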
# Update the global network using gradients from loss
# Generate network statistics to periodically save
self.local_ac.Train(sess, discounted_rewards, states, actions, advantages)
def Process(self, sess, saver, coord):
global step, train_scores, start_time, lock
print("Starting worker " + str(self.number))
while (not coord.should_stop()):
sess.run(self.update_target_graph)
episode_buffer = []
episode_reward = 0
self.env.Reset()
s = self.env.Observation()
s = Preprocess(s)
self.local_ac.ResetLstm()
while (self.env.IsRunning()):
# Take an action using probabilities from policy network output.
a, v = self.local_ac.GetAction(sess, s)
r = self.env.Act(a, frame_repeat)
finished = not self.env.IsRunning()
if (not finished):
s1 = self.env.Observation()
s1 = Preprocess(s1)
else:
s1 = None
episode_buffer.append([s, a, r, v])
episode_reward += r
s = s1
lock.acquire()
step += 1
if (step % save_each == 0):
model_name_curr = self.model_name + "_{:04}".format(int(step / save_each))
print("\nSaving the network weigths to:", model_name_curr, file=sys.stderr)
saver.save(sess, model_name_curr)
PrintStat(time.time() - start_time, step, step_num, train_scores)
train_scores = []
if (step == step_num):
coord.request_stop()
lock.release()
# If the episode hasn't ended, but the experience buffer is full, then we
# make an update step using that experience rollout.
if (len(episode_buffer) == t_max or (finished and len(episode_buffer) > 0)):
# Since we don't know what the true final return is,
# we "bootstrap" from our current value estimation.
if (not finished):
v1 = self.local_ac.GetValue(sess, s)
self.Train(episode_buffer, sess, v1)
episode_buffer = []
sess.run(self.update_target_graph)
else:
self.Train(episode_buffer, sess, 0.0)
lock.acquire()
train_scores.append(episode_reward)
lock.release()
class Agent(object):
def __init__(self):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.log_device_placement = False
config.allow_soft_placement = True
self.session = tf.Session(config=config)
with tf.device(device):
# Global network
self.global_net = ACNet(env.NumActions(), global_scope_name, None)
if (train):
trainer = tf.train.RMSPropOptimizer(learning_rate)
workers = []
for i in range(num_workers):
workers.append(Worker(i, env.NumActions(), trainer, model_name))
saver = tf.train.Saver(max_to_keep=100)
if (load_model):
model_name_curr = model_name + "_{:04}".format(step_load)
print("Loading model from: ", model_name_curr)
saver.restore(self.session, model_name_curr)
else:
self.session.run(tf.global_variables_initializer())
if (train):
coord = tf.train.Coordinator()
# Start the "work" process for each worker in a separate thread.
worker_threads = []
for worker in workers:
thread = worker.Start(self.session, saver, coord)
worker_threads.append(thread)
coord.join(worker_threads)
def Reset(self):
self.global_net.ResetLstm()
def Act(self, state):
action, _ = self.global_net.GetAction(self.session, state)
return action
def Test(agent):
if (test_write_video):
size = (640, 480)
fps = 30.0
fourcc = cv2.VideoWriter_fourcc(*'XVID') # cv2.cv.CV_FOURCC(*'XVID')
out_video = cv2.VideoWriter("drive/test1.avi", fourcc, fps, size)
posX = []
posY = []
posX.append('%')
posY.append('%')
ep_counter = 1
reward_list = []
ep_list = []
reward_total = 0
num_episodes = 30
while (num_episodes != 0):
if (not env.IsRunning()):
env.Reset()
agent.Reset()
posX.append('%')
posY.append('%')
print("Total reward: {}".format(reward_total))
reward_list.append(reward_total)
ep_list.append(ep_counter)
ep_counter+=1
reward_total = 0
num_episodes -= 1
state_raw = env.Observation()
state = Preprocess(state_raw)
action = agent.Act(state)
for _ in range(frame_repeat):
if (test_display):
cv2.imshow("frame-test", state_raw)
cv2.waitKey(20)
if (test_write_video):
out_video.write(state_raw)
reward = env.Act(action, 1)
reward_total += reward
if (not env.IsRunning()):
break
state_raw = env.Observation()
posX.append(env.positionX())
posY.append(env.positionY())
print(reward_list)
print(ep_list)
print(posX)
print(posY)
if __name__ == '__main__':
if (lab):
env = EnvLab(80, 80, 60, "seekavoid_arena_01")
else:
env = EnvVizDoom(vizdoom_scenario)
agent = Agent()
Test(agent)
|
cw-2.py
|
from __future__ import print_function
class AssertException(Exception):
pass
_print = print
'''Fix the dreaded Unicode Error Trap'''
def print(*args, **kwargs):
from sys import stdout
sep = kwargs.get('sep', ' ')
end = kwargs.get('end', '\n')
file = kwargs.get('file', stdout)
def _replace(c):
if ord(c) >= 128: return u'&#{};'.format(ord(c))
return c
def _escape(s): return ''.join(_replace(c) for c in s)
_print(*map(_escape, args), sep=_escape(sep), end=_escape(end), file=file)
def format_message(message):
def _replace(c):
if ord(c) >= 65536: return r'\U' + hex(ord(c))[2:].zfill(8)
if ord(c) >= 128: return r'\u' + hex(ord(c))[2:].zfill(4)
return c
def _escape(s): return ''.join(_replace(c) for c in s)
return _escape(message.replace("\n", "<:LF:>"))
def display(type, message, label="", mode=""):
print("\n<{0}:{1}:{2}>{3}".format(type.upper(), mode.upper(), label, format_message(message)))
def expect(passed=None, message=None, allow_raise=False):
if passed:
display('PASSED', 'Test Passed')
else:
message = message or "Value is not what was expected"
display('FAILED', message)
if allow_raise:
raise AssertException(message)
def assert_equals(actual, expected, message=None, allow_raise=False):
equals_msg = "{0} should equal {1}".format(repr(actual), repr(expected))
if message is None:
message = equals_msg
else:
message += ": " + equals_msg
expect(actual == expected, message, allow_raise)
def assert_not_equals(actual, expected, message=None, allow_raise=False):
equals_msg = "{0} should not equal {1}".format(repr(actual), repr(expected))
if message is None:
message = equals_msg
else:
message += ": " + equals_msg
expect(not (actual == expected), message, allow_raise)
def expect_error(message, function):
passed = False
try:
function()
except:
passed = True
expect(passed, message)
def pass_(): expect(True)
def fail(message): expect(False, message)
def assert_approx_equals(actual, expected, margin=1e-9, message=None, allow_raise=False):
equals_msg = "{0} should be close to {1} with absolute or relative margin of {2}".format(
repr(actual), repr(expected), repr(margin))
if message is None: message = equals_msg
else: message += ": " + equals_msg
div = max(abs(actual), abs(expected), 1)
expect(abs((actual - expected) / div) < margin, message, allow_raise)
'''
Usage:
@describe('describe text')
def describe1():
@it('it text')
def it1():
# some test cases...
'''
def _timed_block_factory(opening_text):
from timeit import default_timer as timer
from traceback import format_exception
from sys import exc_info
def _timed_block_decorator(s, before=None, after=None):
display(opening_text, s)
def wrapper(func):
if callable(before): before()
time = timer()
try: func()
except:
fail('Unexpected exception raised')
tb_str = ''.join(format_exception(*exc_info()))
display('ERROR', tb_str)
display('COMPLETEDIN', '{:.2f}'.format((timer() - time) * 1000))
if callable(after): after()
return wrapper
return _timed_block_decorator
describe = _timed_block_factory('DESCRIBE')
it = _timed_block_factory('IT')
'''
Timeout utility
Usage:
@timeout(sec)
def some_tests():
any code block...
Note: Timeout value can be a float.
'''
def timeout(sec):
def wrapper(func):
from multiprocessing import Process
process = Process(target=func)
process.start()
process.join(sec)
if process.is_alive():
fail('Exceeded time limit of {:.3f} seconds'.format(sec))
process.terminate()
process.join()
return wrapper
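# --- Illustrative usage (not part of the original framework) ---
# A minimal, hypothetical self-test of the helpers defined above (describe/it,
# assert_equals, expect_error). It is guarded so it only runs when this file is
# executed directly, never when the framework is imported by a test runner.
if __name__ == '__main__':
    @describe('arithmetic sanity checks')
    def _arithmetic_suite():
        @it('adds small integers')
        def _addition_case():
            assert_equals(1 + 1, 2)

        @it('raises on division by zero')
        def _error_case():
            expect_error('dividing by zero should raise', lambda: 1 / 0)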
|
run.py
|
import socket
import time
import threading
import hashlib
import json
from base64 import b64encode, b64decode
from flask import Flask, jsonify, request
from Crypto.Cipher import AES
class Run:
def __init__(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def connect(self):
server_ip = "Server IP Address"
server_port = 0000
try:
self.s.connect((server_ip, server_port))
print("You have connected to the server successfully!")
while True:
try:
self.data = self.s.recv(500000000).decode('utf-8')
except ConnectionResetError:
raise RuntimeError("The server has shut down!")
except ConnectionAbortedError:
raise RuntimeError("The server has shut down!")
except AttributeError:
raise AssertionError("Either you have not connected to the server, or you just need to refresh the page you're trying to access!")
except socket.error:
raise RuntimeError("Your attempt to connect to the server failed!")
def send(self, message):
try:
self.s.send(str(message).encode())
return True
except socket.error:
raise RuntimeError("Your attempt to send a message to the server failed!")
def get_graph(self):
self.send("get_graph")
time.sleep(0.1)
try:
if "index" in util.data:
graph_ = eval(util.data)
try:
with open("user/graph.json", "x") as f:
json.dump(graph_, f)
f.close()
except FileExistsError:
with open("user/graph.json", "w") as f:
f.truncate(0)
json.dump(graph_, f)
f.close()
except AttributeError:
raise AssertionError("Either you have not connected to the server, or you just need to refresh the page you're trying to access!")
@staticmethod
def get_graph_file(self):
with open("user/graph.json", "r") as f:
graph_from_file = dict(json.load(f))
return graph_from_file
@staticmethod
def aes_wallet_decrypt(self, phrase_hash, keystore):
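# (Assumed layout) The keystore handled below is expected to carry base64-encoded
# AES-EAX fields produced on the wallet-creation side, roughly:
#   cipher = AES.new(phrase_bytes, AES.MODE_EAX)
#   cipher_text, tag = cipher.encrypt_and_digest(payload_bytes)
#   keystore = {"nonce": b64(cipher.nonce), "cipher_text": b64(cipher_text),
#               "tag": b64(tag), "hash": sha256(phrase_bytes).hexdigest()}
# This is a sketch inferred from the decrypt/verify calls here, not the server's actual code.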
with open("user/lixur_phrase.txt", "r") as f:
user_input = f.read().replace(" ", "")
if hashlib.sha256(user_input.encode('utf-8')).hexdigest() == phrase_hash:
cipher = AES.new(bytes(user_input, encoding='utf-8'), AES.MODE_EAX, b64decode(keystore['nonce'].encode('utf-8')))
plaintext = cipher.decrypt(b64decode(keystore['cipher_text'].encode('utf-8')))
cipher.verify(b64decode(keystore['tag'].encode('utf-8')))
private_key = eval(plaintext.decode('utf-8'))['_']
public_key = eval(plaintext.decode('utf-8'))['__']
alphanumeric_address = hashlib.sha256(public_key).hexdigest()
try:
return private_key, public_key, alphanumeric_address
except NameError:
if hash(user_input) == phrase_hash:
return private_key, public_key, alphanumeric_address
else:
print(f'Decryption failed! {hash(user_input)} does not match the hash of the keystore: {phrase_hash}')
def get_balance(self, address):
balance = 0
graph_data = self.get_graph_file(self)
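# Walk every transaction in the local graph: a self-transaction (sender == recipient)
# nets +amount (effectively a mint); otherwise the sender is debited and the
# recipient is credited.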
for tx in graph_data:
if graph_data[tx]['sender'] == address and graph_data[tx]['recipient'] == address:
balance += float(graph_data[tx]['amount'])
if address == graph_data[tx]['sender']:
balance -= float(graph_data[tx]["amount"])
if address == graph_data[tx]["recipient"]:
balance += float(graph_data[tx]["amount"])
balance = float(balance)
return balance
def does_address_exist(self, address):
with open('user/graph.json', 'r') as f:
data = dict(json.load(f))
addresses = []
for x in data.values():
addresses.append(x['sender'])
addresses.append(x['recipient'])
if address in addresses:
return True
else:
return False
def make_transaction(self, sender, receiver, amount, public_key, private_key):
if amount <= 0:
raise ValueError("You must have LXR to send!")
else:
arguments = {
"sender": sender,
"receiver": receiver,
"amount": amount,
"public_key": public_key,
"private_key": private_key
}
self.send(arguments)
time.sleep(1.5)
util.get_graph()
app = Flask(__name__)
util = Run()
@app.route("/", methods=['GET', 'POST'])
def graph():
util.get_graph()
time.sleep(0.25)
util.get_graph()
time.sleep(0.25)
with open('user/graph.json', 'r') as f:
serializable_format = dict(json.load(f))
graph = sorted(serializable_format.items(), key=lambda x: x[1]["index"], reverse=True)
return jsonify(graph), 201
@app.route("/stats", methods=['GET', 'POST'])
def stats():
ledger = util.get_graph_file(util)
unique_addresses = []
for key in ledger:
if ledger[key]['sender'] not in unique_addresses:
unique_addresses.append(ledger[key]['sender'])
if ledger[key]['recipient'] not in unique_addresses:
unique_addresses.append(ledger[key]['recipient'])
unique_addresses = list(set(unique_addresses))
unique_addresses.remove("None")
number_of_unique_addresses = len(unique_addresses) - 1
total_amount_of_lxr = 0
for key in ledger:
if ledger[key]['sender'] == ledger[key]['recipient']:
total_amount_of_lxr += ledger[key]['amount']
total_amount_of_lxr = "{:,}".format(total_amount_of_lxr) + " LXR"
response = {
"Successful Transaction Count": len(ledger.keys()),
"Total Unique Addresses": number_of_unique_addresses,
"Total Supply of LXR": total_amount_of_lxr,
}
return jsonify(response), 201
@app.route('/wallet/new', methods=['GET', 'POST'])
def new_wallet():
util.send('new')
time.sleep(0.5)
if type(eval(util.data)) == tuple and "is_existing" not in locals():
keystore = eval(util.data)[0]
try:
with open("user/lixur_keystore.txt", "x") as f:
f.write(keystore)
f.close()
except FileExistsError:
with open("user/lixur_keystore.txt", "w") as f:
f.truncate(0)
f.write(keystore)
f.close()
phrase = eval(util.data)[1]
try:
with open("user/lixur_phrase.txt", "x") as f:
f.write(str(phrase))
f.close()
except FileExistsError:
with open("user/lixur_phrase.txt", "w") as f:
f.truncate(0)
f.write(str(phrase))
f.close()
print(f"Your seedphrase for your new wallet is: {str(phrase)}")
print("Write it down, store it in a safe place as you'll need it to access your wallet. If you lose your seedphrase, you will lose access to your wallet!")
print("Do not share it with anyone! Anyone with your seedphrase will have unlimited access over your funds, forever!")
print("Your keystore and your phrase have been saved onto your device.")
with open("user/lixur_keystore.txt", "r") as f:
keystore_ = eval(f.read())
wallet_info = util.aes_wallet_decrypt(util, keystore_['hash'], keystore_)
util.make_transaction(wallet_info[2], wallet_info[2], 69420000, wallet_info[1], wallet_info[0])
else:
raise RuntimeError("Something went wrong! Please try again!")
return jsonify('If you have been given your seedphrase, Go to http://127.0.0.1:5000/wallet/load to see your address and balance!'
' If not, refresh the page and try again.')
@app.route("/wallet/load", methods=['GET', 'POST'])
def get_balance():
with open("user/lixur_keystore.txt", "r") as f:
ks = eval(f.read())
decrypt_ks = util.aes_wallet_decrypt(util, ks['hash'], ks)
util.get_graph()
time.sleep(0.5)
if util.does_address_exist(decrypt_ks[2]) == True:
user_stats = {
"address": decrypt_ks[2],
"balance": f'{"{:,}".format(util.get_balance(decrypt_ks[2]))} LXR',
}
else:
raise ValueError(
"The wallet address you're trying to access does not exist on the blockchain. "
"Refresh the page and try again, if the error persists, it means it doesn't exist at all.")
return jsonify(user_stats)
@app.route("/transaction", methods=['GET', 'POST'])
def make_transaction():
with open("user/lixur_keystore.txt", "r") as f:
keystore = eval(f.read())
decrypted_keystore = util.aes_wallet_decrypt(util, keystore['hash'], keystore)
user_private_key = decrypted_keystore[0]
user_public_key = decrypted_keystore[1]
user_address = decrypted_keystore[2]
if util.does_address_exist(user_address) == True:
prep_arguments = {
"sender": user_address,
"receiver": input("Enter the receiver's address: "),
"amount": float(input("Enter the amount of LXR you want to send: ")),
"public_key": user_public_key,
"private_key": user_private_key,
}
if prep_arguments['sender'] == prep_arguments['receiver']:
raise ValueError("You cannot send LXR to yourself!")
if prep_arguments['amount'] == None or float(prep_arguments['amount']) <= 0:
raise ValueError("You cannot send 0 or less LXR!")
if util.does_address_exist(prep_arguments['receiver']) == False:
raise ValueError("The receiver's address does not exist on the blockchain! Refresh the blockchain and try again. If it still persists, it means that it doesn't "
"exist at all.")
else:
print(f'Sending {"{:,}".format(prep_arguments["amount"])} LXR to {str(prep_arguments["receiver"])}...')
util.send(prep_arguments)
time.sleep(1.2)
util.get_graph()
else:
raise ValueError("Your wallet address does not exist on the blockchain. Please try again.")
return jsonify("The transaction has been sent! Refresh the graph and check to see if it has been validated and added to the graph!"), 200
if __name__ == "__main__":
print("Booting up Lixur Testnet [Beta] v0.0.1...\n")
threading.Thread(target=util.connect).start()
time.sleep(.5)
app.run()
|
interaction_view.py
|
import enum
import logging
from threading import Thread
from PySide2 import QtWidgets, QtCore
from .view import BaseView
try:
import nclib
except ImportError:
nclib = None
try:
import keystone
except ImportError:
keystone = None
try:
import archr
except ImportError:
archr = None
_l = logging.getLogger(name=__name__)
# not a namedtuple so it can be mutable. I think this is not a terrible idea.
class SavedInteraction:
def __init__(self, name, protocol, log):
self.name = name
self.protocol = protocol
self.log = log
class ProtocolInteractor:
def __init__(self, view, sock):
self.view = view # type: InteractionView
self.sock = sock # type: nclib.Netcat
def consume_data(self, data):
# try to decode it
# add it to the log, perhaps mutating the last entry if an entire entity wasn't received (no support yet)
raise NotImplementedError
def consume_start(self):
raise NotImplementedError
def consume_eof(self):
raise NotImplementedError
def render_input_form(self):
# determine what the current input control should look like. returns a QWidget.
# if there's already partially written input, translate it to the new form if possible
raise NotImplementedError
def render_log_entry(self, model):
# render a model from the log into a QWidget
raise NotImplementedError
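# For a minimal concrete implementation of this interface, see PlainTextProtocol
# near the bottom of this file.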
class InteractionState(enum.Enum):
BEGINNING = 1
RUNNING = 2
STOPPED = 3
VIEWING = 4
class InteractionView(BaseView):
def __init__(self, workspace, *args, **kwargs):
super().__init__('interaction', workspace, *args, **kwargs)
self.base_caption = 'Interaction'
self.current_log = [] # for now each entry is a dict. each entry has {"dir": "in"/"out", "data": bytes} and then whatever
# "in" here means it's input to the program
self.log_controls = []
self.sock = None # type: nclib.Netcat
self._state = None
self.widget_button_start = None
self.widget_button_stop = None
self.widget_combobox_protocol = None
self.widget_area_log = None
self.widget_input = None
self.widget_text_savename = None
self.widget_group_start = None
self.widget_group_running = None
self.widget_group_save = None
self.widget_group_load = None
self.running_protocol = None # type: ProtocolInteractor
self.chosen_protocol = None # type: type
self._init_widgets()
self._state_transition(InteractionState.BEGINNING)
self._signal_start.connect(self._handler_start)
self._signal_data.connect(self._handler_data)
self._signal_eof.connect(self._handler_eof)
_signal_start = QtCore.Signal()
_signal_data = QtCore.Signal(bytes)
_signal_eof = QtCore.Signal()
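# These signals hand events from the background socket thread back to the Qt GUI
# thread; the connected _handler_* slots therefore run on the GUI side.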
@property
def selected_protocol(self):
return self.workspace.instance.interaction_protocols[self.widget_combobox_protocol.currentIndex()]
# log_add/clear will be called by the base class. it's the subclass' responsibility to call input_show and
# input_hide depending on whether or not the protocol is accepting input
def log_add(self, model):
self.current_log.append(model)
control = self.running_protocol.render_log_entry(model)
control.setParent(self.widget_area_log)
self.widget_area_log.layout().insertWidget(len(self.log_controls), control)
self.log_controls.append(control)
def log_clear(self):
for control in self.log_controls:
self.widget_area_log.layout().removeWidget(control)
control.deleteLater()
self.log_controls = []
self.current_log = []
def input_show(self):
if self.running_protocol is None:
self.input_hide()
return
new_widget = self.running_protocol.render_input_form()
if new_widget is None:
return
if self.widget_input is not None:
self.input_hide()
new_widget.setParent(self.widget_area_log)
self.widget_area_log.layout().insertWidget(len(self.log_controls), new_widget)
self.widget_input = new_widget
def input_hide(self):
if self.widget_input is None:
return
self.widget_area_log.layout().removeWidget(self.widget_input)
self.widget_input.deleteLater()
self.widget_input = None
# events from the thread
def _handler_start(self):
self.running_protocol.consume_start()
def _handler_data(self, data):
self.running_protocol.consume_data(data)
def _handler_eof(self):
self.running_protocol.consume_eof()
self._state_transition(InteractionState.STOPPED)
# data model events
def _handler_update_interactions(self, **kwargs):
while self.widget_combobox_load.count():
self.widget_combobox_load.removeItem(0)
for interaction in self.workspace.instance.interactions:
self.widget_combobox_load.addItem(interaction.name)
def _handler_update_protocols(self, **kwargs):
while self.widget_combobox_protocol.count():
self.widget_combobox_protocol.removeItem(0)
for protocol in self.workspace.instance.interaction_protocols:
self.widget_combobox_protocol.addItem(protocol.__name__)
# utility for tweaking the control panel
def _state_transition(self, state):
self._state = state
if state == InteractionState.BEGINNING:
self.widget_group_start.setHidden(False)
self.widget_group_running.setHidden(True)
self.widget_group_save.setHidden(True)
self.widget_group_load.setHidden(False)
self.input_hide()
self.log_clear()
elif state == InteractionState.RUNNING:
self.widget_group_start.setHidden(True)
self.widget_group_running.setHidden(False)
self.widget_group_save.setHidden(True)
self.widget_group_load.setHidden(True)
self.log_clear()
elif state == InteractionState.STOPPED:
self.widget_group_start.setHidden(False)
self.widget_group_running.setHidden(True)
self.widget_group_save.setHidden(False)
self.widget_group_load.setHidden(False)
self.input_hide()
self.running_protocol = None
elif state == InteractionState.VIEWING:
self.widget_group_start.setHidden(False)
self.widget_group_running.setHidden(True)
self.widget_group_save.setHidden(True)
self.widget_group_load.setHidden(False)
self.input_hide()
self.log_clear()
else:
raise ValueError(state)
# buttons
def _save_interaction(self):
self.workspace.instance.interactions.am_obj.append(SavedInteraction(self.widget_text_savename.text(), self.chosen_protocol, self.current_log))
self.workspace.instance.interactions.am_event()
def _load_interaction(self):
if self.widget_combobox_load.currentIndex() == -1:
return
thing = self.workspace.instance.interactions[self.widget_combobox_load.currentIndex()]
self.chosen_protocol = thing.protocol
self.running_protocol = self.chosen_protocol(self, None) # does this mean the abstractions are fucked?
self._state_transition(InteractionState.VIEWING)
for model in thing.log:
self.log_add(model)
def _abort_interaction(self):
self.running_protocol.sock.close()
self._state_transition(InteractionState.STOPPED)
def _start_interaction(self):
required = {
'archr: git clone https://github.com/angr/archr && cd archr && pip install -e .':archr,
'keystone: pip install --no-binary keystone-engine keystone-engine':keystone
}
is_missing = [ key for key, value in required.items() if value is None ]
if len(is_missing) > 0:
req_msg = 'To use this feature you need to install the following:\n\n\t' + '\n\t'.join(is_missing)
req_msg += '\n\nInstall them to enable this functionality.'
req_msg += '\nRelaunch angr-management after install.'
QtWidgets.QMessageBox.critical(None, 'Dependency error', req_msg)
return
img_name = self.workspace.instance.img_name
if img_name is None:
QtWidgets.QMessageBox.critical(None, 'Nothing to run', "The project was not loaded from a docker image")
return
_l.debug('Initializing the connection to archr with image %s' % img_name)
self._state_transition(InteractionState.RUNNING)
Thread(target=self._socket_thread, args=(img_name,), daemon=True).start()
def _socket_thread(self, img_name):
with archr.targets.DockerImageTarget(img_name).build().start() as target:
with target.flight_context() as flight:
sock = flight.default_channel
sock._raise_timeout = True
self.chosen_protocol = self.selected_protocol
self.running_protocol = self.chosen_protocol(self, sock)
_l.debug("Connected to running target")
self._signal_start.emit()
while self.running_protocol is not None:
try:
data = sock.recv(timeout=1)
except nclib.NetcatTimeout:
continue
except nclib.NetcatError:
break
if not data:
break
self._signal_data.emit(data)
if self.running_protocol is not None:
_l.debug("Connection dropped by server")
self._signal_eof.emit()
else:
_l.debug("Connection closed by client")
def _init_widgets(self):
self.setLayout(QtWidgets.QHBoxLayout(self))
leftBox = QtWidgets.QWidget(self)
leftBox.setLayout(QtWidgets.QVBoxLayout(leftBox))
self.layout().addWidget(leftBox)
box_start = QtWidgets.QGroupBox(leftBox)
box_start.setLayout(QtWidgets.QVBoxLayout(box_start))
box_start.setTitle("New Interaction")
leftBox.layout().addWidget(box_start)
self.widget_group_start = box_start
box_running = QtWidgets.QGroupBox(leftBox)
box_running.setLayout(QtWidgets.QVBoxLayout(box_running))
box_running.setTitle("Interaction Control")
leftBox.layout().addWidget(box_running)
self.widget_group_running = box_running
box_save = QtWidgets.QGroupBox(leftBox)
box_save.setLayout(QtWidgets.QVBoxLayout(box_save))
box_save.setTitle("Save Interaction")
leftBox.layout().addWidget(box_save)
self.widget_group_save = box_save
box_load = QtWidgets.QGroupBox(leftBox)
box_load.setLayout(QtWidgets.QVBoxLayout(box_load))
box_load.setTitle("Load Interaction")
leftBox.layout().addWidget(box_load)
self.widget_group_load = box_load
leftBox.layout().addStretch(0)
protocolBox = QtWidgets.QComboBox(box_start)
box_start.layout().addWidget(protocolBox)
self.widget_combobox_protocol = protocolBox
self.workspace.instance.interaction_protocols.am_subscribe(self._handler_update_protocols)
self._handler_update_protocols()
start_button = QtWidgets.QPushButton(box_start)
start_button.setText("Connect")
start_button.clicked.connect(self._start_interaction)
box_start.layout().addWidget(start_button)
self.widget_button_start = start_button
stop_button = QtWidgets.QPushButton(box_running)
stop_button.setText("Abort Interaction")
stop_button.clicked.connect(self._abort_interaction)
box_running.layout().addWidget(stop_button)
self.widget_button_stop = stop_button
save_text = QtWidgets.QLineEdit(box_save)
save_text.setText("my_interaction")
save_text.setPlaceholderText("Interaction Name")
save_text.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Fixed))
box_save.layout().addWidget(save_text)
self.widget_text_savename = save_text
load_picker = QtWidgets.QComboBox(box_load)
box_load.layout().addWidget(load_picker)
self.widget_combobox_load = load_picker
self.workspace.instance.interactions.am_subscribe(self._handler_update_interactions)
self._handler_update_interactions()
load_button = QtWidgets.QPushButton(box_load)
load_button.setText("Load")
load_button.clicked.connect(self._load_interaction)
box_load.layout().addWidget(load_button)
save_button = QtWidgets.QPushButton(box_save)
save_button.setText("Save")
box_save.layout().addWidget(save_button)
save_button.clicked.connect(self._save_interaction)
scrollArea = QtWidgets.QScrollArea(self)
scrollArea.setWidgetResizable(True)
self.layout().addWidget(scrollArea)
scrollAreaWidgetContents = QtWidgets.QWidget(scrollArea)
scrollAreaWidgetContents.setLayout(QtWidgets.QVBoxLayout(scrollAreaWidgetContents))
scrollArea.setWidget(scrollAreaWidgetContents)
self.widget_area_log = scrollAreaWidgetContents
scrollAreaWidgetContents.layout().addStretch(0)
# Subclass QPlainTextEdit
class SmartPlainTextEdit(QtWidgets.QPlainTextEdit):
def __init__(self, parent, callback):
super(SmartPlainTextEdit, self).__init__(parent)
self._callback = callback
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Return:
if event.modifiers() != QtCore.Qt.ShiftModifier:
self._callback()
return
super(SmartPlainTextEdit, self).keyPressEvent(event)
class PlainTextProtocol(ProtocolInteractor):
def consume_start(self):
# set whatever state related to the beginning of the protocol
# here, we mark that we can accept user input
self.view.input_show()
def consume_data(self, data):
# process the consumption of data coming off the wire
# should deserialize it into whatever form you want and then add it to the log
self.view.log_add({"dir": "out", "data": data})
def consume_eof(self):
# tweak anything you care about on eof
pass
def render_input_form(self):
# will be called whenever we need to show the input form
# should translate any data we need between the old and new forms
if self.view.widget_input is not None:
cur_input = self.view.widget_input.toPlainText()
else:
cur_input = ''
txt = SmartPlainTextEdit(None, self._send_callback)
txt.setPlainText(cur_input)
return txt
def render_log_entry(self, model):
# will be called to render the entries added to the log
txt = QtWidgets.QLabel()
txt.setText(model['data'].decode('latin-1'))
return txt
def _send_callback(self):
data_bytes = self.view.widget_input.toPlainText().encode('latin-1')
self.sock.send(data_bytes)
self.view.log_add({"dir": "in", "data": data_bytes})
self.view.widget_input.setPlainText('')
|
dataset_test.py
|
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dataset."""
import threading
import time
from absl.testing import parameterized
import numpy as np
from reverb import client
from reverb import dataset as reverb_dataset
from reverb import errors
from reverb import item_selectors
from reverb import rate_limiters
from reverb import replay_sample
from reverb import server as reverb_server
import tensorflow.compat.v1 as tf
import tree
from tensorflow.python.framework import tensor_spec # pylint:disable=g-direct-tensorflow-import
def make_server():
return reverb_server.Server(
tables=[
reverb_server.Table(
'dist',
sampler=item_selectors.Prioritized(priority_exponent=1),
remover=item_selectors.Fifo(),
max_size=1000000,
rate_limiter=rate_limiters.MinSize(1)),
reverb_server.Table(
'signatured',
sampler=item_selectors.Prioritized(priority_exponent=1),
remover=item_selectors.Fifo(),
max_size=1000000,
rate_limiter=rate_limiters.MinSize(1),
signature=tf.TensorSpec(dtype=tf.float32, shape=(None, None))),
reverb_server.Table(
'bounded_spec_signatured',
sampler=item_selectors.Prioritized(priority_exponent=1),
remover=item_selectors.Fifo(),
max_size=1000000,
rate_limiter=rate_limiters.MinSize(1),
# Currently only the `shape` and `dtype` of the bounded spec
# is considered during signature check.
# TODO(b/158033101): Check the boundaries as well.
signature=tensor_spec.BoundedTensorSpec(
dtype=tf.float32,
shape=(None, None),
minimum=(0.0, 0.0),
maximum=(10.0, 10.)),
),
],
port=None,
)
class ReplayDatasetTest(tf.test.TestCase, parameterized.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._server = make_server()
cls._client = client.Client(f'localhost:{cls._server.port}')
def tearDown(self):
super().tearDown()
self._client.reset('dist')
self._client.reset('signatured')
self._client.reset('bounded_spec_signatured')
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls._server.stop()
def _populate_replay(self, sequence_length=100, max_time_steps=None):
max_time_steps = max_time_steps or sequence_length
with self._client.writer(max_time_steps) as writer:
for i in range(1000):
writer.append([np.zeros((3, 3), dtype=np.float32)])
if i % 5 == 0 and i >= sequence_length:
writer.create_item(
table='dist', num_timesteps=sequence_length, priority=1)
writer.create_item(
table='signatured', num_timesteps=sequence_length, priority=1)
writer.create_item(
table='bounded_spec_signatured',
num_timesteps=sequence_length,
priority=1)
def _sample_from(self, dataset, num_samples):
iterator = dataset.make_initializable_iterator()
dataset_item = iterator.get_next()
self.evaluate(iterator.initializer)
return [self.evaluate(dataset_item) for _ in range(num_samples)]
@parameterized.named_parameters(
{
'testcase_name': 'default_values',
},
{
'testcase_name': 'num_workers_per_iterator_is_0',
'num_workers_per_iterator': 0,
'want_error': ValueError,
},
{
'testcase_name': 'num_workers_per_iterator_is_1',
'num_workers_per_iterator': 1,
},
{
'testcase_name': 'num_workers_per_iterator_is_minus_1',
'num_workers_per_iterator': -1,
},
{
'testcase_name': 'num_workers_per_iterator_is_minus_2',
'num_workers_per_iterator': -2,
'want_error': ValueError,
},
{
'testcase_name': 'max_samples_per_stream_is_0',
'max_samples_per_stream': 0,
'want_error': ValueError,
},
{
'testcase_name': 'max_samples_per_stream_is_1',
'max_samples_per_stream': 1,
},
{
'testcase_name': 'max_samples_per_stream_is_minus_1',
'max_samples_per_stream': -1,
},
{
'testcase_name': 'max_samples_per_stream_is_minus_2',
'num_workers_per_iterator': -2,
'want_error': ValueError,
},
{
'testcase_name': 'max_in_flight_samples_per_worker_is_0',
'max_in_flight_samples_per_worker': 0,
'want_error': ValueError,
},
{
'testcase_name': 'max_in_flight_samples_per_worker_is_1',
'max_in_flight_samples_per_worker': 1,
},
{
'testcase_name': 'max_in_flight_samples_per_worker_is_minus_1',
'max_in_flight_samples_per_worker': -1,
'want_error': ValueError,
},
{
'testcase_name': 'flexible_batch_size_is_minus_2',
'flexible_batch_size': -2,
'want_error': ValueError,
},
{
'testcase_name': 'flexible_batch_size_is_0',
'flexible_batch_size': 0,
'want_error': ValueError,
},
)
def test_sampler_parameter_validation(self, **kwargs):
dtypes = (tf.float32,)
shapes = (tf.TensorShape([3, 3]),)
if 'max_in_flight_samples_per_worker' not in kwargs:
kwargs['max_in_flight_samples_per_worker'] = 100
if 'want_error' in kwargs:
error = kwargs.pop('want_error')
with self.assertRaises(error):
reverb_dataset.ReplayDataset(self._client.server_address, 'dist',
dtypes, shapes, **kwargs)
else:
reverb_dataset.ReplayDataset(self._client.server_address, 'dist', dtypes,
shapes, **kwargs)
def test_iterate(self):
self._populate_replay()
dataset = reverb_dataset.ReplayDataset(
tf.constant(self._client.server_address),
table=tf.constant('dist'),
dtypes=(tf.float32,),
shapes=(tf.TensorShape([3, 3]),),
max_in_flight_samples_per_worker=100,
flexible_batch_size=2)
got = self._sample_from(dataset, 10)
for sample in got:
self.assertIsInstance(sample, replay_sample.ReplaySample)
# A single sample is returned so the key should be a scalar int64.
self.assertIsInstance(sample.info.key, np.uint64)
np.testing.assert_array_equal(sample.data[0],
np.zeros((3, 3), dtype=np.float32))
def test_distribution_strategy(self):
self._populate_replay()
physical_devices = tf.config.list_physical_devices('CPU')
configs = tf.config.experimental.get_virtual_device_configuration(
physical_devices[0])
if configs is None:
virtual_devices = [tf.config.experimental.VirtualDeviceConfiguration()
for _ in range(4)]
tf.config.experimental.set_virtual_device_configuration(
physical_devices[0], virtual_devices)
strategy = tf.distribute.MirroredStrategy(['/cpu:%d' % i for i in range(4)])
def reverb_dataset_fn(i):
tf.print('Creating dataset for replica; index:', i)
return reverb_dataset.ReplayDataset(
self._client.server_address,
table=tf.constant('dist'),
dtypes=(tf.float32,),
shapes=(tf.TensorShape([3, 3]),),
max_in_flight_samples_per_worker=100).take(2)
def dataset_fn(_):
return tf.data.Dataset.range(4).flat_map(reverb_dataset_fn).take(2 * 4)
ds = strategy.experimental_distribute_datasets_from_function(dataset_fn)
def check_probabilities(_, v):
probability = v.info.probability
self.assertLen(probability.values, 4)
# Don't use any math ops since tensor values seem to contain
# unaligned tensors on some systems; but tf.print doesn't check alignment.
#
# This seems to be caused by a compatibility issue where DistStrat isn't
# well tested when eager mode is disabled. So instead of treating this
# as a true TF bug, we just work around it. We can remove this hack and
# convert it to e.g. tf.assert_greater type check if/when we enable eager
# execution for these tests.
tf.print('Probability values:', probability.values)
def get_next_value(v):
return tf.distribute.get_replica_context().merge_call(
check_probabilities, args=(v,))
@tf.function
def run_strategy(ds_):
i = tf.constant(0)
for v in ds_:
strategy.run(get_next_value, args=(v,))
i += 1
return i
rs = run_strategy(ds)
# Each iteration contains 4 items - one from each replica. We take 8 items
# total, so there should be 2 iterations.
self.assertEqual(2, self.evaluate(rs))
def test_timeout_invalid_arguments(self):
with self.assertRaisesRegex(ValueError, r'must be an integer >= -1'):
reverb_dataset.ReplayDataset(
self._client.server_address,
table='dist',
dtypes=(tf.float32,),
shapes=(tf.TensorShape([3, 3]),),
rate_limiter_timeout_ms=-2,
max_in_flight_samples_per_worker=100)
def test_timeout(self):
dataset_0s = reverb_dataset.ReplayDataset(
self._client.server_address,
table='dist',
dtypes=(tf.float32,),
shapes=(tf.TensorShape([3, 3]),),
rate_limiter_timeout_ms=0,
max_in_flight_samples_per_worker=100)
dataset_1s = reverb_dataset.ReplayDataset(
self._client.server_address,
table='dist',
dtypes=(tf.float32,),
shapes=(tf.TensorShape([3, 3]),),
rate_limiter_timeout_ms=1000,
max_in_flight_samples_per_worker=100)
dataset_2s = reverb_dataset.ReplayDataset(
self._client.server_address,
table='dist',
dtypes=(tf.float32,),
shapes=(tf.TensorShape([3, 3]),),
rate_limiter_timeout_ms=2000,
max_in_flight_samples_per_worker=100)
start_time = time.time()
with self.assertRaisesWithPredicateMatch(tf.errors.OutOfRangeError,
r'End of sequence'):
self._sample_from(dataset_0s, 1)
duration = time.time() - start_time
self.assertGreaterEqual(duration, 0)
self.assertLess(duration, 5)
start_time = time.time()
with self.assertRaisesWithPredicateMatch(tf.errors.OutOfRangeError,
r'End of sequence'):
self._sample_from(dataset_1s, 1)
duration = time.time() - start_time
self.assertGreaterEqual(duration, 1)
self.assertLess(duration, 10)
start_time = time.time()
with self.assertRaisesWithPredicateMatch(tf.errors.OutOfRangeError,
r'End of sequence'):
self._sample_from(dataset_2s, 1)
duration = time.time() - start_time
self.assertGreaterEqual(duration, 2)
self.assertLess(duration, 10)
# If we insert some data, and the rate limiter doesn't force any waiting,
# then we can ask for a timeout of 0s and still get data back.
self._populate_replay()
got = self._sample_from(dataset_0s, 2)
self.assertLen(got, 2)
@parameterized.parameters(['signatured'], ['bounded_spec_signatured'])
def test_inconsistent_signature_size(self, table_name):
self._populate_replay()
dataset = reverb_dataset.ReplayDataset(
self._client.server_address,
table=table_name,
dtypes=(tf.float32, tf.float64),
shapes=(tf.TensorShape([3, 3]), tf.TensorShape([])),
max_in_flight_samples_per_worker=100)
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
r'Inconsistent number of tensors requested from table \'{}\'. '
r'Requested 6 tensors, but table signature shows 5 tensors.'.format(
table_name)):
self._sample_from(dataset, 10)
@parameterized.parameters(['signatured'], ['bounded_spec_signatured'])
def test_incompatible_signature_dtype(self, table_name):
self._populate_replay()
dataset = reverb_dataset.ReplayDataset(
self._client.server_address,
table=table_name,
dtypes=(tf.int64,),
shapes=(tf.TensorShape([3, 3]),),
max_in_flight_samples_per_worker=100)
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
r'Requested incompatible tensor at flattened index 4 from table '
r'\'{}\'. Requested \(dtype, shape\): \(int64, \[3,3\]\). '
r'Signature \(dtype, shape\): \(float, \[\?,\?\]\)'.format(table_name)):
self._sample_from(dataset, 10)
dataset_emit_sequences = reverb_dataset.ReplayDataset(
self._client.server_address,
table=table_name,
dtypes=(tf.int64,),
shapes=(tf.TensorShape([None, 3, 3]),),
emit_timesteps=False,
max_in_flight_samples_per_worker=100)
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
r'Requested incompatible tensor at flattened index 4 from table '
r'\'{}\'. Requested \(dtype, shape\): \(int64, \[3,3\]\). '
r'Signature \(dtype, shape\): \(float, \[\?,\?\]\)'.format(table_name)):
self._sample_from(dataset_emit_sequences, 10)
@parameterized.parameters(['signatured'], ['bounded_spec_signatured'])
def test_incompatible_signature_shape(self, table_name):
self._populate_replay()
dataset = reverb_dataset.ReplayDataset(
self._client.server_address,
table=table_name,
dtypes=(tf.float32,),
shapes=(tf.TensorShape([3]),),
max_in_flight_samples_per_worker=100)
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
r'Requested incompatible tensor at flattened index 4 from table '
r'\'{}\'. Requested \(dtype, shape\): \(float, \[3\]\). '
r'Signature \(dtype, shape\): \(float, \[\?,\?\]\)'.format(table_name)):
self._sample_from(dataset, 10)
dataset_emit_sequences = reverb_dataset.ReplayDataset(
self._client.server_address,
table=table_name,
dtypes=(tf.float32,),
shapes=(tf.TensorShape([None, 3]),),
emit_timesteps=False,
max_in_flight_samples_per_worker=100)
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
r'Requested incompatible tensor at flattened index 4 from table '
r'\'{}\'. Requested \(dtype, shape\): \(float, \[3\]\). '
r'Signature \(dtype, shape\): \(float, \[\?,\?\]\)'.format(table_name)):
self._sample_from(dataset_emit_sequences, 10)
@parameterized.parameters([1], [3], [10])
def test_incompatible_shape_when_using_sequence_length(self, sequence_length):
with self.assertRaises(ValueError):
reverb_dataset.ReplayDataset(
self._client.server_address,
table='dist',
dtypes=(tf.float32,),
shapes=(tf.TensorShape([sequence_length + 1, 3, 3]),),
emit_timesteps=False,
sequence_length=sequence_length,
max_in_flight_samples_per_worker=100)
def test_incompatible_dataset_shapes_and_types_without_signature(self):
self._populate_replay()
ds_wrong_shape = reverb_dataset.ReplayDataset(
self._client.server_address,
table='dist',
dtypes=(tf.float32,),
shapes=(tf.TensorShape([]),),
max_in_flight_samples_per_worker=100)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError,
r'Specification has \(dtype, shape\): \(float, \[\]\). '
r'Tensor has \(dtype, shape\): \(float, \[3,3\]\).'):
self._sample_from(ds_wrong_shape, 1)
ds_full_sequences_wrong_shape = reverb_dataset.ReplayDataset(
self._client.server_address,
table='dist',
dtypes=(tf.float32,),
shapes=(tf.TensorShape([None]),),
emit_timesteps=False,
max_in_flight_samples_per_worker=100)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError,
r'Specification has \(dtype, shape\): \(float, \[\]\). '
r'Tensor has \(dtype, shape\): \(float, \[3,3\]\).'):
self._sample_from(ds_full_sequences_wrong_shape, 1)
@parameterized.parameters(
('dist', 1, 1),
('dist', 1, 3),
('dist', 3, 3),
('dist', 3, 5),
('dist', 10, 10),
('dist', 10, 11),
('signatured', 1, 1),
('signatured', 3, 3),
('signatured', 3, 5),
('signatured', 10, 10),
('bounded_spec_signatured', 1, 1),
('bounded_spec_signatured', 3, 3),
('bounded_spec_signatured', 3, 5),
('bounded_spec_signatured', 10, 10),
)
def test_iterate_with_sequence_length(self, table_name, sequence_length,
max_time_steps):
# Also ensure we get sequence_length-shaped outputs when
# writers' max_time_steps != sequence_length.
self._populate_replay(sequence_length, max_time_steps=max_time_steps)
dataset = reverb_dataset.ReplayDataset(
self._client.server_address,
table=table_name,
dtypes=(tf.float32,),
shapes=(tf.TensorShape([sequence_length, 3, 3]),),
emit_timesteps=False,
sequence_length=sequence_length,
max_in_flight_samples_per_worker=100)
got = self._sample_from(dataset, 10)
for sample in got:
self.assertIsInstance(sample, replay_sample.ReplaySample)
# The keys and data should be batched up by the sequence length.
self.assertEqual(sample.info.key.shape, (sequence_length,))
np.testing.assert_array_equal(
sample.data[0], np.zeros((sequence_length, 3, 3), dtype=np.float32))
@parameterized.parameters(
('dist', 1),
('dist', 3),
('dist', 10),
('signatured', 1),
('signatured', 3),
('signatured', 10),
('bounded_spec_signatured', 1),
('bounded_spec_signatured', 3),
('bounded_spec_signatured', 10),
)
def test_iterate_with_unknown_sequence_length(self, table_name,
sequence_length):
self._populate_replay(sequence_length)
dataset = reverb_dataset.ReplayDataset(
self._client.server_address,
table=table_name,
dtypes=(tf.float32,),
shapes=(tf.TensorShape([None, 3, 3]),),
emit_timesteps=False,
sequence_length=None,
max_in_flight_samples_per_worker=100)
# Check the shape of the items.
iterator = dataset.make_initializable_iterator()
dataset_item = iterator.get_next()
self.assertIsNone(dataset_item.info.key.shape.as_list()[0], None)
self.assertIsNone(dataset_item.data[0].shape.as_list()[0], None)
# Verify that once evaluated, the samples has the expected length.
got = self._sample_from(dataset, 10)
for sample in got:
self.assertIsInstance(sample, replay_sample.ReplaySample)
# The keys and data should be batched up by the sequence length.
self.assertEqual(sample.info.key.shape, (sequence_length,))
np.testing.assert_array_equal(
sample.data[0], np.zeros((sequence_length, 3, 3), dtype=np.float32))
@parameterized.parameters(
('dist', 1, 2),
('dist', 2, 1),
('signatured', 1, 2),
('signatured', 2, 1),
('bounded_spec_signatured', 1, 2),
('bounded_spec_signatured', 2, 1),
)
def test_checks_sequence_length_when_timesteps_emitted(
self, table_name, actual_sequence_length, provided_sequence_length):
self._populate_replay(actual_sequence_length)
dataset = reverb_dataset.ReplayDataset(
self._client.server_address,
table=table_name,
dtypes=(tf.float32,),
shapes=(tf.TensorShape([provided_sequence_length, 3, 3]),),
emit_timesteps=True,
sequence_length=provided_sequence_length,
max_in_flight_samples_per_worker=100)
with self.assertRaises(tf.errors.InvalidArgumentError):
self._sample_from(dataset, 10)
@parameterized.named_parameters(
dict(testcase_name='TableDist', table_name='dist'),
dict(testcase_name='TableSignatured', table_name='signatured'),
dict(
testcase_name='TableBoundedSpecSignatured',
table_name='bounded_spec_signatured'))
def test_iterate_batched(self, table_name):
self._populate_replay()
dataset = reverb_dataset.ReplayDataset(
self._client.server_address,
table=table_name,
dtypes=(tf.float32,),
shapes=(tf.TensorShape([3, 3]),),
max_in_flight_samples_per_worker=100)
dataset = dataset.batch(2, True)
got = self._sample_from(dataset, 10)
for sample in got:
self.assertIsInstance(sample, replay_sample.ReplaySample)
# The keys should be batched up like the data.
self.assertEqual(sample.info.key.shape, (2,))
np.testing.assert_array_equal(sample.data[0],
np.zeros((2, 3, 3), dtype=np.float32))
def test_iterate_nested_and_batched(self):
with self._client.writer(100) as writer:
for i in range(1000):
writer.append({
'observation': {
'data': np.zeros((3, 3), dtype=np.float32),
'extras': [
np.int64(10),
np.ones([1], dtype=np.int32),
],
},
'reward': np.zeros((10, 10), dtype=np.float32),
})
if i % 5 == 0 and i >= 100:
writer.create_item(
table='dist', num_timesteps=100, priority=1)
dataset = reverb_dataset.ReplayDataset(
self._client.server_address,
table='dist',
dtypes=(((tf.float32), (tf.int64, tf.int32)), tf.float32),
shapes=((tf.TensorShape([3, 3]), (tf.TensorShape(None),
tf.TensorShape([1]))),
tf.TensorShape([10, 10])),
max_in_flight_samples_per_worker=100)
dataset = dataset.batch(3)
structure = {
'observation': {
'data':
tf.TensorSpec([3, 3], tf.float32),
'extras': [
tf.TensorSpec([], tf.int64),
tf.TensorSpec([1], tf.int32),
],
},
'reward': tf.TensorSpec([], tf.int64),
}
got = self._sample_from(dataset, 10)
self.assertLen(got, 10)
for sample in got:
self.assertIsInstance(sample, replay_sample.ReplaySample)
transition = tree.unflatten_as(structure, tree.flatten(sample.data))
np.testing.assert_array_equal(transition['observation']['data'],
np.zeros([3, 3, 3], dtype=np.float32))
np.testing.assert_array_equal(transition['observation']['extras'][0],
np.ones([3], dtype=np.int64) * 10)
np.testing.assert_array_equal(transition['observation']['extras'][1],
np.ones([3, 1], dtype=np.int32))
np.testing.assert_array_equal(transition['reward'],
np.zeros([3, 10, 10], dtype=np.float32))
def test_multiple_iterators(self):
with self._client.writer(100) as writer:
for i in range(10):
writer.append([np.ones((81, 81), dtype=np.float32) * i])
writer.create_item(table='dist', num_timesteps=10, priority=1)
trajectory_length = 5
batch_size = 3
dataset = reverb_dataset.ReplayDataset(
self._client.server_address,
table='dist',
dtypes=(tf.float32,),
shapes=(tf.TensorShape([81, 81]),),
max_in_flight_samples_per_worker=100)
dataset = dataset.batch(trajectory_length)
iterators = [
dataset.make_initializable_iterator() for _ in range(batch_size)
]
items = tf.stack(
[tf.squeeze(iterator.get_next().data) for iterator in iterators])
with self.session() as session:
session.run([iterator.initializer for iterator in iterators])
got = session.run(items)
self.assertEqual(got.shape, (batch_size, trajectory_length, 81, 81))
want = np.array(
[[np.ones([81, 81]) * i for i in range(trajectory_length)]] *
batch_size)
np.testing.assert_array_equal(got, want)
def test_iterate_over_blobs(self):
for _ in range(10):
self._client.insert((np.ones([3, 3], dtype=np.int32)), {'dist': 1})
dataset = reverb_dataset.ReplayDataset(
self._client.server_address,
table='dist',
dtypes=(tf.int32,),
shapes=(tf.TensorShape([3, 3]),),
max_in_flight_samples_per_worker=100)
got = self._sample_from(dataset, 20)
self.assertLen(got, 20)
for sample in got:
self.assertIsInstance(sample, replay_sample.ReplaySample)
self.assertIsInstance(sample.info.key, np.uint64)
self.assertIsInstance(sample.info.probability, np.float64)
np.testing.assert_array_equal(sample.data[0],
np.ones((3, 3), dtype=np.int32))
def test_iterate_over_batched_blobs(self):
for _ in range(10):
self._client.insert((np.ones([3, 3], dtype=np.int32)), {'dist': 1})
dataset = reverb_dataset.ReplayDataset(
self._client.server_address,
table='dist',
dtypes=(tf.int32,),
shapes=(tf.TensorShape([3, 3]),),
max_in_flight_samples_per_worker=100)
dataset = dataset.batch(5)
got = self._sample_from(dataset, 20)
self.assertLen(got, 20)
for sample in got:
self.assertIsInstance(sample, replay_sample.ReplaySample)
self.assertEqual(sample.info.key.shape, (5,))
np.testing.assert_array_equal(sample.data[0],
np.ones((5, 3, 3), dtype=np.int32))
def test_converts_spec_lists_into_tuples(self):
for _ in range(10):
data = [
(np.ones([1, 1], dtype=np.int32),),
[
np.ones([3, 3], dtype=np.int8),
(np.ones([2, 2], dtype=np.float64),)
],
]
self._client.insert(data, {'dist': 1})
dataset = reverb_dataset.ReplayDataset(
self._client.server_address,
table='dist',
dtypes=[
(tf.int32,),
[
tf.int8,
(tf.float64,),
],
],
shapes=[
(tf.TensorShape([1, 1]),),
[
tf.TensorShape([3, 3]),
(tf.TensorShape([2, 2]),),
],
],
max_in_flight_samples_per_worker=100)
got = self._sample_from(dataset, 10)
for sample in got:
self.assertIsInstance(sample, replay_sample.ReplaySample)
self.assertIsInstance(sample.info.key, np.uint64)
tree.assert_same_structure(sample.data, (
(None,),
(
None,
(None,),
),
))
def test_session_is_closed_while_op_pending(self):
dataset = reverb_dataset.ReplayDataset(
self._client.server_address,
table='dist',
dtypes=tf.float32,
shapes=tf.TensorShape([]),
max_in_flight_samples_per_worker=100)
iterator = dataset.make_initializable_iterator()
item = iterator.get_next()
def _session_closer(sess, wait_time_secs):
def _fn():
time.sleep(wait_time_secs)
sess.close()
return _fn
with self.session() as sess:
sess.run(iterator.initializer)
thread = threading.Thread(target=_session_closer(sess, 3))
thread.start()
with self.assertRaises(tf.errors.CancelledError):
sess.run(item)
class FromTableSignatureTest(tf.test.TestCase):
def test_table_not_found(self):
server = reverb_server.Server([
reverb_server.Table.queue('table_a', 10),
reverb_server.Table.queue('table_c', 10),
reverb_server.Table.queue('table_b', 10),
])
address = f'localhost:{server.port}'
with self.assertRaisesWithPredicateMatch(
ValueError,
f'Server at {address} does not contain any table named not_found. '
f'Found: table_a, table_b, table_c.'):
reverb_dataset.ReplayDataset.from_table_signature(
address, 'not_found', 100)
def test_server_not_found(self):
with self.assertRaises(errors.DeadlineExceededError):
reverb_dataset.ReplayDataset.from_table_signature(
'localhost:1234', 'not_found', 100, get_signature_timeout_secs=1)
def test_table_does_not_have_signature(self):
server = make_server()
address = f'localhost:{server.port}'
with self.assertRaisesWithPredicateMatch(
ValueError, f'Table dist at {address} does not have a signature.'):
reverb_dataset.ReplayDataset.from_table_signature(
address, 'dist', 100)
def test_sets_dtypes_from_signature(self):
signature = {
'a': {
'b': tf.TensorSpec([3, 3], tf.float32),
'c': tf.TensorSpec([], tf.int64),
},
'x': tf.TensorSpec([None], tf.uint64),
}
server = reverb_server.Server(
[reverb_server.Table.queue('queue', 10, signature=signature)])
dataset = reverb_dataset.ReplayDataset.from_table_signature(
f'localhost:{server.port}', 'queue', 100)
self.assertDictEqual(dataset.element_spec.data, signature)
def test_sets_dtypes_from_bounded_spec_signature(self):
bounded_spec_signature = {
'a': {
'b': tensor_spec.BoundedTensorSpec([3, 3], tf.float32, 0, 3),
'c': tensor_spec.BoundedTensorSpec([], tf.int64, 0, 5),
},
}
server = reverb_server.Server([
reverb_server.Table.queue(
'queue', 10, signature=bounded_spec_signature)
])
dataset = reverb_dataset.ReplayDataset.from_table_signature(
f'localhost:{server.port}', 'queue', 100)
self.assertDictEqual(
dataset.element_spec.data, {
'a': {
'b': tf.TensorSpec([3, 3], tf.float32),
'c': tf.TensorSpec([], tf.int64),
},
})
def test_combines_sequence_length_with_signature_if_not_emit_timestamps(self):
server = reverb_server.Server([
reverb_server.Table.queue(
'queue',
10,
signature={
'a': {
'b': tf.TensorSpec([3, 3], tf.float32),
'c': tf.TensorSpec([], tf.int64),
},
})
])
dataset = reverb_dataset.ReplayDataset.from_table_signature(
f'localhost:{server.port}',
'queue',
100,
emit_timesteps=False,
sequence_length=5)
self.assertDictEqual(
dataset.element_spec.data, {
'a': {
'b': tf.TensorSpec([5, 3, 3], tf.float32),
'c': tf.TensorSpec([5], tf.int64),
},
})
if __name__ == '__main__':
tf.disable_eager_execution()
tf.test.main()
|
ssh_interactive_commnds.py
|
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from builtins import object
import sys
import os
import select
import socket
import paramiko
import threading
import multiprocessing
import time
from . import commands
import subprocess
from fabric.api import *
from fabric.state import connections as fab_connections
from tcutils.commands import ssh, execute_cmd, execute_cmd_out
from tcutils.util import *
from tcutils.fabfile import *
class SshConnect(threading.Thread):
def __init__(self, remoteCmdExecuterObj):
threading.Thread.__init__(self)
self.remoteCmdExecuterObj = remoteCmdExecuterObj
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.load_host_keys(
os.path.expanduser(os.path.join("~", ".ssh", "known_hosts")))
def run(self):
try:
self.ssh.connect(self.remoteCmdExecuterObj.host,
username=self.remoteCmdExecuterObj.username,
password=self.remoteCmdExecuterObj.password)
except:
print("(pid %d) ssh to %s failed.." %
(os.getpid(), self.remoteCmdExecuterObj.host))
return
self.remoteCmdExecuterObj._ssh = self.ssh
class remoteCmdExecuter(object):
def __init__(self):
pass
def execConnect(self, host, username, password):
retry = 0
self.host = host
self.username = username
self.password = password
self._ssh = None
return  # NOTE: this early return leaves the paramiko retry loop below unreachable; execCmd() uses fabric instead of self._ssh
while self._ssh == None and retry < 100:
retry += 1
''' This command hangs. Hence launch a thread in background and timeout '''
t = SshConnect(self)
t.start()
t.join(10)
if self._ssh != None:
break
time.sleep(5)
if self._ssh == None and t.is_alive():
print(
"************ %d. Kill frozen ssh connection to %s, retry" %
(retry, host))
try:
t._Thread__stop()  # private, name-mangled CPython 2 API; absent on Python 3, so this may raise and be ignored
except:
print(
"%d. ssh to %s Thread could not be terminated!, ignore." %
(retry, host))
if self._ssh == None:
print("********* FATAL ********** SSH to %s failed!" % (host))
def execCmd(self, cmd, username, password, node, local_ip):
fab_connections.clear()
with hide('everything'):
with settings(
host_string='%s@%s' % (username, local_ip),
password=password,
warn_only=True, abort_on_prompts=False, debug=True):
if 'show' in cmd:
result = run_netconf_on_node(
host_string='%s@%s' % (
username, node),
password=password,
cmds=cmd, op_format='json')
#ssh_conf_file_alternate = "-o UserKnownHostsFile=/dev/null -o strictHostKeyChecking=no"
else:
result = run_fab_cmd_on_node(  # assign to 'result' so the return below also works for non-show commands
host_string='%s@%s' % (username, node),
password=password, cmd=cmd, as_sudo=True)
return result
def testRemoteCmdExecuter():
aD = remoteCmdExecuter()
aD.execConnect('10.84.7.250', 'root', 'Embe1mpls')
# aD.execConnect( '10.84.7.42', 'root', 'c0ntrail123')
# print aD.execCmd ('ping 39.0.0.1 -I 10.84.7.42 -c 1 -W 1 | grep -i " 0%
# packet loss"')
# execCmd() requires credentials and addresses; reuse the ones stored by execConnect()
# (node and local_ip are assumed to be the same box in this smoke test).
print(aD.execCmd('cli show bgp summary | display xml',
                 aD.username, aD.password, aD.host, aD.host))
# print aD.execCmd ('ifsmon -Id | grep ROUTE')
# print aD.execCmd ('cli -c "show bgp summary"')
if __name__ == "__main__":
processList = []
for i in range(1, 2):
process = multiprocessing.Process(target=testRemoteCmdExecuter)
process.start()
processList.append(process)
for process in processList:
process.join()
|
gpsd_fake.py
|
import time
import socket
import threading
VERSION_HEADER = b'{"class":"VERSION","release":"3.17","rev":"3.17","proto_major":3,"proto_minor":12}\n'
WATCH_COMMAND = b'?WATCH={"enable":true,"json":true}\n'
GPSD_OUTPUT = """
{"class":"DEVICES","devices":[{"class":"DEVICE","path":"/dev/ttyO4","driver":"NMEA0183","activated":"2021-08-13T09:12:40.028Z","flags":1,"native":0,"bps":9600,"parity":"N","stopbits":1,"cycle":1.00}]}
{"class":"WATCH","enable":true,"json":true,"nmea":false,"raw":0,"scaled":false,"timing":false,"split24":false,"pps":false}
{"class":"SKY","device":"/dev/ttyO4","xdop":0.54,"ydop":0.77,"vdop":0.85,"tdop":1.00,"hdop":0.89,"gdop":2.12,"pdop":1.23,"satellites":[{"PRN":27,"el":84,"az":141,"ss":0,"used":false},{"PRN":8,"el":60,"az":294,"ss":16,"used":true},{"PRN":10,"el":60,"az":109,"ss":16,"used":true},{"PRN":23,"el":40,"az":59,"ss":17,"used":false},{"PRN":16,"el":33,"az":188,"ss":26,"used":true},{"PRN":21,"el":28,"az":256,"ss":16,"used":false},{"PRN":18,"el":12,"az":69,"ss":26,"used":true},{"PRN":7,"el":9,"az":288,"ss":0,"used":false},{"PRN":30,"el":9,"az":321,"ss":32,"used":true},{"PRN":15,"el":8,"az":28,"ss":0,"used":false},{"PRN":26,"el":6,"az":175,"ss":0,"used":false},{"PRN":1,"el":3,"az":251,"ss":0,"used":false},{"PRN":32,"el":2,"az":133,"ss":0,"used":false},{"PRN":13,"el":1,"az":2,"ss":0,"used":false},{"PRN":138,"el":0,"az":0,"ss":0,"used":false},{"PRN":83,"el":66,"az":321,"ss":0,"used":false},{"PRN":82,"el":50,"az":68,"ss":0,"used":false},{"PRN":67,"el":43,"az":98,"ss":19,"used":true},{"PRN":73,"el":35,"az":261,"ss":17,"used":true},{"PRN":74,"el":29,"az":320,"ss":21,"used":true},{"PRN":66,"el":27,"az":33,"ss":30,"used":true},{"PRN":68,"el":17,"az":150,"ss":0,"used":false},{"PRN":84,"el":12,"az":279,"ss":0,"used":false},{"PRN":80,"el":11,"az":215,"ss":0,"used":false},{"PRN":81,"el":5,"az":88,"ss":0,"used":false}]}
{"class":"TPV","device":"/dev/ttyO4","mode":3,"ept":0.005,"lat":51.813280233,"lon":6.550214200,"alt":30.393,"epx":8.171,"epy":11.499,"epv":19.550,"track":12.4500,"speed":0.000,"climb":0.000,"eps":23.00,"epc":39.10}
{"class":"SKY","device":"/dev/ttyO4","xdop":0.54,"ydop":0.77,"vdop":0.85,"tdop":1.00,"hdop":0.89,"gdop":2.12,"pdop":1.23,"satellites":[{"PRN":27,"el":84,"az":141,"ss":0,"used":false},{"PRN":8,"el":60,"az":294,"ss":16,"used":true},{"PRN":10,"el":60,"az":109,"ss":16,"used":true},{"PRN":23,"el":40,"az":59,"ss":17,"used":false},{"PRN":16,"el":33,"az":188,"ss":26,"used":true},{"PRN":21,"el":28,"az":256,"ss":16,"used":false},{"PRN":18,"el":12,"az":69,"ss":26,"used":true},{"PRN":7,"el":9,"az":288,"ss":0,"used":false},{"PRN":30,"el":9,"az":321,"ss":33,"used":true},{"PRN":15,"el":8,"az":28,"ss":0,"used":false},{"PRN":26,"el":6,"az":175,"ss":0,"used":false},{"PRN":1,"el":3,"az":251,"ss":0,"used":false},{"PRN":32,"el":2,"az":133,"ss":0,"used":false},{"PRN":13,"el":1,"az":2,"ss":0,"used":false},{"PRN":138,"el":0,"az":0,"ss":0,"used":false},{"PRN":83,"el":66,"az":321,"ss":0,"used":false},{"PRN":82,"el":50,"az":68,"ss":0,"used":false},{"PRN":67,"el":43,"az":98,"ss":19,"used":true},{"PRN":73,"el":35,"az":261,"ss":16,"used":true},{"PRN":74,"el":29,"az":320,"ss":21,"used":true},{"PRN":66,"el":27,"az":33,"ss":30,"used":true},{"PRN":68,"el":17,"az":150,"ss":0,"used":false},{"PRN":84,"el":12,"az":279,"ss":0,"used":false},{"PRN":80,"el":11,"az":215,"ss":0,"used":false},{"PRN":81,"el":5,"az":88,"ss":0,"used":false}]}
{"class":"TPV","device":"/dev/ttyO4","mode":3,"time":"2021-08-13T09:12:41.000Z","ept":0.005,"lat":51.813280233,"lon":6.550214200,"alt":30.393,"epx":8.171,"epy":11.499,"epv":19.550,"track":12.4500,"speed":0.000,"climb":0.000,"eps":23.00,"epc":39.10}
{"class":"SKY","device":"/dev/ttyO4","xdop":0.54,"ydop":0.77,"vdop":0.85,"tdop":1.00,"hdop":0.89,"gdop":2.12,"pdop":1.22,"satellites":[{"PRN":27,"el":84,"az":141,"ss":0,"used":false},{"PRN":8,"el":60,"az":294,"ss":16,"used":true},{"PRN":10,"el":60,"az":109,"ss":17,"used":true},{"PRN":23,"el":40,"az":59,"ss":17,"used":false},{"PRN":16,"el":33,"az":188,"ss":26,"used":true},{"PRN":21,"el":28,"az":256,"ss":16,"used":false},{"PRN":18,"el":12,"az":69,"ss":26,"used":true},{"PRN":7,"el":9,"az":288,"ss":0,"used":false},{"PRN":30,"el":9,"az":321,"ss":33,"used":true},{"PRN":15,"el":8,"az":28,"ss":0,"used":false},{"PRN":26,"el":6,"az":175,"ss":0,"used":false},{"PRN":1,"el":3,"az":251,"ss":0,"used":false},{"PRN":32,"el":2,"az":133,"ss":0,"used":false},{"PRN":13,"el":1,"az":2,"ss":0,"used":false},{"PRN":138,"el":0,"az":0,"ss":0,"used":false},{"PRN":83,"el":66,"az":321,"ss":0,"used":false},{"PRN":82,"el":50,"az":68,"ss":0,"used":false},{"PRN":67,"el":43,"az":98,"ss":19,"used":true},{"PRN":73,"el":35,"az":261,"ss":15,"used":true},{"PRN":74,"el":29,"az":320,"ss":21,"used":true},{"PRN":66,"el":27,"az":33,"ss":30,"used":true},{"PRN":68,"el":17,"az":150,"ss":0,"used":false},{"PRN":84,"el":12,"az":279,"ss":0,"used":false},{"PRN":80,"el":11,"az":215,"ss":0,"used":false},{"PRN":81,"el":5,"az":88,"ss":0,"used":false}]}
{"class":"TPV","device":"/dev/ttyO4","mode":3,"time":"2021-08-13T09:12:42.000Z","ept":0.005,"lat":51.813280233,"lon":6.550214200,"alt":30.393,"epx":8.171,"epy":11.499,"epv":19.550,"track":12.4500,"speed":0.000,"climb":0.000,"eps":23.00,"epc":39.10}
"""
def fake_gpsd_server():
    # Listen on the default gpsd port (2947) on localhost.
    addr = ("127.0.0.1", 2947)
    if hasattr(socket, "create_server"):
        # Python 3.8+: create_server handles bind/listen and port reuse in one call.
        sock = socket.create_server(address=addr, reuse_port=True)
    else:
        sock = socket.socket()
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        sock.bind(addr)
        sock.listen(1)
    client, _ = sock.accept()
    # gpsd announces itself with a VERSION object before accepting commands.
    client.send(VERSION_HEADER)
    if client.recv(100) == WATCH_COMMAND:
        # Stream the canned JSON report in small chunks to mimic a live gpsd feed.
        n = 120
        chunks = [GPSD_OUTPUT[i : i + n] for i in range(0, len(GPSD_OUTPUT), n)]
        for chunk in chunks:
            client.send(chunk.encode("utf-8"))
            time.sleep(0.01)
if __name__ == "__main__":
server = threading.Thread(target=fake_gpsd_server)
server.start()
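# --- Hypothetical usage sketch (not part of the original script) ---
# A minimal client for the fake server above, assuming it is already listening
# on 127.0.0.1:2947: it reads the VERSION banner, sends the same ?WATCH command
# a real gpsd client would, and prints a few chunks of the streamed JSON.
def example_client():
    with socket.create_connection(("127.0.0.1", 2947)) as conn:
        print(conn.recv(1024).decode("utf-8"))  # VERSION banner
        conn.sendall(WATCH_COMMAND)             # request JSON watch mode
        # Read a few chunks of the report; a real client would parse each
        # newline-terminated JSON object here.
        for _ in range(10):
            print(conn.recv(4096).decode("utf-8"), end="")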
|
GardenManager.py
|
#GardenManager.py
from AnalogProbe import *
from Pump import *
from MCP3008 import MCP3008
from tkinter import *
import time
import threading
upperLimit = 10.50 #Starting pH for upper limit
lowerLimit = 8.50 #Starting pH for lower limit
def addToUpperLimit():
global upperLimit
if(upperLimit < 13.9):
upperLimit = upperLimit + 0.1
        upperLimitDisplay = Label (win, text="{:.1f}".format(upperLimit), font=("Arial Bold",40),bg='NavajoWhite2') #creates labels for upper limits
upperLimitDisplay.place(x=520,y=125)
#print("Upper Limit is now",upperLimit)
else:
pass
def subFromUpperLimit():
global upperLimit
if(upperLimit > lowerLimit + 0.6):
upperLimit = upperLimit - 0.1
upperLimitDisplay = Label (win, text="{:.1f}".format(upperLimit), font=("Arial Bold",40),bg='NavajoWhite2')
upperLimitDisplay.place(x=520,y=125)
#print("Upper Limit is now",upperLimit)
else:
pass
def addToLowerLimit():
global lowerLimit
if(lowerLimit < upperLimit - 0.6):
lowerLimit = lowerLimit + 0.1
lowerLimitDisplay = Label (win, text="{:.1f}".format(lowerLimit), font=("Arial Bold",40),bg='NavajoWhite2')
lowerLimitDisplay.place(x=520,y=275)
#print("Lower Limit is now",lowerLimit)
else:
pass
def subFromLowerLimit():
global lowerLimit
if(lowerLimit > 0.1):
lowerLimit = lowerLimit - 0.1
lowerLimitDisplay = Label (win, text="{:.1f}".format(lowerLimit), font=("Arial Bold",40),bg='NavajoWhite2')
lowerLimitDisplay.place(x=520,y=275)
#print("Lower Limit is now",lowerLimit)
else:
pass
# Dispenses 2 ml of base solution
def dispenseBase():
    # Creates object p1 of class Pump
    p1 = Pump('/dev/ttyAMA1')
    # D is for dispense, number is for volume in ml
    p1.send_cmd("D,2")
# Dispenses 2 ml of acid solution
def dispenseAcid():
    # Creates object p2 of class Pump
    p2 = Pump('/dev/ttyAMA0')
    # D is for dispense, number is for volume in ml
    p2.send_cmd("D,2")
# Note: the button callbacks above already clamp the limits, so the lower limit
# can never exceed the upper limit, the upper limit stays below 14 and the
# lower limit stays above 0.
win = Tk()
f = Frame(win)
# Title
win.title("pH Dashboard")
win.minsize(width=800,height=480) #matched to the Element14 7" touchscreen display resolution
win.configure(bg='NavajoWhite3')
# Displays current ph on a label and updates every 2 seconds
def displaypH():
while True:
# gets current pH and stores in variable currentph
currentph = "{:.2f}".format(AnalogProbe.getpH())
currentphlabel = Label (win, text=currentph,font=("Arial Bold",60),bg='NavajoWhite2')
currentphlabel.place(x=240,y=170)
time.sleep(2)
# Tests if pH is within range and dispenses Acid or Base accordingly
def regulatepH():
while True:
if(AnalogProbe.getpH() > upperLimit):
print("too basic. dispensing acid")
dispenseAcid()
time.sleep(5)
        elif(AnalogProbe.getpH() < lowerLimit):  # elif: don't report "within range" right after dispensing acid
print("too acidic. dispensing base")
dispenseBase()
time.sleep(5)
else:
print("ph is within range. testing again in 5 seconds")
time.sleep(5)
# Displays upper and lower limit before any changes are made via buttons
upperLimitDisplay = Label (win, text=upperLimit, font=("Arial Bold",40),bg='NavajoWhite2') #creates labels for upper limits
upperLimitDisplay.place(x=520,y=125)
lowerLimitDisplay = Label (win, text=lowerLimit, font=("Arial Bold",40),bg='NavajoWhite2')
lowerLimitDisplay.place(x=520,y=275)
# Displays Up and Down buttons to control the upper and lower thresholds
button1 = Button(win, text=' Up ', command = addToUpperLimit,bg='NavajoWhite2' )
button1.place(x=650,y=120)
button2 = Button(win, text='Down', command = subFromUpperLimit,bg='NavajoWhite2')
button2.place(x=650,y=160)
button3 = Button(win, text=' Up ', command = addToLowerLimit,bg='NavajoWhite2')
button3.place(x=650,y=270)
button4 = Button(win, text='Down', command = subFromLowerLimit,bg='NavajoWhite2')
button4.place(x=650,y=310)
# Dispense Acid Button
button5 = Button(win, text='Dispense Acid', command = dispenseAcid,bg='NavajoWhite2', font=("Arial Bold",20))
button5.place(x=80,y=370)
# Dispense Base Button
button6 = Button(win, text='Dispense Base', command = dispenseBase,bg='NavajoWhite2', font=("Arial Bold",20))
button6.place(x=330,y=370)
# Creates label "The current pH is:"
lbl = Label (win, text="pH: ", font=("Arial Bold",60),bg='NavajoWhite2')
lbl.place(x=80,y=170)
header = Label(win, text = " Auto pH Regulator ",font=("Arial Bold",50),bg='NavajoWhite2' )
header.place(x=70,y=30)
x = threading.Thread(target = displaypH, daemon = True) #starts a new daemon thread for displaypH so that the GUI doesn't freeze
x.start()
y = threading.Thread(target = regulatepH, daemon = True) #daemon thread lets the program exit when the window is closed
y.start()
win.mainloop()
|
test_session.py
|
import os
import threading
import time
import socket
import pytest
import cherrypy
from cherrypy._cpcompat import (
copykeys, json_decode,
HTTPConnection, HTTPSConnection
)
from cherrypy.lib import sessions
from cherrypy.lib import reprconf
from cherrypy.lib.httputil import response_codes
from cherrypy.test import helper
localDir = os.path.dirname(__file__)
def http_methods_allowed(methods=['GET', 'HEAD']):
method = cherrypy.request.method.upper()
if method not in methods:
cherrypy.response.headers['Allow'] = ', '.join(methods)
raise cherrypy.HTTPError(405)
cherrypy.tools.allow = cherrypy.Tool('on_start_resource', http_methods_allowed) # noqa: E305
def setup_server():
@cherrypy.config(**{
'tools.sessions.on': True,
'tools.sessions.storage_class': sessions.RamSession,
'tools.sessions.storage_path': localDir,
'tools.sessions.timeout': (1.0 / 60),
'tools.sessions.clean_freq': (1.0 / 60),
})
class Root:
@cherrypy.expose
def clear(self):
cherrypy.session.cache.clear()
@cherrypy.expose
def data(self):
cherrypy.session['aha'] = 'foo'
return repr(cherrypy.session._data)
@cherrypy.expose
def testGen(self):
counter = cherrypy.session.get('counter', 0) + 1
cherrypy.session['counter'] = counter
yield str(counter)
@cherrypy.expose
def testStr(self):
counter = cherrypy.session.get('counter', 0) + 1
cherrypy.session['counter'] = counter
return str(counter)
@cherrypy.expose
@cherrypy.config(**{'tools.sessions.on': False})
def set_session_cls(self, new_cls_name):
new_cls = reprconf.attributes(new_cls_name)
cfg = {'tools.sessions.storage_class': new_cls}
self.__class__._cp_config.update(cfg)
if hasattr(cherrypy, 'session'):
del cherrypy.session
if new_cls.clean_thread:
new_cls.clean_thread.stop()
new_cls.clean_thread.unsubscribe()
del new_cls.clean_thread
@cherrypy.expose
def index(self):
sess = cherrypy.session
c = sess.get('counter', 0) + 1
time.sleep(0.01)
sess['counter'] = c
return str(c)
@cherrypy.expose
def keyin(self, key):
return str(key in cherrypy.session)
@cherrypy.expose
def delete(self):
cherrypy.session.delete()
sessions.expire()
return 'done'
@cherrypy.expose
def delkey(self, key):
del cherrypy.session[key]
return 'OK'
@cherrypy.expose
def redir_target(self):
return self._cp_config['tools.sessions.storage_class'].__name__
@cherrypy.expose
def iredir(self):
raise cherrypy.InternalRedirect('/redir_target')
@cherrypy.expose
@cherrypy.config(**{
'tools.allow.on': True,
'tools.allow.methods': ['GET'],
})
def restricted(self):
return cherrypy.request.method
@cherrypy.expose
def regen(self):
cherrypy.tools.sessions.regenerate()
return 'logged in'
@cherrypy.expose
def length(self):
return str(len(cherrypy.session))
@cherrypy.expose
@cherrypy.config(**{
'tools.sessions.path': '/session_cookie',
'tools.sessions.name': 'temp',
'tools.sessions.persistent': False,
})
def session_cookie(self):
# Must load() to start the clean thread.
cherrypy.session.load()
return cherrypy.session.id
cherrypy.tree.mount(Root())
class SessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def tearDown(self):
# Clean up sessions.
for fname in os.listdir(localDir):
if fname.startswith(sessions.FileSession.SESSION_PREFIX):
os.unlink(os.path.join(localDir, fname))
@pytest.mark.xfail(reason='#1534')
def test_0_Session(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.RamSession')
self.getPage('/clear')
# Test that a normal request gets the same id in the cookies.
# Note: this wouldn't work if /data didn't load the session.
self.getPage('/data')
self.assertBody("{'aha': 'foo'}")
c = self.cookies[0]
self.getPage('/data', self.cookies)
self.assertEqual(self.cookies[0], c)
self.getPage('/testStr')
self.assertBody('1')
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is an 'expires' param
self.assertEqual(set(cookie_parts.keys()),
set(['session_id', 'expires', 'Path']))
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/data', self.cookies)
self.assertDictEqual(json_decode(self.body),
{'counter': 3, 'aha': 'foo'})
self.getPage('/length', self.cookies)
self.assertBody('2')
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
self.getPage('/set_session_cls/cherrypy.lib.sessions.FileSession')
self.getPage('/testStr')
self.assertBody('1')
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
# Wait for the session.timeout (1 second)
time.sleep(2)
self.getPage('/')
self.assertBody('1')
self.getPage('/length', self.cookies)
self.assertBody('1')
# Test session __contains__
self.getPage('/keyin?key=counter', self.cookies)
self.assertBody('True')
cookieset1 = self.cookies
# Make a new session and test __len__ again
self.getPage('/')
self.getPage('/length', self.cookies)
self.assertBody('2')
# Test session delete
self.getPage('/delete', self.cookies)
self.assertBody('done')
self.getPage('/delete', cookieset1)
self.assertBody('done')
f = lambda: [
x for x in os.listdir(localDir) if x.startswith('session-')]
self.assertEqual(f(), [])
# Wait for the cleanup thread to delete remaining session files
self.getPage('/')
f = lambda: [
x for x in os.listdir(localDir) if x.startswith('session-')]
self.assertNotEqual(f(), [])
time.sleep(2)
self.assertEqual(f(), [])
def test_1_Ram_Concurrency(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.RamSession')
self._test_Concurrency()
@pytest.mark.xfail(reason='#1306')
def test_2_File_Concurrency(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.FileSession')
self._test_Concurrency()
def _test_Concurrency(self):
client_thread_count = 5
request_count = 30
# Get initial cookie
self.getPage('/')
self.assertBody('1')
cookies = self.cookies
data_dict = {}
errors = []
def request(index):
if self.scheme == 'https':
c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT))
else:
c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
for i in range(request_count):
c.putrequest('GET', '/')
for k, v in cookies:
c.putheader(k, v)
c.endheaders()
response = c.getresponse()
body = response.read()
if response.status != 200 or not body.isdigit():
errors.append((response.status, body))
else:
data_dict[index] = max(data_dict[index], int(body))
# Uncomment the following line to prove threads overlap.
# sys.stdout.write("%d " % index)
# Start <request_count> requests from each of
# <client_thread_count> concurrent clients
ts = []
for c in range(client_thread_count):
data_dict[c] = 0
t = threading.Thread(target=request, args=(c,))
ts.append(t)
t.start()
for t in ts:
t.join()
hitcount = max(data_dict.values())
expected = 1 + (client_thread_count * request_count)
for e in errors:
print(e)
self.assertEqual(hitcount, expected)
def test_3_Redirect(self):
# Start a new session
self.getPage('/testStr')
self.getPage('/iredir', self.cookies)
self.assertBody('FileSession')
@pytest.mark.xfail(reason='#1540')
def test_4_File_deletion(self):
# Start a new session
self.getPage('/testStr')
# Delete the session file manually and retry.
id = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
path = os.path.join(localDir, 'session-' + id)
os.unlink(path)
self.getPage('/testStr', self.cookies)
@pytest.mark.xfail(reason='#1557')
def test_5_Error_paths(self):
self.getPage('/unknown/page')
self.assertErrorPage(404, "The path '/unknown/page' was not found.")
# Note: this path is *not* the same as above. The above
# takes a normal route through the session code; this one
# skips the session code's before_handler and only calls
# before_finalize (save) and on_end (close). So the session
# code has to survive calling save/close without init.
self.getPage('/restricted', self.cookies, method='POST')
self.assertErrorPage(405, response_codes[405][1])
def test_6_regenerate(self):
self.getPage('/testStr')
# grab the cookie ID
id1 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
self.getPage('/regen')
self.assertBody('logged in')
id2 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
self.assertNotEqual(id1, id2)
self.getPage('/testStr')
# grab the cookie ID
id1 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
self.getPage('/testStr',
headers=[
('Cookie',
'session_id=maliciousid; '
'expires=Sat, 27 Oct 2017 04:18:28 GMT; Path=/;')])
id2 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
self.assertNotEqual(id1, id2)
self.assertNotEqual(id2, 'maliciousid')
def test_7_session_cookies(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.RamSession')
self.getPage('/clear')
self.getPage('/session_cookie')
# grab the cookie ID
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
id1 = cookie_parts['temp']
self.assertEqual(copykeys(sessions.RamSession.cache), [id1])
# Send another request in the same "browser session".
self.getPage('/session_cookie', self.cookies)
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
self.assertBody(id1)
self.assertEqual(copykeys(sessions.RamSession.cache), [id1])
# Simulate a browser close by just not sending the cookies
self.getPage('/session_cookie')
# grab the cookie ID
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
# Assert a new id has been generated...
id2 = cookie_parts['temp']
self.assertNotEqual(id1, id2)
self.assertEqual(set(sessions.RamSession.cache.keys()),
set([id1, id2]))
# Wait for the session.timeout on both sessions
time.sleep(2.5)
cache = copykeys(sessions.RamSession.cache)
if cache:
if cache == [id2]:
self.fail('The second session did not time out.')
else:
                self.fail('Unknown session id in cache: %r' % cache)
def test_8_Ram_Cleanup(self):
def lock():
s1 = sessions.RamSession()
s1.acquire_lock()
time.sleep(1)
s1.release_lock()
t = threading.Thread(target=lock)
t.start()
start = time.time()
while not sessions.RamSession.locks and time.time() - start < 5:
time.sleep(0.01)
assert len(sessions.RamSession.locks) == 1, 'Lock not acquired'
s2 = sessions.RamSession()
s2.clean_up()
assert len(sessions.RamSession.locks) == 1, 'Clean up should not remove active lock'
t.join()
try:
import memcache # NOQA
host, port = '127.0.0.1', 11211
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See http://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(1.0)
s.connect((host, port))
s.close()
except socket.error:
if s:
s.close()
raise
break
except (ImportError, socket.error):
class MemcachedSessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test(self):
return self.skip('memcached not reachable ')
else:
class MemcachedSessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test_0_Session(self):
            self.getPage('/set_session_cls/cherrypy.lib.sessions.MemcachedSession')
self.getPage('/testStr')
self.assertBody('1')
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/length', self.cookies)
self.assertErrorPage(500)
self.assertInBody('NotImplementedError')
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
# Wait for the session.timeout (1 second)
time.sleep(1.25)
self.getPage('/')
self.assertBody('1')
# Test session __contains__
self.getPage('/keyin?key=counter', self.cookies)
self.assertBody('True')
# Test session delete
self.getPage('/delete', self.cookies)
self.assertBody('done')
def test_1_Concurrency(self):
client_thread_count = 5
request_count = 30
# Get initial cookie
self.getPage('/')
self.assertBody('1')
cookies = self.cookies
data_dict = {}
def request(index):
for i in range(request_count):
self.getPage('/', cookies)
# Uncomment the following line to prove threads overlap.
# sys.stdout.write("%d " % index)
if not self.body.isdigit():
self.fail(self.body)
data_dict[index] = int(self.body)
# Start <request_count> concurrent requests from
# each of <client_thread_count> clients
ts = []
for c in range(client_thread_count):
data_dict[c] = 0
t = threading.Thread(target=request, args=(c,))
ts.append(t)
t.start()
for t in ts:
t.join()
hitcount = max(data_dict.values())
expected = 1 + (client_thread_count * request_count)
self.assertEqual(hitcount, expected)
def test_3_Redirect(self):
# Start a new session
self.getPage('/testStr')
self.getPage('/iredir', self.cookies)
self.assertBody('memcached')
def test_5_Error_paths(self):
self.getPage('/unknown/page')
self.assertErrorPage(
404, "The path '/unknown/page' was not found.")
# Note: this path is *not* the same as above. The above
# takes a normal route through the session code; this one
# skips the session code's before_handler and only calls
# before_finalize (save) and on_end (close). So the session
# code has to survive calling save/close without init.
self.getPage('/restricted', self.cookies, method='POST')
self.assertErrorPage(405, response_codes[405][1])
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2018-2021 The EMRALS Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test emralsd shutdown."""
from test_framework.test_framework import EMRALSTestFramework
from test_framework.util import assert_equal, get_rpc_proxy, wait_until
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(EMRALSTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
vec_env.py
|
import numpy as np
from multiprocessing import Process, Pipe
def worker(remote, parent_remote, env):
parent_remote.close()
env.create()
try:
done = False
while True:
cmd, data = remote.recv()
if cmd == 'step':
if done:
ob, info = env.reset()
reward = 0
done = False
else:
ob, reward, done, info = env.step(data)
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob, info = env.reset()
remote.send((ob, info))
elif cmd == 'close':
env.close()
break
else:
raise NotImplementedError
except KeyboardInterrupt:
print('SubprocVecEnv worker: got KeyboardInterrupt')
finally:
env.close()
class VecEnv:
def __init__(self, num_envs, env):
self.closed = False
self.num_envs = num_envs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(num_envs)])
self.ps = [Process(target=worker, args=(work_remote, remote, env))
for (work_remote, remote) in zip(self.work_remotes, self.remotes)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
def step(self, actions):
self._assert_not_closed()
assert len(actions) == self.num_envs, "Error: incorrect number of actions."
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rewards, dones, infos = zip(*results)
return np.stack(obs), np.stack(rewards), np.stack(dones), infos
def reset(self):
self._assert_not_closed()
for remote in self.remotes:
remote.send(('reset', None))
results = [remote.recv() for remote in self.remotes]
obs, infos = zip(*results)
return np.stack(obs), infos
def close_extras(self):
self.closed = True
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
def _assert_not_closed(self):
assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()"
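# --- Hypothetical usage sketch (not part of the original module) ---
# DummyEnv is an illustrative stand-in for whatever environment class the
# worker expects; it only needs create()/reset()/step()/close(). The sketch
# shows the command protocol VecEnv drives over the pipes.
class DummyEnv:
    def create(self):
        self.t = 0
    def reset(self):
        self.t = 0
        return np.zeros(3), {}
    def step(self, action):
        self.t += 1
        # observation, reward, done, info
        return np.ones(3) * self.t, 1.0, self.t >= 5, {}
    def close(self):
        pass
if __name__ == '__main__':
    venv = VecEnv(num_envs=2, env=DummyEnv())
    obs, infos = venv.reset()
    obs, rewards, dones, infos = venv.step([0, 1])
    venv.close_extras()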
|
server.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for building TensorBoard servers.
This is its own module so it can be used in both actual code and test code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import threading
import time
import six
from six.moves import BaseHTTPServer
from six.moves import socketserver
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import event_accumulator
from tensorflow.python.summary.impl import gcs
from tensorflow.tensorboard.backend import handler
# How many elements to store per tag, by tag type
TENSORBOARD_SIZE_GUIDANCE = {
event_accumulator.COMPRESSED_HISTOGRAMS: 500,
event_accumulator.IMAGES: 4,
event_accumulator.AUDIO: 4,
event_accumulator.SCALARS: 1000,
event_accumulator.HISTOGRAMS: 50,
}
def ParseEventFilesSpec(logdir):
"""Parses `logdir` into a map from paths to run group names.
The events files flag format is a comma-separated list of path specifications.
A path specification either looks like 'group_name:/path/to/directory' or
'/path/to/directory'; in the latter case, the group is unnamed. Group names
cannot start with a forward slash: /foo:bar/baz will be interpreted as a
spec with no name and path '/foo:bar/baz'.
Globs are not supported.
Args:
logdir: A comma-separated list of run specifications.
Returns:
A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
Groups without an explicit name are named after their path. If logdir is
None, returns an empty dict, which is helpful for testing things that don't
require any valid runs.
"""
files = {}
if logdir is None:
return files
for specification in logdir.split(','):
# If it's a gcs path, don't split on colon
if gcs.IsGCSPath(specification):
run_name = None
path = specification
# If the spec looks like /foo:bar/baz, then we assume it's a path with a
# colon.
elif ':' in specification and specification[0] != '/':
# We split at most once so run_name:/path:with/a/colon will work.
run_name, _, path = specification.partition(':')
else:
run_name = None
path = specification
if not gcs.IsGCSPath(path):
path = os.path.realpath(path)
files[path] = run_name
return files
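# --- Illustrative sketch (not part of the original module) ---
# _example_parse_spec shows the mapping ParseEventFilesSpec produces for a
# hypothetical --logdir value; the run name and paths below are made up.
def _example_parse_spec():
    spec = 'train:/tmp/runs/train,/tmp/runs/eval'
    # Roughly {'/tmp/runs/train': 'train', '/tmp/runs/eval': None}; unnamed
    # groups get None here and are later named after their path, and non-GCS
    # paths are passed through os.path.realpath first.
    return ParseEventFilesSpec(spec)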
def ReloadMultiplexer(multiplexer, path_to_run):
"""Loads all runs into the multiplexer.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
"""
start = time.time()
for (path, name) in six.iteritems(path_to_run):
multiplexer.AddRunsFromDirectory(path, name)
multiplexer.Reload()
duration = time.time() - start
logging.info('Multiplexer done loading. Load took %0.1f secs', duration)
def StartMultiplexerReloadingThread(multiplexer, path_to_run, load_interval):
"""Starts a thread to automatically reload the given multiplexer.
The thread will reload the multiplexer by calling `ReloadMultiplexer` every
`load_interval` seconds, starting immediately.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
load_interval: How many seconds to wait after one load before starting the
next load.
Returns:
A started `threading.Thread` that reloads the multiplexer.
"""
# We don't call multiplexer.Reload() here because that would make
# AddRunsFromDirectory block until the runs have all loaded.
for path in path_to_run.keys():
if gcs.IsGCSPath(path):
gcs.CheckIsSupported()
logging.info(
'Assuming %s is intended to be a Google Cloud Storage path because '
'it starts with %s. If it isn\'t, prefix it with \'/.\' (i.e., use '
'/.%s instead)', path, gcs.PATH_PREFIX, path)
def _ReloadForever():
while True:
ReloadMultiplexer(multiplexer, path_to_run)
time.sleep(load_interval)
thread = threading.Thread(target=_ReloadForever)
thread.daemon = True
thread.start()
return thread
class ThreadedHTTPServer(socketserver.ThreadingMixIn,
BaseHTTPServer.HTTPServer):
"""A threaded HTTP server."""
daemon_threads = True
def BuildServer(multiplexer, host, port, logdir):
"""Sets up an HTTP server for running TensorBoard.
Args:
multiplexer: An `EventMultiplexer` that the server will query for
information about events.
host: The host name.
port: The port number to bind to, or 0 to pick one automatically.
logdir: The logdir argument string that tensorboard started up with.
Returns:
A `BaseHTTPServer.HTTPServer`.
"""
factory = functools.partial(handler.TensorboardHandler, multiplexer, logdir)
return ThreadedHTTPServer((host, port), factory)
|
server_message.py
|
import threading
import socket
host = '127.0.0.1'
port = 55554
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((host, port))
server.listen()
clients = []
nicknames = []
def broadcast(message):
for client in clients:
client.send(message)
def handle(client):
while True:
try:
message = client.recv(1024)
broadcast(message)
except:
index = clients.index(client)
clients.remove(client)
client.close()
nickname = nicknames[index]
broadcast(f'{nickname} left the chat'.encode('ascii'))
nicknames.remove(nickname)
break
def receive():
while True:
client, address = server.accept()
print(f"connected with {str(address)}")
client.send('nick'.encode('ascii'))
nickname = client.recv(1024).decode('ascii')
nicknames.append(nickname)
clients.append(client)
print(f"Nickname of the client is {nickname}")
broadcast(f'{nickname} joined the chat'.encode('ascii'))
client.send("connected to the server".encode('ascii'))
thread = threading.Thread(target=handle, args=(client,))
thread.start()
receive()
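# --- Hypothetical companion client (would run as a separate process; not part of the server above) ---
# It follows the handshake the server expects: answer the 'nick' prompt with a
# nickname, then send chat lines that the server broadcasts to every client.
def example_client(nickname='guest'):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    if client.recv(1024).decode('ascii') == 'nick':
        client.send(nickname.encode('ascii'))
    print(client.recv(1024).decode('ascii'))  # join broadcast / welcome text
    client.send(f'{nickname}: hello'.encode('ascii'))
    client.close()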
|
fio_rebuild.py
|
import fio
import initiator
import json
import lib
import pos.env
import target
import threading
import time
import traceback
from datetime import datetime
def create_fio(tc, now_date, init, test_target, time_based):
skip_workload = False
fio_cmdset = []
rw = tc[0]
bs = tc[1]
iodepth = tc[2]
test_name = tc[3]
output_name = f"{now_date}_fio_{bs}_{rw}_{test_name}"
test_fio = fio.manager.Fio(init.id, init.pw, init.nic_ssh)
test_fio.opt["ioengine"] = f"{init.spdk_dir}/examples/nvme/fio_plugin/fio_plugin"
if time_based is True:
test_fio.opt["time_based"] = "1"
test_fio.opt["runtime"] = "6000"
else:
test_fio.opt["time_based"] = "0"
test_fio.opt["io_size"] = "20g"
test_fio.opt["ramp_time"] = "15"
test_fio.opt["verify"] = "0"
test_fio.opt["size"] = "100%"
test_fio.opt["serialize_overlap"] = "1"
test_fio.opt["numjobs"] = "1"
test_fio.opt["thread"] = "1"
test_fio.opt["direct"] = "1"
if "randrw" == rw:
test_fio.opt["rwmixread"] = "0"
test_fio.opt["readwrite"] = rw
test_fio.opt["bs"] = bs
test_fio.opt["iodepth"] = iodepth
test_fio.opt["eta"] = "always"
test_fio.opt["group_reporting"] = "1"
test_fio.opt["output-format"] = "json"
test_fio.opt["output"] = f"{init.output_dir}/{output_name}_{init.name}"
if "randwrite" == rw or "randread" == rw:
if test_fio.opt.get("norandommap"):
del test_fio.opt["norandommap"]
else:
test_fio.opt["norandommap"] = "1"
if "512-128k" == bs:
test_fio.opt["bsrange"] = bs
else:
if test_fio.opt.get("bsrange"):
del test_fio.opt["bsrange"]
for subsys in test_target.subsystem_list:
if subsys[0] == init.name:
test_fio.jobs.append(
f" --name=job_{subsys[2]} --filename=\"trtype={test_target.spdk_tp} adrfam=IPv4 traddr={subsys[3]} trsvcid={subsys[4]} subnqn={subsys[1]} ns=1\"")
if not test_fio.Prepare():
skip_workload = True
break
    return test_fio.cmd, f"{output_name}_{init.name}"
    # NOTE: the block below is unreachable leftover code; it follows the return
    # above and references json_scenario, which is not a parameter of create_fio.
    if not skip_workload:
try:
print(f" run -> {now_date}_fio_{bs}_{rw}")
lib.subproc.sync_run(test_fio.cmd)
except Exception as e:
lib.printer.red(f"{__name__} [Error] {e}")
skip_workload = True
try:
lib.subproc.sync_run(
f"sshpass -p {init.pw} scp {init.id}@{init.nic_ssh}:{init.output_dir}/{output_name}_{init.name}.eta {json_scenario['OUTPUT_DIR']}")
lib.subproc.sync_run(
f"sshpass -p {init.pw} scp {init.id}@{init.nic_ssh}:{init.output_dir}/{output_name}_{init.name} {json_scenario['OUTPUT_DIR']}")
except Exception as e:
lib.printer.red(f"{__name__} [Error] {e}")
skip_workload = True
def play(json_targets, json_inits, json_scenario):
    lib.printer.green(f"\n -- '{__name__}' has begun --")
raw_date = datetime.now()
now_date = raw_date.strftime("%y%m%d_%H%M%S")
skip_workload = False
# validate arguments
if 0 == len(json_targets):
lib.printer.red(" TargetError: At least 1 target has to exist")
return
if 0 == len(json_inits):
lib.printer.red(" InitiatorError: At least 1 initiator has to exist")
return
# target prepare
targets = {}
for json_target in json_targets:
try:
target_obj = target.manager.Target(json_target)
except Exception as e:
lib.printer.red(traceback.format_exc())
return
target_name = json_target["NAME"]
try:
target_obj.Prepare()
except Exception as e:
lib.printer.red(traceback.format_exc())
skip_workload = True
target_obj.ForcedExit()
break
targets[target_name] = target_obj
# init prepare
initiators = {}
for json_init in json_inits:
try:
init_obj = initiator.manager.Initiator(json_init)
except Exception as e:
lib.printer.red(traceback.format_exc())
skip_workload = True
break
init_name = json_init["NAME"]
try:
init_obj.Prepare()
except Exception as e:
lib.printer.red(traceback.format_exc())
skip_workload = True
break
initiators[init_name] = init_obj
# check auto generate
if not skip_workload:
test_target = targets[next(iter(targets))]
if "yes" != test_target.use_autogen:
lib.printer.red(
f"{__name__} [Error] check [TARGET][AUTO_GENERATE][USE] is 'yes' ")
skip_workload = True
# run precondition
precondition = json_scenario["PRECONDITION"]
init_idx = next(iter(initiators))
init = initiators[init_idx]
if (not skip_workload) and (precondition == "yes"):
lib.printer.green(f" fio start")
# readwrite, block size, io depth
testcase = [["write", "128k", "4", "seqfill"],
["randwrite", "4k", "128", "randfill1"],
["randwrite", "4k", "128", "randfill2"]
]
for tc in testcase:
fio_cmd, output = create_fio(
tc, now_date, init, test_target, False)
try:
print(f" run -> {output}")
lib.subproc.sync_run(fio_cmd)
except Exception as e:
lib.printer.red(f"{__name__} [Error] {e}")
skip_workload = True
try:
lib.subproc.sync_run(
f"sshpass -p {init.pw} scp {init.id}@{init.nic_ssh}:{init.output_dir}/{output}.eta {json_scenario['OUTPUT_DIR']}")
lib.subproc.sync_run(
f"sshpass -p {init.pw} scp {init.id}@{init.nic_ssh}:{init.output_dir}/{output} {json_scenario['OUTPUT_DIR']}")
except Exception as e:
lib.printer.red(f"{__name__} [Error] {e}")
skip_workload = True
lib.printer.green(f" precondition end")
# create fio which will be run during rebuild
testcase = [["randwrite", "4k", "128", "noio"],
["randwrite", "4k", "128", "highest"],
["randwrite", "4k", "128", "higher"],
["randwrite", "4k", "128", "high"],
["randwrite", "4k", "128", "medium"],
["randwrite", "4k", "128", "low"],
["randwrite", "4k", "128", "lower"],
["randwrite", "4k", "128", "lowest"]]
lib.printer.green(f" precondition end")
# trigger rebuild
arr_name = json_target["POS"]["ARRAYs"][0]["NAME"]
target_obj = targets["Target01"]
target_obj.PcieScan()
device_list = []
for tc in testcase:
# create device list
device_list = []
ret = target_obj.DeviceList()
for line in ret.split('\n'):
tokens = line.split('|')
if 'unvme' in tokens[0]:
device_list.append(tokens[2].replace(' ', ''))
if 'noio' not in tc[3]:
try:
# set rebuild impact level
print("start rebuild with rebuild impact " + tc[3])
ret = target_obj.SetRebuildImpact(tc[3])
# run fio
fio_cmd, output = create_fio(
tc, now_date, init, test_target, True)
thread = threading.Thread(
target=lib.subproc.sync_run, args=[fio_cmd, True])
thread.daemon = True
thread.start()
except Exception as e:
lib.printer.red(f"{__name__} [Error] {e}")
skip_workload = True
# detach device to trigger rebuild
device_to_detach = 0
target_obj.DetachDevice(device_list[device_to_detach])
# wait until rebuild finishes
while(True):
ret = target_obj.CheckRebuildComplete(arr_name)
if ('Situation : REBUILDING' in ret):
print("rebuilding")
if ('Situation : NORMAL' in ret):
print("normal")
break
time.sleep(1)
if 'noio' not in tc[3]:
try:
lib.subproc.sync_run(
f"sshpass -p {init.pw} ssh {init.id}@{init.nic_ssh} nohup 'pkill -15 fio'")
lib.subproc.sync_run(
f"sshpass -p {init.pw} scp {init.id}@{init.nic_ssh}:{init.output_dir}/{output}.eta {json_scenario['OUTPUT_DIR']}")
lib.subproc.sync_run(
f"sshpass -p {init.pw} scp {init.id}@{init.nic_ssh}:{init.output_dir}/{output} {json_scenario['OUTPUT_DIR']}")
except Exception as e:
lib.printer.red(f"{__name__} [Error] {e}")
skip_workload = True
# reattach device as spare for next tc
target_obj.PcieScan()
ret = target_obj.DeviceList()
for line in ret.split('\n'):
tokens = line.split('|')
if ('unvme' in tokens[0]) and ('SYSTEM' in tokens[3]):
target_obj.AddSpare(arr_name, tokens[0])
break
        print(tc, "rebuild complete")
try:
lib.subproc.sync_run(
f"sshpass -p {init.pw} scp {init.id}@{init.nic_ssh}:/var/log/pos/rebuild_log {json_scenario['OUTPUT_DIR']}")
except Exception as e:
lib.printer.red(f"{__name__} [Error] {e}")
skip_workload = True
return
# init wrapup
for key in initiators:
try:
initiators[key].Wrapup()
except Exception as e:
lib.printer.red(traceback.format_exc())
skip_workload = True
    # target wrapup
for key in targets:
try:
targets[key].Wrapup()
except Exception as e:
lib.printer.red(traceback.format_exc())
targets[key].ForcedExit()
skip_workload = True
if skip_workload:
lib.printer.red(f" -- '{__name__}' unexpected done --\n")
else:
lib.printer.green(f" -- '{__name__}' successfully done --\n")
|
context_test.py
|
import shutil
import tempfile
import threading
try:
import greenlet
except ImportError:
greenlet = None
try:
import stackless
except ImportError:
stackless = None
from pytest import mark, raises
from sqlalchemy_imageattach.context import (ContextError, current_store,
get_current_store,
pop_store_context,
push_store_context, store_context)
from sqlalchemy_imageattach.store import Store
from sqlalchemy_imageattach.stores.fs import FileSystemStore
def test_store_context():
path = tempfile.mkdtemp()
store = FileSystemStore(path, 'http://localhost/')
with raises(ContextError):
get_current_store()
with raises(ContextError):
current_store.get_current_object()
store2 = Store()
with store_context(store) as s:
assert s is store
assert get_current_store() is store
assert current_store == store
with store_context(store2) as s2:
assert s2 is store2
assert get_current_store() is store2
assert current_store == store2
with store_context(store) as s3:
assert s3 is store
assert get_current_store() is store
assert current_store == store
assert s2 is store2
assert get_current_store() is store2
assert current_store == store2
assert s is store
assert get_current_store() is store
assert current_store == store
with raises(ContextError):
get_current_store()
with raises(ContextError):
current_store.get_current_object()
shutil.rmtree(path)
def test_push_pop():
store_1 = Store()
store_2 = Store()
with raises(ContextError):
get_current_store()
push_store_context(store_1)
assert get_current_store() is store_1
push_store_context(store_2)
assert get_current_store() is store_2
pop_store_context()
assert get_current_store() is store_1
pop_store_context()
with raises(ContextError):
get_current_store()
def test_thread_context():
values = []
store_1 = Store()
store_2 = Store()
def context_1():
try:
s = get_current_store()
except ContextError:
values.append('error')
else:
values.append(s)
with store_context(store_1):
values.append(get_current_store())
thread_2.start()
thread_2.join()
values.append(get_current_store())
try:
s = get_current_store()
except ContextError:
values.append('error')
else:
values.append(s)
def context_2():
try:
s = get_current_store()
except ContextError:
values.append('error')
else:
values.append(s)
with store_context(store_2):
values.append(get_current_store())
thread_1 = threading.Thread(target=context_1)
thread_2 = threading.Thread(target=context_2)
thread_1.start()
thread_1.join()
assert values == ['error', store_1, 'error', store_2, store_1, 'error']
@mark.skipif('greenlet is None')
def test_greenlet_context():
values = []
store_1 = Store()
store_2 = Store()
def context_1():
try:
s = get_current_store()
except ContextError:
values.append('error')
else:
values.append(s)
with store_context(store_1):
values.append(get_current_store())
greenlet_2.switch()
values.append(get_current_store())
try:
s = get_current_store()
except ContextError:
values.append('error')
else:
values.append(s)
def context_2():
try:
s = get_current_store()
except ContextError:
values.append('error')
else:
values.append(s)
with store_context(store_2):
values.append(get_current_store())
greenlet_1.switch()
greenlet_1 = greenlet.greenlet(context_1)
greenlet_2 = greenlet.greenlet(context_2)
greenlet_1.switch()
assert values == ['error', store_1, 'error', store_2, store_1, 'error']
@mark.skipif('stackless is None')
def test_stackless_context():
values = []
store_1 = Store()
store_2 = Store()
def context_1(channel, join_channel):
try:
s = get_current_store()
except ContextError:
values.append('error')
else:
values.append(s)
with store_context(store_1):
values.append(get_current_store())
task_2(channel)
channel.receive()
values.append(get_current_store())
try:
s = get_current_store()
except ContextError:
values.append('error')
else:
values.append(s)
join_channel.send(None)
def context_2(channel):
try:
s = get_current_store()
except ContextError:
values.append('error')
else:
values.append(s)
with store_context(store_2):
values.append(get_current_store())
channel.send(None)
task_1 = stackless.tasklet(context_1)
task_2 = stackless.tasklet(context_2)
channel = stackless.channel()
join_channel = stackless.channel()
task_1(channel, join_channel)
join_channel.receive()
assert values == ['error', store_1, 'error', store_2, store_1, 'error']
|
imceventhandler.py
|
# Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is responsible for event handling of the events exposed by
IMC server.
"""
from __future__ import print_function
try:
from Queue import Queue
except:
from queue import Queue
from threading import Condition, Lock, Thread
import datetime
import logging
import time
from . import imcmo
from . import imccoreutils
from . import imcxmlcodec as xc
from .imcexception import ImcWarning
from .imcexception import ImcValidationException
log = logging.getLogger('imc')
class MoChangeEvent(object):
"""
This class provides structure to save an event generated for any change,
its associated managed object and property change list.
This functionality is used during add_event_handler.
"""
def __init__(self, event_id=None, mo=None, change_list=None):
self.event_id = event_id
self.mo = mo
self.change_list = change_list
class WatchBlock(object):
"""
    This class implements the Event Handling/Watch block and provides the
    enqueue/dequeue functionality for events.
This functionality is used during add_event_handler.
"""
def __init__(self, params, fmce, capacity, callback):
self.fmce = fmce
self.callback = callback
self.capacity = capacity
self.params = params
self.overflow = False
self.error_code = 0 # TODO:error_code to call notify as per PowerTool
self.event_q = Queue() # infinite size Queue
def dequeue(self, miliseconds_timeout):
"""Internal method to dequeue the events."""
while True:
if self.error_code != 0:
log.debug("queue error:" + str(self.error_code))
return None
if not self.event_q.empty():
mo_chg_event = self.event_q.get()
return mo_chg_event
else:
return None
def enqueue(self, cmce):
"""Internal method to enqueue the events."""
if self.event_q.maxsize < self.capacity:
self.event_q.put(cmce)
else:
self.overflow = True
def dequeue_default_callback(self, mce):
"""Default callback method."""
tab_size = 8
print("\n")
print('EventId'.ljust(tab_size * 2) + ':' + str(mce.event_id))
print('ChangeList'.ljust(tab_size * 2) + ':' + str(mce.change_list))
print('ClassId'.ljust(tab_size * 2) + ':' + str(mce.mo.get_class_id()))
print('MoDn'.ljust(tab_size * 2) + ':' + str(mce.mo.dn))
class ImcEventHandle(object):
"""This class provides api to add and remove event handler."""
def __init__(self, handle):
self.__handle = handle
self.__lock_object = None
self.__wbs = []
self.__wbs_lock = Lock()
self.__enqueue_thread = None
self.__condition = Condition()
self.__event_chan_resp = None
self.__dequeue_thread = None
self.__lowest_timeout = None
self.__wb_to_remove = []
def __get_mo_elem(self, xml_str):
"""
Internal method to extract mo elements from xml string
"""
root = xc.extract_root_elem(xml_str)
mo_elems = []
if root.tag == "methodVessel":
for in_stimuli in root:
for cmce in in_stimuli:
for in_config in cmce:
for mo_elem in in_config:
mo_elems.append(
(mo_elem, cmce.attrib.get('inEid')))
elif root.tag == "configMoChangeEvent":
for in_config in root:
for mo_elem in in_config:
mo_elems.append(mo_elem)
return mo_elems
def __enqueue_function(self):
"""
Internal method used by add_event_handler.
Provides functionality of enqueue/dequeue of the events and
triggering callbacks.
"""
try:
xml_query = '<eventSubscribe cookie="%s"/>' % self.__handle.cookie
self.__event_chan_resp = self.__handle.post_xml(
xml_str=xml_query.encode(), read=False)
except Exception:
raise
try:
while self.__event_chan_resp and len(self.__wbs):
if self.__handle.cookie is None or \
self.__event_chan_resp is None:
break
resp = self.__event_chan_resp.readline()
resp = self.__event_chan_resp.read(int(resp))
for mo_elem in self.__get_mo_elem(resp):
gmo = imcmo.generic_mo_from_xml_elem(mo_elem[0])
mce = MoChangeEvent(event_id=mo_elem[1],
mo=gmo.to_mo(),
change_list=gmo.properties.keys())
for watch_block in self.__wbs:
if watch_block.fmce(mce):
watch_block.enqueue(mce)
with self.__condition:
self.__condition.notify()
if len(self.__wbs) == 0:
self.__condition.acquire()
self.__condition.notify()
self.__condition.release()
except:
raise
def __thread_enqueue_start(self):
"""
Internal method to start the enqueue thread which adds the events in
an internal queue.
"""
self.__enqueue_thread = Thread(name="enqueue_thread",
target=self.__enqueue_function)
self.__enqueue_thread.daemon = True
self.__enqueue_thread.start()
def __time_left(self, watch_block):
timeout_sec = watch_block.params["timeout_sec"]
start_time = watch_block.params["start_time"]
time_diff = datetime.datetime.now() - start_time
if time_diff.seconds < timeout_sec:
return timeout_sec - time_diff.seconds
else:
return 0
# return 2147483647
def __dequeue_mce(self, time_left, watch_block):
if time_left and time_left > 0:
if self.__lowest_timeout is None or \
self.__lowest_timeout > time_left:
self.__lowest_timeout = time_left
mce = watch_block.dequeue(time_left)
else:
mce = watch_block.dequeue(2147483647)
return mce
def __prop_val_exist(self, mo, prop, success_value,
failure_value, transient_value,
change_list=None):
if isinstance(mo, imcmo.GenericMo):
n_prop = prop
n_prop_val = mo.properties[n_prop]
elif prop not in mo.prop_meta:
n_prop = prop
n_prop_val = getattr(mo, n_prop)
else:
n_prop = mo.prop_meta[prop].xml_attribute
n_prop_val = getattr(mo, n_prop)
if change_list and n_prop not in change_list:
return False
if (len(success_value) > 0 and n_prop_val in success_value) or \
(len(failure_value) > 0 and n_prop_val in failure_value) or \
(len(transient_value) > 0 and n_prop_val in transient_value):
return True
return False
def __dequeue_mo_prop_poll(self, mo, prop, poll_sec, watch_block,
timeout_sec=None, time_left=None):
success_value = watch_block.params["success_value"]
failure_value = watch_block.params["failure_value"]
transient_value = watch_block.params["transient_value"]
if not success_value or len(success_value) < 1:
raise ValueError("success_value is missing.")
pmo = self.__handle.query_dn(mo.dn)
if pmo is None:
            ImcWarning('Mo ' + mo.dn + ' not found.')
return
if timeout_sec and time_left and time_left > 0:
if time_left < poll_sec:
poll_sec = timeout_sec - time_left
if self.__lowest_timeout is None or self.__lowest_timeout > poll_sec:
self.__lowest_timeout = poll_sec
if self.__prop_val_exist(pmo, prop, success_value,
failure_value, transient_value):
log.info("Successful")
self.__wb_to_remove.append(watch_block)
def __dequeue_mo_prop_event(self, prop, watch_block, time_left=None):
success_value = watch_block.params["success_value"]
failure_value = watch_block.params["failure_value"]
transient_value = watch_block.params["transient_value"]
if not success_value or len(success_value) < 1:
raise ValueError("success_value is missing.")
# dequeue mce
mce = self.__dequeue_mce(time_left, watch_block)
if mce is None:
return
# checks if prop value exist in success or failure or transient values
attributes = mce.change_list
if self.__prop_val_exist(mce.mo, prop, success_value, failure_value,
transient_value, attributes):
if watch_block.callback:
ctxt = watch_block.params['context']
ctxt["done"] = True
watch_block.callback(mce)
self.__wb_to_remove.append(watch_block)
def __dequeue_mo_until_removed(self, watch_block, time_left=None):
# dequeue mce
mce = self.__dequeue_mce(time_left, watch_block)
if mce is None:
return
if watch_block.callback:
watch_block.callback(mce)
# watch mo until gets deleted
if mce.mo.status == "deleted":
self.__wb_to_remove.append(watch_block)
def __dequeue_all_class_id(self, watch_block, time_left=None):
# dequeue mce
mce = self.__dequeue_mce(time_left, watch_block)
if mce and watch_block.callback:
watch_block.callback(mce)
def __dequeue_function(self):
"""
Internal method to dequeue to events.
"""
while len(self.__wbs):
self.__lowest_timeout = None
self.__wb_to_remove = []
try:
for watch_block in self.__wbs:
mo = watch_block.params["managed_object"]
prop = watch_block.params["prop"]
poll_sec = watch_block.params["poll_sec"]
timeout_sec = watch_block.params["timeout_sec"]
# checks if watch_block is not timed out, else remove
time_left = None
if timeout_sec:
time_left = self.__time_left(watch_block)
if time_left <= 0:
self.__wb_to_remove.append(watch_block)
continue
# poll for mo. Not to monitor event.
if poll_sec and mo:
self.__dequeue_mo_prop_poll(mo, prop, poll_sec,
watch_block, timeout_sec,
time_left)
elif mo:
# watch mo until prop_val changed to desired value
if prop:
self.__dequeue_mo_prop_event(prop, watch_block,
time_left)
# watch mo until it is removed
else:
self.__dequeue_mo_until_removed(watch_block,
time_left)
elif mo is None:
# watch all event or specific to class_id
self.__dequeue_all_class_id(watch_block, time_left)
except Exception as e:
log.info(str(e))
self.__wb_to_remove.append(watch_block)
# removing watch_block
if len(self.__wb_to_remove):
self.__wbs_lock.acquire()
for wb in self.__wb_to_remove:
if "context" in wb.params:
ctxt = wb.params['context']
ctxt["done"] = True
self.watch_block_remove(wb)
self.__wb_to_remove = []
self.__wbs_lock.release()
# wait for more events only if watch_block exists
if len(self.__wbs):
with self.__condition:
self.__condition.wait(self.__lowest_timeout)
return
def __thread_dequeue_start(self):
"""
Internal method to start dequeue thread.
"""
self.__dequeue_thread = Thread(name="dequeue_thread",
target=self.__dequeue_function)
self.__dequeue_thread.daemon = True
self.__dequeue_thread.start()
def watch_block_add(self, params,
filter_callback,
capacity=500,
callback=None):
"""
Internal method to add a watch block for starting event monitoring.
"""
if self.__handle.cookie is None:
return None
self.__wbs_lock.acquire()
watch_block = WatchBlock(params,
filter_callback,
capacity,
callback) # Add a List of Watchers
if watch_block and watch_block.callback is None:
watch_block.callback = watch_block.dequeue_default_callback
self.__wbs.append(watch_block)
self.__wbs_lock.release()
return watch_block
def watch_block_remove(self, watch_block):
"""
Internal method to remove a watch block for
stopping event monitoring.
"""
if watch_block in self.__wbs:
self.__wbs.remove(watch_block)
def _add_class_id_watch(self, class_id):
if imccoreutils.find_class_id_in_mo_meta_ignore_case(class_id) is None:
raise ImcValidationException(
"Invalid ClassId %s specified." % class_id)
def watch__type_filter(mce):
"""
Callback method to work on events with a specific class_id.
"""
if mce.mo.get_class_id().lower() == class_id.lower():
return True
return False
return watch__type_filter
def _add_mo_watch(self, managed_object, prop=None, success_value=[],
poll_sec=None, platform=None):
if imccoreutils.find_class_id_in_mo_meta_ignore_case(
managed_object.get_class_id()) is None:
raise ImcValidationException(
"Unknown ClassId %s provided." %
managed_object.get_class_id())
if prop:
mo_property_meta = imccoreutils.get_mo_property_meta(
managed_object.get_class_id(), prop, platform)
if mo_property_meta is None:
raise ImcValidationException(
"Unknown Property %s provided." % prop)
if not success_value:
raise ImcValidationException(
"success_value parameter is not provided.")
if poll_sec is None:
def watch_mo_filter(mce):
"""
Callback method to work on events specific to respective
managed object.
"""
if mce.mo.dn == managed_object.dn:
return True
return False
return watch_mo_filter
else:
def watch_none_filter(mce):
"""
Callback method to ignore all events.
"""
return False
return watch_none_filter
def add(self,
class_id=None,
managed_object=None,
prop=None,
success_value=[],
failure_value=[],
transient_value=[],
poll_sec=None,
timeout_sec=None,
call_back=None,
context=None,
platform=None):
"""
Adds an event handler.
        An event handler can be added using this method. A user can subscribe
        to the IMC event channel and monitor events for specific success or
        failure values of a managed-object property.
Args:
class_id (str): managed object class id
managed_object (ManagedObject)
prop (str) - property of the managed object to monitor
success_value (list) - success values of a prop
failure_value (list) - failure values of a prop
transient_value (list) - transient values of a prop
poll_sec - specifies the time in seconds for polling event.
timeout_sec - time after which method should stop monitoring.
call_back - call back method
"""
if class_id and managed_object:
raise ImcValidationException(
"Specify either class_id or managedObject, not both")
if class_id:
filter_callback = self._add_class_id_watch(class_id)
elif managed_object:
filter_callback = self._add_mo_watch(managed_object, prop,
success_value, poll_sec,
platform)
else:
def watch_all_filter(mce):
"""
Callback method to work on all events.
"""
return True
filter_callback = watch_all_filter
param_dict = {'class_id': class_id,
'managed_object': managed_object,
'prop': prop,
'success_value': success_value,
'failure_value': failure_value,
'transient_value': transient_value,
'poll_sec': poll_sec,
'timeout_sec': timeout_sec,
'call_back': call_back,
'start_time': datetime.datetime.now(),
'context': context}
if filter_callback is None:
raise ImcValidationException("Error adding WatchBlock...")
watch_block = self.watch_block_add(params=param_dict,
filter_callback=filter_callback,
callback=call_back)
if watch_block and len(self.__wbs) == 1:
if poll_sec is None:
self.__thread_enqueue_start()
self.__thread_dequeue_start()
return watch_block
def remove(self, watch_block):
"""
Removes an event handler.
"""
self.__wbs_lock.acquire()
if watch_block in self.__wbs:
self.watch_block_remove(watch_block)
else:
ImcWarning("Event handler not found")
self.__wbs_lock.release()
def clean(self):
"""
Removes all the watch blocks from the event handler
"""
self.__wbs_lock.acquire()
for each in self.__wbs:
self.watch_block_remove(each)
self.__wbs_lock.release()
def get(self):
"""
Returns the list of event handlers.
"""
return self.__wbs
def wait(handle, mo, prop, value, cb, timeout_sec=None):
"""
Waits for `mo.prop == value`
Args:
handle(ImcHandle): connection handle to the server
mo (Managed Object): managed object to watch
prop (str): property to watch
value (str): property value to wait for
cb(function): callback on success
timeout_sec (int): timeout
Returns:
None
Example:
This method is called from ImcHandle class,
wait_for_event method
"""
# create a new event handler
ueh = ImcEventHandle(handle)
context = {}
context["done"] = False
if isinstance(value, list):
success_value = value
else:
success_value = [value]
# create a watch block
ueh.add(managed_object=mo, prop=prop, success_value=success_value,
call_back=cb, timeout_sec=timeout_sec, context=context)
# wait for the event to occur
while not context["done"]:
time.sleep(1)
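# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): driving the wait()
# helper above directly. The handle is assumed to be a logged-in IMC handle and
# query_dn() is assumed to return the managed object to monitor; both are
# assumptions, they are not defined in this file.
def _example_wait_for_property(handle, mo_dn, prop, expected_value):
    def on_success(mce):
        # assumed to receive the managed-object change event once the
        # monitored property reaches one of the expected values
        print("Property '%s' reached the expected value on %s" % (prop, mo_dn))
    mo = handle.query_dn(mo_dn)  # assumed lookup of the managed object by DN
    # blocks until mo.prop equals expected_value (or the watch times out)
    wait(handle, mo, prop, expected_value, on_success, timeout_sec=600)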
|
robolink.py
|
# Copyright 2015 - RoboDK Inc. - https://robodk.com/
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# --------------------------------------------
# --------------- DESCRIPTION ----------------
# This file defines the following two classes:
# Robolink()
# Item()
# These classes are the objects used to interact with RoboDK and create macros.
# An item is an object in the RoboDK tree (it can be either a robot, an object, a tool, a frame, a program, ...).
# Items can be retrieved from the RoboDK station using the Robolink() object (such as Robolink.Item() method)
#
# In this document: pose = transformation matrix = homogeneous matrix = 4x4 matrix
#
# More information about the RoboDK API for Python here:
# https://robodk.com/doc/en/RoboDK-API.html
# https://robodk.com/doc/en/PythonAPI/index.html
#
# More information about RoboDK post processors here:
# https://robodk.com/help#PostProcessor
#
# Visit the Matrix and Quaternions FAQ for more information about pose/homogeneous transformations
# http://www.j3d.org/matrix_faq/matrfaq_latest.html
#
# --------------------------------------------
import struct
from robodk import *
from warnings import warn
import sys # Only used to detect python version using sys.version_info
# Tree item types
ITEM_TYPE_STATION=1
ITEM_TYPE_ROBOT=2
ITEM_TYPE_FRAME=3
ITEM_TYPE_TOOL=4
ITEM_TYPE_OBJECT=5
ITEM_TYPE_TARGET=6
ITEM_TYPE_PROGRAM=8
ITEM_TYPE_INSTRUCTION=9
ITEM_TYPE_PROGRAM_PYTHON=10
ITEM_TYPE_MACHINING=11
ITEM_TYPE_BALLBARVALIDATION=12
ITEM_TYPE_CALIBPROJECT=13
ITEM_TYPE_VALID_ISO9283=14
ITEM_TYPE_FOLDER=17
ITEM_TYPE_ROBOT_ARM=18
# Instruction types
INS_TYPE_INVALID = -1
INS_TYPE_MOVE = 0
INS_TYPE_MOVEC = 1
INS_TYPE_CHANGESPEED = 2
INS_TYPE_CHANGEFRAME = 3
INS_TYPE_CHANGETOOL = 4
INS_TYPE_CHANGEROBOT = 5
INS_TYPE_PAUSE = 6
INS_TYPE_EVENT = 7
INS_TYPE_CODE = 8
INS_TYPE_PRINT = 9
# Move types
MOVE_TYPE_INVALID = -1
MOVE_TYPE_JOINT = 1
MOVE_TYPE_LINEAR = 2
MOVE_TYPE_CIRCULAR = 3
MOVE_TYPE_LINEARSEARCH = 4 # Such as ABB's SearchL function
# Station parameters request
PATH_OPENSTATION = 'PATH_OPENSTATION'
FILE_OPENSTATION = 'FILE_OPENSTATION'
PATH_DESKTOP = 'PATH_DESKTOP'
# Script execution types
RUNMODE_SIMULATE=1 # performs the simulation moving the robot (default)
RUNMODE_QUICKVALIDATE=2 # performs a quick check to validate the robot movements
RUNMODE_MAKE_ROBOTPROG=3 # makes the robot program
RUNMODE_MAKE_ROBOTPROG_AND_UPLOAD=4 # makes the robot program and updates it to the robot
RUNMODE_MAKE_ROBOTPROG_AND_START=5 # makes the robot program and starts it on the robot (independently from the PC)
RUNMODE_RUN_ROBOT=6 # moves the real robot from the PC (PC is the client, the robot behaves like a server)
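# Illustrative sketch (not part of the original module): selecting one of the
# RUNMODE_* values above. The setRunMode() call is assumed to be provided by the
# Robolink class defined further below; treat this strictly as a usage sketch.
def _example_select_run_mode():
    RDK = Robolink()
    # generate the vendor-specific robot program instead of simulating it
    RDK.setRunMode(RUNMODE_MAKE_ROBOTPROG)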
# Program execution type
PROGRAM_RUN_ON_SIMULATOR=1 # Set the program to run on the simulator
PROGRAM_RUN_ON_ROBOT=2 # Set the program to run on the robot
# Robot connection status
ROBOTCOM_PROBLEMS = -3
ROBOTCOM_DISCONNECTED = -2
ROBOTCOM_NOT_CONNECTED = -1
ROBOTCOM_READY = 0
ROBOTCOM_WORKING = 1
ROBOTCOM_WAITING = 2
ROBOTCOM_UNKNOWN = -1000
# TCP calibration methods
CALIBRATE_TCP_BY_POINT = 0
CALIBRATE_TCP_BY_PLANE = 1
# Reference frame calibration methods
CALIBRATE_FRAME_3P_P1_ON_X = 0 # Calibrate by 3 points: [X, X+, Y+] (p1 on X axis)
CALIBRATE_FRAME_3P_P1_ORIGIN = 1 # Calibrate by 3 points: [Origin, X+, XY+] (p1 is origin)
CALIBRATE_FRAME_6P = 2 # Calibrate by 6 points
CALIBRATE_TURNTABLE = 3 # Calibrate turntable
CALIBRATE_TURNTABLE_2X = 4 # Calibrate a 2 axis turntable
# projection types (for AddCurve)
PROJECTION_NONE = 0 # No curve projection
PROJECTION_CLOSEST = 1 # The projection will be the closest point on the surface
PROJECTION_ALONG_NORMAL = 2 # The projection will be done along the normal.
PROJECTION_ALONG_NORMAL_RECALC = 3 # The projection will be done along the normal. Furthermore, the normal will be recalculated according to the surface normal.
PROJECTION_CLOSEST_RECALC = 4 # The projection will be the closest point on the surface and the normals will be recalculated
PROJECTION_RECALC = 5 # The normals are recalculated according to the surface normal of the closest projection. The points are not changed.
# Euler type
JOINT_FORMAT = -1 # Using joints (not poses)
EULER_RX_RYp_RZpp = 0 # generic
EULER_RZ_RYp_RXpp = 1 # ABB RobotStudio
EULER_RZ_RYp_RZpp = 2 # Kawasaki, Adept, Staubli
EULER_RZ_RXp_RZpp = 3 # CATIA, SolidWorks
EULER_RX_RY_RZ = 4 # Fanuc, Kuka, Motoman, Nachi
EULER_RZ_RY_RX = 5 # CRS
EULER_QUEATERNION = 6 # ABB Rapid
# State of the RoboDK window
WINDOWSTATE_HIDDEN = -1
WINDOWSTATE_SHOW = 0
WINDOWSTATE_MINIMIZED = 1
WINDOWSTATE_NORMAL = 2
WINDOWSTATE_MAXIMIZED = 3
WINDOWSTATE_FULLSCREEN = 4
WINDOWSTATE_CINEMA = 5
WINDOWSTATE_FULLSCREEN_CINEMA= 6
WINDOWSTATE_VIDEO = 7
# Instruction program call type:
INSTRUCTION_CALL_PROGRAM = 0
INSTRUCTION_INSERT_CODE = 1
INSTRUCTION_START_THREAD = 2
INSTRUCTION_COMMENT = 3
INSTRUCTION_SHOW_MESSAGE = 4
# Object selection features
FEATURE_NONE=0
FEATURE_SURFACE=1
FEATURE_CURVE=2
FEATURE_POINT=3
# Spray gun simulation:
SPRAY_OFF = 0
SPRAY_ON = 1
# Collision checking state
COLLISION_OFF = 0
COLLISION_ON = 1
# RoboDK Window Flags
FLAG_ROBODK_TREE_ACTIVE = 1
FLAG_ROBODK_3DVIEW_ACTIVE = 2
FLAG_ROBODK_LEFT_CLICK = 4
FLAG_ROBODK_RIGHT_CLICK = 8
FLAG_ROBODK_DOUBLE_CLICK = 16
FLAG_ROBODK_MENU_ACTIVE = 32
FLAG_ROBODK_MENUFILE_ACTIVE = 64
FLAG_ROBODK_MENUEDIT_ACTIVE = 128
FLAG_ROBODK_MENUPROGRAM_ACTIVE = 256
FLAG_ROBODK_MENUTOOLS_ACTIVE = 512
FLAG_ROBODK_MENUUTILITIES_ACTIVE = 1024
FLAG_ROBODK_MENUCONNECT_ACTIVE = 2048
FLAG_ROBODK_WINDOWKEYS_ACTIVE = 4096
FLAG_ROBODK_TREE_VISIBLE = 8192
FLAG_ROBODK_REFERENCES_VISIBLE = 16384
FLAG_ROBODK_STATUSBAR_VISIBLE = 32768
FLAG_ROBODK_NONE = 0x00
FLAG_ROBODK_ALL = 0xFFFF
FLAG_ROBODK_MENU_ACTIVE_ALL = FLAG_ROBODK_MENU_ACTIVE | FLAG_ROBODK_MENUFILE_ACTIVE | FLAG_ROBODK_MENUEDIT_ACTIVE | FLAG_ROBODK_MENUPROGRAM_ACTIVE | FLAG_ROBODK_MENUTOOLS_ACTIVE | FLAG_ROBODK_MENUUTILITIES_ACTIVE | FLAG_ROBODK_MENUCONNECT_ACTIVE
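# Illustrative sketch (not part of the original module): combining the
# FLAG_ROBODK_* bit masks above and applying them with setFlagsRoboDK(),
# which is defined in the Robolink class further below.
def _example_restrict_user_interface():
    RDK = Robolink()
    # keep the tree and 3D navigation usable but disable every menu
    flags = FLAG_ROBODK_TREE_ACTIVE | FLAG_ROBODK_3DVIEW_ACTIVE | FLAG_ROBODK_LEFT_CLICK
    RDK.setFlagsRoboDK(flags)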
# RoboDK Item Flags
FLAG_ITEM_SELECTABLE = 1
FLAG_ITEM_EDITABLE = 2
FLAG_ITEM_DRAGALLOWED = 4
FLAG_ITEM_DROPALLOWED = 8
FLAG_ITEM_ENABLED = 32
FLAG_ITEM_AUTOTRISTATE = 64
FLAG_ITEM_NOCHILDREN = 128
FLAG_ITEM_USERTRISTATE = 256
FLAG_ITEM_NONE = 0
FLAG_ITEM_ALL = 64+32+8+4+2+1
# Robot types
MAKE_ROBOT_1R=1
MAKE_ROBOT_2R=2
MAKE_ROBOT_3R=3
MAKE_ROBOT_1T=4
MAKE_ROBOT_2T=5
MAKE_ROBOT_3T=6
MAKE_ROBOT_6DOF=7
MAKE_ROBOT_7DOF=8
MAKE_ROBOT_SCARA=9
# Path Error bit mask
ERROR_KINEMATIC = 0b001 # One or more points is not reachable
ERROR_PATH_LIMIT = 0b010 # The path reaches the limit of joint axes
ERROR_PATH_SINGULARITY = 0b100 # The robot reached a singularity point
ERROR_PATH_NEARSINGULARITY = 0b1000 # The robot is too close to a singularity. Lower the singularity tolerance to allow the robot to continue.
ERROR_COLLISION = 0b100000 # Collision detected
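# Illustrative sketch (not part of the original module): decoding a path error
# bit mask built from the ERROR_* constants above. The mask is assumed to come
# from a path validation call; here it is simply passed in as an integer.
def _example_describe_path_error(error_mask):
    problems = []
    if error_mask & ERROR_KINEMATIC:
        problems.append("one or more points are not reachable")
    if error_mask & ERROR_PATH_LIMIT:
        problems.append("the path reaches a joint limit")
    if error_mask & ERROR_PATH_SINGULARITY:
        problems.append("the path crosses a singularity")
    if error_mask & ERROR_PATH_NEARSINGULARITY:
        problems.append("the path is too close to a singularity")
    if error_mask & ERROR_COLLISION:
        problems.append("collision detected")
    return ", ".join(problems) if problems else "no error"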
# Interactive selection option (for 3D mouse behavior and setInteractiveMode)
SELECT_RESET =-1
SELECT_NONE =0
SELECT_RECTANGLE=1
SELECT_ROTATE =2
SELECT_ZOOM =3
SELECT_PAN =4
SELECT_MOVE =5
SELECT_MOVE_SHIFT=6
SELECT_MOVE_CLEAR=7
# Bit masks to show specific reference frames and customize the display of references (for picking references with the 3D mouse and setInteractiveMode)
DISPLAY_REF_DEFAULT = -1
DISPLAY_REF_NONE = 0
DISPLAY_REF_TX = 0b001
DISPLAY_REF_TY = 0b010
DISPLAY_REF_TZ = 0b100
DISPLAY_REF_RX = 0b001000
DISPLAY_REF_RY = 0b010000
DISPLAY_REF_RZ = 0b100000
DISPLAY_REF_PXY= 0b001000000
DISPLAY_REF_PXZ= 0b010000000
DISPLAY_REF_PYZ= 0b100000000
VISIBLE_REFERENCE_DEFAULT = -1
VISIBLE_REFERENCE_ON = 1
VISIBLE_REFERENCE_OFF = 0
VISIBLE_ROBOT_NONE = 0
VISIBLE_ROBOT_FLANGE = 0x01
VISIBLE_ROBOT_AXIS_Base_3D = 0x01 << 1
VISIBLE_ROBOT_AXIS_Base_REF = 0x01 << 2
VISIBLE_ROBOT_AXIS_1_3D = 0x01 << 3
VISIBLE_ROBOT_AXIS_1_REF = 0x01 << 4
VISIBLE_ROBOT_AXIS_2_3D = 0x01 << 5
VISIBLE_ROBOT_AXIS_2_REF = 0x01 << 6
VISIBLE_ROBOT_AXIS_3_3D = 0x01 << 7
VISIBLE_ROBOT_AXIS_3_REF = 0x01 << 8
VISIBLE_ROBOT_AXIS_4_3D = 0x01 << 9
VISIBLE_ROBOT_AXIS_4_REF = 0x01 << 10
VISIBLE_ROBOT_AXIS_5_3D = 0x01 << 11
VISIBLE_ROBOT_AXIS_5_REF = 0x01 << 12
VISIBLE_ROBOT_AXIS_6_3D = 0x01 << 13
VISIBLE_ROBOT_AXIS_6_REF = 0x01 << 14
VISIBLE_ROBOT_AXIS_7_3D = 0x01 << 15
VISIBLE_ROBOT_AXIS_7_REF = 0x02 << 16
VISIBLE_ROBOT_DEFAULT = 0x2AAAAAAB
VISIBLE_ROBOT_ALL = 0x7FFFFFFF
VISIBLE_ROBOT_ALL_REFS = 0x15555555
if False:
# To be added in the future. Requires Python 3.6 or later
from enum import IntFlag
from enum import IntEnum
class InstructionListJointsFlags(IntEnum):
"""InstructionListJoints output flag"""
Position = 1
Speed = 2
SpeedAndAcceleration = 3
TimeBased = 4
class PathErrorFlags(IntFlag):
"""Error flags returned by InstructionListJoints"""
# none of the flags is set -> No Error
NoError = 0
# One or more points is not reachable
Kinematic = 0x1
# The path reaches the limit of joint axes
PathLimit = 0x2
# The robot reached a singularity point
PathSingularity = 0x4
# The robot is too close to a singularity.
# Lower the singularity tolerance to allow the robot to continue.
PathNearSingularity = 0x8
# A movement can't involve an exact rotation of 180 deg around a unique axis. The rotation is ambiguous and has infinite solutions.
PathFlipAxis = 0b10000
# Collision detected
        Collision = 0x20  # 0b100000
# The robot reached a Wrist singularity: Joint 5 is too close to 0 deg
WristSingularity = 0b1000000
# The robot reached an Elbow singularity: Joint 3 is fully extended
ElbowSingularity = 0b10000000
# The robot reached a Shoulder singularity: the wrist is too close to axis 1
ShoulderSingularity = 0b100000000
def ConvertErrorCodeToJointErrorType(evalue):
"""Convert error number returned by InstructionListJoints() to PathErrorFlags"""
flags = PathErrorFlags.NoError
if (evalue % 10000000 > 999999):
# "The robot can't make a rotation so close to 180 deg. (the rotation axis is not properly defined
flags |= PathErrorFlags.PathFlipAxis
if (evalue % 1000000 > 99999):
# Collision detected.
flags |= PathErrorFlags.Collision
if (evalue % 1000 > 99):
# Joint 5 crosses 0 degrees. This is a singularity and it is not allowed for a linear move.
flags |= PathErrorFlags.WristSingularity
flags |= PathErrorFlags.PathSingularity
elif (evalue % 10000 > 999):
if (evalue % 10000 > 3999):
# The robot is too close to the front/back singularity (wrist close to axis 1).
flags |= PathErrorFlags.ShoulderSingularity
flags |= PathErrorFlags.PathSingularity
elif (evalue % 10000 > 1999):
flags |= PathErrorFlags.ElbowSingularity
flags |= PathErrorFlags.PathSingularity
# Joint 3 is too close the elbow singularity.
else:
# Joint 5 is too close to a singularity (0 degrees).
flags |= PathErrorFlags.WristSingularity
flags |= PathErrorFlags.PathSingularity
flags |= PathErrorFlags.PathNearSingularity
if (evalue % 10 > 0):
# There is no solution available to complete the path.
flags |= PathErrorFlags.PathLimit
if (evalue % 100 > 9):
# The robot can't make a linear movement because of joint limits or the target is out of reach. Consider a Joint move instead.
flags |= PathErrorFlags.PathLimit
flags |= PathErrorFlags.Kinematic
return flags
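# Illustrative note (not part of the original module): the error values decoded
# by ConvertErrorCodeToJointErrorType above encode one problem per decimal digit
# group. For example, an assumed value of evalue = 20 only satisfies
# (evalue % 100 > 9), so it decodes to PathLimit | Kinematic (a linear move that
# cannot be completed), while evalue = 100 satisfies (evalue % 1000 > 99) and
# decodes to WristSingularity | PathSingularity.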
class TargetReachError(Exception):
"""Unable to reach desired target or destination error."""
pass
class StoppedError(Exception):
"""The user stopped the operation by selecting Escape key or moving the robot"""
pass
class InputError(Exception):
"""Invalid input parameters provided to the API. Provide input as stated in the documentation."""
pass
class LicenseError(Exception):
"""Invalid RoboDK license to use the requested feature."""
pass
def RoboDKInstallFound():
"""Check if RoboDK is installed"""
path_install = getPathRoboDK()
return os.path.exists(path_install)
def getPathRoboDK():
"""RoboDK's executable/binary file"""
from sys import platform as _platform
if _platform == "linux" or _platform == "linux2":
# Ubuntu, Linux or Debian
return os.path.expanduser("~/RoboDK/bin/RoboDK")
elif _platform == "darwin":
# MacOS
#self.APPLICATION_DIR = "/Applications/RoboDK.app/Contents/MacOS/RoboDK"
return "~/RoboDK/RoboDK.app/Contents/MacOS/RoboDK"
else:
# Windows assumed
if sys.version_info[0] < 3:
import _winreg
else:
import winreg as _winreg
# Try to get the value from the Windows registry:
try:
#if True:
# Open the key and return the handle object.
try:
hKey = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\RoboDK", 0, _winreg.KEY_READ | _winreg.KEY_WOW64_64KEY)
except FileNotFoundError:
hKey = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\RoboDK", 0, _winreg.KEY_READ | _winreg.KEY_WOW64_32KEY)
# Read the value.
result = _winreg.QueryValueEx(hKey, "INSTDIR")
# Close the handle object.
_winreg.CloseKey(hKey)
# Return only the value from the resulting tuple (value, type_as_int).
return result[0].replace("\\","/") + "/bin/RoboDK.exe"
except:# FileNotFoundError:
print("RoboDK was not installed properly. Install RoboDK from www.robodk.com/download.")
return "C:/RoboDK/bin/RoboDK.exe"
def getPathIcon():
iconpath = getPathRoboDK()
if iconpath.endswith(".exe"):
iconpath = iconpath[:-4]
iconpath = iconpath + '.ico'
return iconpath
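# Illustrative sketch (not part of the original module): using the helpers above
# to verify a local RoboDK installation before attempting to connect.
def _example_check_local_install():
    if RoboDKInstallFound():
        print("RoboDK found at: " + getPathRoboDK())
    else:
        print("RoboDK does not appear to be installed (expected at %s)" % getPathRoboDK())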
def import_install(module_name, pip_name=None, rdk=None):
"""Import a module by first installing it if the corresponding package is not available. If the module name does not match the pip install command, provide the pip_name for install purposes.
Optionally, you can pass the RoboDK API Robolink object to see install progress in RoboDK's status bar.
.. code-block:: python
        :caption: Example to install a package and import it as a module
# If you want to install opencv for Python and pyserial you should use:
import_install("opencv", "opencv-python", RDK)
import_install("serial", "pyserial", RDK)
# If the name of the module matches the package you can just pass the name of the module.
# Example:
import_install("xlrd", rdk=RDK)
"""
try:
exec('import ' + module_name, globals())
return
except ImportError:
import os
import sys
import subprocess
import io
def execute(cmd):
print("Running command:")
print(cmd)
print("...")
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)#, universal_newlines=True)
for line in io.TextIOWrapper(proc.stdout, encoding="utf-8"): # or another encoding
# display line output
line = line.strip()
print(line)
if rdk:
rdk.ShowMessage(line, False)
if pip_name is None:
pip_name = module_name
msg = "Installing required module: " + module_name + " ..."
print(msg)
if rdk:
rdk.ShowMessage(msg, False)
try:
cmd = sys.executable + ' -m pip install ' + pip_name
#os.system(cmd)
execute(cmd)
exec('import ' + module_name, globals())
except:
msg = "Unable to load or install <strong>%s</strong>. Make sure you have internet connection and administrator privileges" % module_name
print(msg)
if rdk:
rdk.ShowMessage(msg, True)
raise Exception(msg)
def EmbedWindow(window_name, docked_name=None, size_w=-1, size_h=-1, pid=0, area_add=1, area_allowed=15, timeout=500, port=None, args=[]):
"""Embed a window from a separate process in RoboDK as a docked window. Returns True if successful.
:param str window_name: The name of the window currently open. Make sure the window name is unique and it is a top level window
:param str docked_name: Name of the docked tab in RoboDK (optional, if different from the window name)
:param int pid: Process ID (optional)
:param int area_add: Set to 1 (right) or 2 (left) (default is 1)
:param int area_allowed: Areas allowed (default is 15:no constrain)
:param int timeout: Timeout to abort attempting to embed the window
.. code-block:: python
:caption: Example to embed a window as a docked RoboDK window
from tkinter import *
from robolink import *
import threading
# Create a new window
window = tkinter.Tk()
# Close the window
def onClose():
window.destroy()
quit(0)
# Trigger Select button
# IMPORTANT: We need to run the action on a separate thread because
# (otherwise, if we want to interact with RoboDK window it will freeze)
def on_btnSelect():
def thread_btnSelect():
# Run button action (example to select an item and display its name)
RDK = Robolink()
item = RDK.ItemUserPick('Select an item')
if item.Valid():
RDK.ShowMessage("You selected the item: " + item.Name())
threading.Thread(target=thread_btnSelect).start()
# Set the window title (must be unique for the docking to work, try to be creative!)
window_title = 'RoboDK API Docked Window'
window.title(window_title)
# Delete the window when we close it
window.protocol("WM_DELETE_WINDOW", onClose)
# Add a button (Select action)
btnSelect = Button(window, text='Trigger on_btnSelect', height=5, width=60, command=on_btnSelect)
btnSelect.pack(fill=X)
# Embed the window
EmbedWindow(window_title)
# Run the window event loop. This is like an app and will block until we close the window
window.mainloop()
"""
import threading
def t_dock(wname, dname, sz_w, sz_h, p, a_add, a_allowed, tout):
# it is important to run this on a parallel thread to not block the main window events in Python
rdk = Robolink(port=port, args=args)
if rdk.EmbedWindow(wname, dname, sz_w, sz_h, p, a_add, a_allowed, tout):
print("Window docked successfully: " + window_name)
else:
print("Failed to dock window: " + window_name)
t = threading.Thread(target=t_dock, args = (window_name, docked_name, size_w, size_h, pid, area_add, area_allowed, timeout))
t.start()
class Robolink:
"""The Robolink class is the link to to RoboDK and allows creating macros for Robodk, simulate applications and generate programs offline.
Any interaction is made through \"items\" (Item() objects). An item is an object in the
robodk tree (it can be either a robot, an object, a tool, a frame, a
program, ...).
:param str robodk_ip: IP of the RoboDK API server (default='localhost')
:param int port: Port of the RoboDK API server (default=None, it will use the default value)
:param list args: Command line arguments to pass to RoboDK on startup (for example: '/NOSPLASH /NOSHOW' should be passed as args=['/NOSPLASH','/NOSHOW'] to not display RoboDK). Arguments have no effect if RoboDK is already running.\n
For more information: `RoboDK list of arguments on startup <https://robodk.com/doc/en/RoboDK-API.html#CommandLine>`_.
:param str robodk_path: RoboDK installation path. It defaults to RoboDK's default path (C:/RoboDK/bin/RoboDK.exe on Windows or /Applications/RoboDK.app/Contents/MacOS/RoboDK on Mac)
.. code-block:: python
:caption: Example of a RoboDK API initialization
from robolink import *
# Connect to the RoboDK API
RDK = Robolink()
# Retrieve all items and print their names
list_items = RDK.ItemList()
for item in list_items:
print(item.Name())
.. code-block:: python
:caption: Force starting a new RoboDK hidden instance and output debug information
from robolink import *
# Connect to the RoboDK API
RDK = Robolink(args=["-NEWINSTANCE", "-NOUI", "-SKIPINI", "-EXIT_LAST_COM"])
# Add a reference frame
RDK.AddFrame("My reference frame")
RDK.setPose(transl(100,200,300) * rotz(pi/2))
# Retrieve all items and print their names (just a reference frame)
list_items = RDK.ItemList()
for item in list_items:
print(item.Name())
# Close RoboDK
RDK.CloseRoboDK()
# Example command line arguments:
# -NEWINSTANCE: Forces using a new instance
# -NOUI: Run RoboDK behind the scenes (without OpenGL context)
# -SKIPINI: Skip using RoboDK's INI settings (global settings), this provides a faster startup
# -EXIT_LAST_COM: Exit RoboDK when the last API client connected closes
# -DEBUG: Run in debug mode (outputs information in the console)
#
# Follow these steps to see an extended list of command line arguments:
# 1- Select Tools-Run Script
# 2- Select ShowCommands
#
# More information here:
# https://robodk.com/doc/en/RoboDK-API.html#CommandLine
.. seealso:: :func:`~robolink.Robolink.Item`, :func:`~robolink.Item.Name`, :func:`~robolink.Item.setPose`, :func:`~robolink.Robolink.CloseRoboDK`
.. seealso:: :func:`~robolink.Robolink.AddFile`, :func:`~robolink.Robolink.AddFrame`, :func:`~robolink.Robolink.AddTarget`, :func:`~robolink.Robolink.AddProgram`
"""
# checks that provided items exist in memory and poses are homogeneous
SAFE_MODE = 1
    # if AUTO_UPDATE is 1, updating and rendering objects in the 3D scene will be delayed until 100 ms after the last call (this value can be changed in Tools-Options-Other-API Render delay, or also using the RoboDK.Command('AutoRenderDelay', value) and RoboDK.Command('AutoRenderDelayMax', value))
AUTO_UPDATE = 0
# IP address of the simulator (localhost if it is the same computer), otherwise, use RL = Robolink('yourip') to set to a different IP
IP = 'localhost'
# port to start looking for the RoboDK API connection (Tools-Options-Other-RoboDK API)
PORT_START = 20500
# port to stop looking for the RoboDK API connection
PORT_END = 20500
# timeout for communication, in seconds
TIMEOUT = 10
# activate nodelay option (faster, requires more resources)
NODELAY = False
# file path to the robodk program (executable). As an example, on Windows it should be: C:/RoboDK/bin/RoboDK.exe
APPLICATION_DIR = ''
DEBUG = False # Debug output through console
COM = None # tcpip com
ARGUMENTS = [] # Command line arguments to RoboDK, such as /NOSPLASH /NOSHOW to not display RoboDK. It has no effect if RoboDK is already running.
PORT = -1 # current port
BUILD = 0 # This variable holds the build id and is used for version checking
# Remember last status message
LAST_STATUS_MESSAGE = ''
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def _setTimeout(self, timeout_sec=30):
"""Set the communication timeout (in seconds)."""
# Change the default timeout here, in seconds:
self.TIMEOUT = timeout_sec # in seconds
self.COM.settimeout(self.TIMEOUT)
def _is_connected(self):
"""Returns 1 if connection is valid, returns 0 if connection is invalid"""
if not self.COM: return 0
connected = 1
#try:
# self.COM.settimeout(0)
# check = self.COM.recv(1)
#except:
# connected = 0
#
#self.COM.settimeout(self.TIMEOUT)
return connected
def _check_connection(self):
"""If we are not connected it will attempt a connection, if it fails, it will throw an error"""
if not self._is_connected() and self.Connect() < 1:
raise Exception('Unable to connect')
#To do: Clear input buffer.
def _check_status(self):
"""This procedure checks the status of the connection"""
status = self._rec_int()
if status == 0:
# everything is OK
self.LAST_STATUS_MESSAGE = ''
elif status > 0 and status < 10:
self.LAST_STATUS_MESSAGE = 'Unknown error'
if status == 1:
self.LAST_STATUS_MESSAGE = 'Invalid item provided: The item identifier provided is not valid or it does not exist.'
elif status == 2: #output warning
self.LAST_STATUS_MESSAGE = self._rec_line()
print('WARNING: ' + self.LAST_STATUS_MESSAGE)
#warn(self.LAST_STATUS_MESSAGE)# does not show where is the problem...
return 0
elif status == 3: #output error
self.LAST_STATUS_MESSAGE = self._rec_line()
raise Exception(self.LAST_STATUS_MESSAGE)
elif status == 9:
self.LAST_STATUS_MESSAGE = 'Invalid license. Purchase a license online (www.robodk.com) or contact us at info@robodk.com.'
print(self.LAST_STATUS_MESSAGE)
raise Exception(self.LAST_STATUS_MESSAGE)
elif status < 100:
# Since RoboDK 4.0 we raise dedicated errors
self.LAST_STATUS_MESSAGE = self._rec_line()
if status == 10:
raise TargetReachError(self.LAST_STATUS_MESSAGE)
elif status == 11:
raise StoppedError(self.LAST_STATUS_MESSAGE)
elif status == 12:
raise InputError(self.LAST_STATUS_MESSAGE)
elif status == 13:
raise LicenseError(self.LAST_STATUS_MESSAGE)
else:
# Generic error exception
raise Exception(self.LAST_STATUS_MESSAGE)
else:
self.LAST_STATUS_MESSAGE = 'Problems running function'
raise Exception(self.LAST_STATUS_MESSAGE)
return status
def _check_color(self, color):
"""Formats the color in a vector of size 4x1 and ranges [0,1]"""
if not isinstance(color,list) or len(color) < 3 or len(color) > 4:
raise Exception('The color vector must be a list of 3 or 4 values')
if len(color) == 3:
color.append(1)
if max(color) > 1 or min(color) < -1:
print("WARNING: Color provided is not in the range [0,1] ([r,g,b,a])")
return color
def _send_line(self, string=None):
"""Sends a string of characters with a \\n"""
string = string.replace('\n','<br>')
if sys.version_info[0] < 3:
self.COM.send(bytes(string+'\n')) # Python 2.x only
else:
self.COM.send(bytes(string+'\n','utf-8')) # Python 3.x only
def _rec_line(self):
"""Receives a string. It reads until if finds LF (\\n)"""
string = b''
chari = self.COM.recv(1)
while chari != b'\n': # read until LF
string = string + chari
chari = self.COM.recv(1)
return str(string.decode('utf-8')) # python 2 and python 3 compatible
#string = ''
#chari = self.COM.recv(1).decode('utf-8')
#while chari != '\n': # read until LF
# string = string + chari
# chari = self.COM.recv(1).decode('utf-8')
#return str(string) # python 2 and python 3 compatible
def _send_item(self, item):
"""Sends an item pointer"""
if isinstance(item, Item):
self.COM.send(struct.pack('>Q',item.item))#q=unsigned long long (64 bits), d=float64
return
if item is None:
item = 0
self.COM.send(struct.pack('>Q',item))#q=unsigned long long (64 bits), d=float64
def _rec_item(self):
"""Receives an item pointer"""
buffer = self.COM.recv(8)
item = struct.unpack('>Q',buffer)#q=unsigned long long (64 bits), d=float64
buffer2 = self.COM.recv(4)
itemtype = struct.unpack('>i',buffer2)
return Item(self,item[0], itemtype[0])
def _send_bytes(self, data):
"""Sends a byte array"""
if isinstance(data,str):
data = bytes(data,'utf-8')
if not isinstance(data,bytes):
data = bytes(data)
self.COM.send(struct.pack('>I',len(data)))#q=unsigned long long (64 bits), d=float64
self.COM.send(data)
def _rec_bytes(self):
"""Receives a byte array"""
buffer = self.COM.recv(4)
bytes_len = struct.unpack('>I',buffer)[0]#q=unsigned long long (64 bits), d=float64
data = b''
bytes_remaining = bytes_len
while bytes_remaining > 0:
data += self.COM.recv(bytes_remaining)
bytes_remaining = bytes_len - len(data)
return data
def _send_ptr(self, ptr_h):
"""Sends a generic pointer"""
self.COM.send(struct.pack('>Q',ptr_h))#q=unsigned long long (64 bits), d=float64
def _rec_ptr(self):
"""Receives a generic pointer"""
buffer = self.COM.recv(8)
ptr_h = struct.unpack('>Q',buffer)#q=unsigned long long (64 bits), d=float64
return ptr_h[0] #return ptr_h
def _send_pose(self, pose):
"""Sends a pose (4x4 matrix)"""
if not pose.isHomogeneous():
print("Warning: pose is not homogeneous!")
print(pose)
posebytes = b''
for j in range(4):
for i in range(4):
posebytes = posebytes + struct.pack('>d',pose[i,j])
self.COM.send(posebytes)
def _rec_pose(self):
"""Receives a pose (4x4 matrix)"""
posebytes = self.COM.recv(16*8)
posenums = struct.unpack('>16d',posebytes)
pose = Mat(4,4)
cnt = 0
for j in range(4):
for i in range(4):
pose[i,j] = posenums[cnt]
cnt = cnt + 1
return pose
def _send_xyz(self, pos):
"""Sends an xyz vector"""
posbytes = b''
for i in range(3):
posbytes = posbytes + struct.pack('>d',pos[i])
self.COM.send(posbytes)
def _rec_xyz(self):
"""Receives an xyz vector"""
posbytes = self.COM.recv(3*8)
posnums = struct.unpack('>3d',posbytes)
pos = [0,0,0]
for i in range(3):
pos[i] = posnums[i]
return pos
def _send_int(self, num):
"""Sends an int (32 bits)"""
if isinstance(num, float):
num = round(num)
elif not isinstance(num, int):
num = num[0]
self.COM.send(struct.pack('>i',num))
def _rec_int(self):
"""Receives an int (32 bits)"""
buffer = self.COM.recv(4)
num = struct.unpack('>i',buffer)
return num[0]
def _send_array(self, values):
"""Sends an array of doubles"""
if not isinstance(values,list):#if it is a Mat() with joints
values = (values.tr()).rows[0]
nval = len(values)
self._send_int(nval)
if nval > 0:
buffer = b''
for i in range(nval):
buffer = buffer + struct.pack('>d',values[i])
self.COM.send(buffer)
def _rec_array(self):
"""Receives an array of doubles"""
nvalues = self._rec_int()
if nvalues > 0:
buffer = self.COM.recv(8*nvalues)
values = list(struct.unpack('>'+str(nvalues)+'d',buffer))
#values = fread(self.COM, nvalues, 'double')
else:
values = [0]
return Mat(values)
def _send_matrix(self, mat):
"""Sends a 2 dimensional matrix (nxm)"""
if mat is None:
self._send_int(0)
self._send_int(0)
return
if type(mat) == list:
mat = Mat(mat).tr()
size = mat.size()
self._send_int(size[0])
self._send_int(size[1])
for j in range(size[1]):
matbytes = b''
for i in range(size[0]):
matbytes = matbytes + struct.pack('>d',mat[i,j])
self.COM.send(matbytes)
def _rec_matrix(self):
"""Receives a 2 dimensional matrix (nxm)"""
size1 = self._rec_int()
size2 = self._rec_int()
recvsize = size1*size2*8
BUFFER_SIZE = 512
if recvsize > 0:
matbytes = b''
to_receive = min(recvsize, BUFFER_SIZE)
while to_receive > 0:
matbytes += self.COM.recv(to_receive)
to_receive = min(recvsize - len(matbytes), BUFFER_SIZE)
matnums = struct.unpack('>'+str(size1*size2)+'d',matbytes)
mat = Mat(size1,size2)
cnt = 0
for j in range(size2):
for i in range(size1):
#mat[i,j] = matnums[cnt]
mat.rows[i][j] = matnums[cnt]
cnt = cnt + 1
else:
mat = Mat(0,0)
return mat
def _moveX(self, target, itemrobot, movetype, blocking=True):
"""Performs a linear or joint movement. Use MoveJ or MoveL instead."""
#self._check_connection();
itemrobot.WaitMove()# checks connection
if blocking:
command = 'MoveXb'
else:
command = 'MoveX'
self._send_line(command)
self._send_int(movetype)
if isinstance(target,Item):# target is an item
self._send_int(3)
self._send_array([])
self._send_item(target)
elif isinstance(target,list) or target.size() != (4,4):# target are joints
self._send_int(1)
self._send_array(target)
self._send_item(0)
elif target.size() == (4,4): # target is a pose
self._send_int(2)
mattr = target.tr()
self._send_array(mattr.rows[0]+mattr.rows[1]+mattr.rows[2]+mattr.rows[3])
self._send_item(0)
else:
raise Exception('Invalid input values')
self._send_item(itemrobot)
self._check_status()
if blocking:
#itemrobot.WaitMove()
self.COM.settimeout(360000)
self._check_status()#will wait here
self.COM.settimeout(self.TIMEOUT)
def MoveC(self, target1, target2, itemrobot, blocking=True):
"""Performs a circular movement. Use robot.MoveC instead."""
#self._check_connection();
itemrobot.WaitMove()# checks connection
if blocking:
command = 'MoveCb'
else:
command = 'MoveC'
self._send_line(command)
self._send_int(3)
if isinstance(target1,Item):# target1 is an item
self._send_int(3)
self._send_array([])
self._send_item(target1)
elif isinstance(target1,list) or target1.size() != (4,4):# target1 are joints
self._send_int(1)
self._send_array(target1)
self._send_item(0)
elif target1.size() == (4,4): # target1 is a pose
self._send_int(2)
mattr = target1.tr()
self._send_array(mattr.rows[0]+mattr.rows[1]+mattr.rows[2]+mattr.rows[3])
self._send_item(0)
else:
raise Exception('Invalid input value for target 1')
if isinstance(target2,Item):# target1 is an item
self._send_int(3)
self._send_array([])
self._send_item(target2)
elif isinstance(target2,list) or target2.size() != (4,4):# target2 are joints
self._send_int(1)
self._send_array(target2)
self._send_item(0)
elif target2.size() == (4,4): # target2 is a pose
self._send_int(2)
mattr = target2.tr()
self._send_array(mattr.rows[0]+mattr.rows[1]+mattr.rows[2]+mattr.rows[3])
self._send_item(0)
else:
raise Exception('Invalid input value for target 2')
self._send_item(itemrobot)
self._check_status()
if blocking:
#itemrobot.WaitMove()
self.COM.settimeout(360000)
self._check_status()#will wait here
self.COM.settimeout(self.TIMEOUT)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def __init__(self, robodk_ip='localhost', port=None, args=[], robodk_path=None):
"""A connection is attempted upon creation of the object
In 1 (optional) : robodk_ip -> IP of the RoboDK API server (default='localhost')
In 2 (optional) : port -> Port of the RoboDK API server (default=None)
In 3 (optional) : args -> Command line arguments, as a list, to pass to RoboDK on startup (such as ['/NOSPLASH','/NOSHOW']), to not display RoboDK. It has no effect if RoboDK is already running.
In 4 (optional) : robodk_path -> RoboDK path. Leave it to the default None for the default path (C:/RoboDK/bin/RoboDK.exe).
"""
if type(args) is str:
args = [args]
self.IP = robodk_ip
self.ARGUMENTS = args
if robodk_path is not None:
self.APPLICATION_DIR = robodk_path
else:
self.APPLICATION_DIR = getPathRoboDK()
if ('/API_NODELAY' in self.ARGUMENTS or '-API_NODELAY' in self.ARGUMENTS):
self.NODELAY = True
if port is not None:
self.PORT_START = port
self.PORT_END = port
self.ARGUMENTS.append("-PORT=%i" % port)
elif ('/NEWINSTANCE' in self.ARGUMENTS or '-NEWINSTANCE' in self.ARGUMENTS):
from socket import socket
with socket() as s:
s.bind(('',0))
port = s.getsockname()[1]
print("Using available port %i" % port)
self.PORT_START = port
self.PORT_END = port
self.ARGUMENTS.append("-PORT=%i" % port)
if "-DEBUG" in self.ARGUMENTS or "/DEBUG" in self.ARGUMENTS:
self.DEBUG = True
elif self.DEBUG:
            self.ARGUMENTS.append("-DEBUG")
self.Connect()
def _verify_connection(self):
"""Verify that we are connected to the RoboDK API server"""
use_new_version = True
if use_new_version:
self._send_line('RDK_API')
self._send_array([self.SAFE_MODE, self.AUTO_UPDATE])
response = self._rec_line()
ver_api = self._rec_int()
self.BUILD = self._rec_int()
self._check_status()
return response == 'RDK_API'
else:
self._send_line('CMD_START')
self._send_line(str(self.SAFE_MODE) + ' ' + str(self.AUTO_UPDATE))
#fprintf(self.COM, sprintf('%i %i'), self.SAFE_MODE, self.AUTO_UPDATE))# appends LF
response = self._rec_line()
if response == 'READY':
ok = 1
else:
ok = 0
return ok
def _require_build(self, build_required):
if self.BUILD == 0:
# unknown build number. Use new API hello command
return True
if self.BUILD < build_required:
raise Exception("This function is unavailable. Update RoboDK to use this function through the API: https://robodk.com/download")
return True
def Disconnect(self):
"""Stops the communication with RoboDK. If setRunMode is set to RUNMODE_MAKE_ROBOTPROG for offline programming, any programs pending will be generated."""
self.COM.close()
def Finish(self):
"""Stops the communication with RoboDK. If setRunMode is set to RUNMODE_MAKE_ROBOTPROG for offline programming, any programs pending will be generated.
.. seealso:: :func:`~robolink.Robolink.setRunMode`, :func:`~robolink.Robolink.AddProgram`, :func:`~robolink.Robolink.ProgramStart`"""
self.Disconnect()
def NewLink(self):
"""Reconnect the API using a different communication link."""
try:
#if True:
import socket
#self.COM.close()
self.COM = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.NODELAY:
self.COM.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.COM.connect((self.IP, self.PORT))
connected = self._is_connected()
if connected > 0:
self._verify_connection()
self.COM.settimeout(self.TIMEOUT)
else:
print("Failed to reconnect (1)")
except:
print("Failed to reconnect (2)")
def Connect(self):
"""Establish a connection with RoboDK. If RoboDK is not running it will attempt to start RoboDK from the default installation path (otherwise APPLICATION_DIR must be set properly).
If the connection succeeds it returns 1, otherwise it returns 0"""
def start_robodk(command):
print('Starting %s\n' % self.APPLICATION_DIR)
import subprocess
#import time
#tstart = time.time()
def output_reader(proc):
for line in iter(proc.stdout.readline, b''):
ln = str(line.decode("utf-8")).strip()
print(ln)
from sys import platform as _platform
p = None
if (_platform == "linux" or _platform == "linux2") and os.path.splitext(command[0])[1] == ".sh":
p = subprocess.Popen(command, shell=True, executable='/bin/bash', stdout=subprocess.PIPE)
else:
p = subprocess.Popen(command,stdout=subprocess.PIPE)
while True:
line = str(p.stdout.readline().decode("utf-8")).strip()
print(line)
if 'running' in line.lower():
#telapsed = time.time() - tstart
#print("RoboDK startup time: %.3f" % telapsed)
break
#if self.DEBUG:
# Important! Make sure we consume stdout (at least in Debug mode)
import threading
t = threading.Thread(target=output_reader, args=(p,))
t.start()
#with subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=1, universal_newlines=True) as p:
# self._ProcessID = p.pid
# for line in p.stdout:
# line_ok = line.strip()
# print(line_ok)
# if 'running' in line_ok.lower():
# print("RoboDK is running")
# return #does not return!!
import socket
connected = 0
for i in range(2):
for port in range(self.PORT_START,self.PORT_END+1):
# Prevent warning message by closing the previous socket
if self.COM:
self.COM.close()
self.COM = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.NODELAY:
self.COM.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.COM.settimeout(1)
try:
self.COM.connect((self.IP, port))
connected = self._is_connected()
if connected > 0:
self.COM.settimeout(self.TIMEOUT)
break
except:
connected = connected
if connected > 0:# if status is closed, try to open application
self.PORT = port
break
elif i == 0:
if self.IP != 'localhost':
break
try:
if self.APPLICATION_DIR == '':
connected = 0
return connected
command = [self.APPLICATION_DIR] + self.ARGUMENTS
start_robodk(command)
#import time
#time.sleep(5) # wait for RoboDK to start and check network license.
except:
raise Exception('Application path is not correct or could not start: ' + self.APPLICATION_DIR)
if connected > 0 and not self._verify_connection():
connected = 0
return connected
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# public methods
def Item(self, name, itemtype=None):
"""Returns an item by its name. If there is no exact match it will return the last closest match.
Specify what type of item you are looking for with itemtype. This is useful if 2 items have the same name but different type.
(check variables ITEM_TYPE_*)
:param str name: name of the item (name of the item shown in the RoboDK station tree)
:param int itemtype: type of the item to be retrieved (avoids confusion if there are similar name matches). Use ITEM_TYPE_*.
.. code-block:: python
:caption: Available Item types
ITEM_TYPE_STATION=1 # station item (.rdk files)
ITEM_TYPE_ROBOT=2 # robot item (.robot files)
ITEM_TYPE_FRAME=3 # reference frame item
ITEM_TYPE_TOOL=4 # tool item (.tool files or tools without geometry)
ITEM_TYPE_OBJECT=5 # object item (.stl, .step, .iges, ...)
ITEM_TYPE_TARGET=6 # target item
ITEM_TYPE_PROGRAM=8 # program item (made using the GUI)
ITEM_TYPE_PROGRAM_PYTHON=10 # Python program or macro
.. seealso:: :func:`~robolink.Robolink.ItemList`, :func:`~robolink.Robolink.ItemUserPick`
.. seealso:: :func:`~robolink.Item.Name`, :func:`~robolink.Item.Pose`, :func:`~robolink.Item.setPose`, :func:`~robolink.Item.setParent`, :func:`~robolink.Item.setJoints`, :func:`~robolink.Item.MoveJ`, :func:`~robolink.Item.MoveL`
Example:
.. code-block:: python
from robolink import * # import the robolink library
            RDK = Robolink() # connect to the RoboDK API (RoboDK starts if it has not started)
tool = RDK.Item('Tool') # Retrieve an item named tool
robot = RDK.Item('', ITEM_TYPE_ROBOT) # the first available robot
"""
if type(name) is not str:
raise Exception("Invalid name: provide a name as a string. Item names are visible in the RoboDK tree.")
self._check_connection()
if itemtype is None:
command = 'G_Item'
self._send_line(command)
self._send_line(name)
else:
command = 'G_Item2'
self._send_line(command)
self._send_line(name)
self._send_int(itemtype)
item = self._rec_item()# item = fread(com, 2, 'ulong');% ulong is 32 bits!!!
self._check_status()
return item
def ItemList(self, filter=None, list_names=False):
"""Returns a list of items (list of name or pointers) of all available items in the currently open station of RoboDK.
:param int filter: (optional) Filter the list by a specific item type (ITEM_TYPE_*). For example: RDK.ItemList(filter = ITEM_TYPE_ROBOT)
:param int list_names: (optional) Set to True to return a list of names instead of a list of :class:`.Item`
.. seealso:: :func:`~robolink.Robolink.Item`, :func:`~robolink.Robolink.ItemUserPick`
"""
self._check_connection()
retlist = []
if list_names:
if filter is None:
command = 'G_List_Items'
self._send_line(command)
else:
command = 'G_List_Items_Type'
self._send_line(command)
self._send_int(filter)
count = self._rec_int()
for i in range(count):
namei = self._rec_line()
retlist.append(namei)
else:
if filter is None:
command = 'G_List_Items_ptr'
self._send_line(command)
else:
command = 'G_List_Items_Type_ptr'
self._send_line(command)
self._send_int(filter)
count = self._rec_int()
for i in range(count):
itemi = self._rec_item()
retlist.append(itemi)
self._check_status()
return retlist
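    # Illustrative sketch (not part of the original module): using ItemList with
    # the filter parameter documented above to print the names of all robots:
    #   RDK = Robolink()
    #   for name in RDK.ItemList(filter=ITEM_TYPE_ROBOT, list_names=True):
    #       print(name)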
def ItemUserPick(self, message="Pick one item", itemtype=None):
"""Shows a RoboDK popup to select one object from the open station.
An item type can be specified to filter desired items. If no type is specified, all items are selectable.
(check variables ITEM_TYPE_*)
Example:
.. code-block:: python
RDK.ItemUserPick("Pick a robot", ITEM_TYPE_ROBOT)
:param str message: message to display
:param int itemtype: filter choices by a specific item type (ITEM_TYPE_*)
.. seealso:: :func:`~robolink.Robolink.Item`, :func:`~robolink.Robolink.ItemList`
"""
self._check_connection()
if itemtype is None:
itemtype = -1
command = 'PickItem'
self._send_line(command)
self._send_line(message)
self._send_int(itemtype)
self.COM.settimeout(3600) # wait up to 1 hour for user input
item = self._rec_item()
self.COM.settimeout(self.TIMEOUT)
self._check_status()
return item
def ShowRoboDK(self):
"""Show or raise the RoboDK window
.. seealso:: :func:`~robolink.Robolink.setWindowState`"""
self._check_connection()
command = 'RAISE'
self._send_line(command)
self._check_status()
def HideRoboDK(self):
"""Hide the RoboDK window. RoboDK will keep running as a process
.. seealso:: :func:`~robolink.Robolink.setWindowState`"""
self._check_connection()
command = 'HIDE'
self._send_line(command)
self._check_status()
def CloseRoboDK(self):
"""Close RoboDK window and finish RoboDK's execution."""
self._check_connection()
command = 'QUIT'
self._send_line(command)
self._check_status()
def Version(self):
"""Close RoboDK window and finish RoboDK's execution."""
self._check_connection()
command = 'Version'
self._send_line(command)
app_name = self._rec_line()
bit_arch = self._rec_int()
ver4 = self._rec_line()
date_build = self._rec_line()
self._check_status()
return ver4
def setWindowState(self, windowstate=WINDOWSTATE_NORMAL):
"""Set the state of the RoboDK window
:param int windowstate: state of the window (WINDOWSTATE_*)
.. code-block:: python
:caption: Allowed window states
WINDOWSTATE_HIDDEN = -1 # Hidden
WINDOWSTATE_SHOW = 0 # Visible
WINDOWSTATE_MINIMIZED = 1 # Minimize window
WINDOWSTATE_NORMAL = 2 # Show normal window (last known state)
WINDOWSTATE_MAXIMIZED = 3 # Show maximized window
            WINDOWSTATE_FULLSCREEN = 4 # Show fullscreen window
WINDOWSTATE_CINEMA = 5 # Show maximized window without the toolbar and without the menu
            WINDOWSTATE_FULLSCREEN_CINEMA= 6 # Show fullscreen window without the toolbar and without the menu
.. seealso:: :func:`~robolink.Robolink.setFlagsRoboDK`
"""
self._check_connection()
command = 'S_WindowState'
self._send_line(command)
self._send_int(windowstate)
self._check_status()
def setFlagsRoboDK(self, flags=FLAG_ROBODK_ALL):
"""Update the RoboDK flags. RoboDK flags allow defining how much access the user has to RoboDK features. Use a FLAG_ROBODK_* variables to set one or more flags.
:param int flags: state of the window (FLAG_ROBODK_*)
.. code-block:: python
:caption: Allowed RoboDK flags
FLAG_ROBODK_TREE_ACTIVE = 1 # Enable the tree
FLAG_ROBODK_3DVIEW_ACTIVE = 2 # Enable the 3D view (3D mouse navigation)
FLAG_ROBODK_LEFT_CLICK = 4 # Enable left clicks
FLAG_ROBODK_RIGHT_CLICK = 8 # Enable right clicks
FLAG_ROBODK_DOUBLE_CLICK = 16 # Enable double clicks
FLAG_ROBODK_MENU_ACTIVE = 32 # Enable the main menu (complete menu)
FLAG_ROBODK_MENUFILE_ACTIVE = 64 # Enable the File menu
FLAG_ROBODK_MENUEDIT_ACTIVE = 128 # Enable the Edit menu
FLAG_ROBODK_MENUPROGRAM_ACTIVE = 256 # Enable the Program menu
FLAG_ROBODK_MENUTOOLS_ACTIVE = 512 # Enable the Tools menu
FLAG_ROBODK_MENUUTILITIES_ACTIVE = 1024 # Enable the Utilities menu
FLAG_ROBODK_MENUCONNECT_ACTIVE = 2048 # Enable the Connect menu
FLAG_ROBODK_WINDOWKEYS_ACTIVE = 4096 # Enable the keyboard
FLAG_ROBODK_TREE_VISIBLE = 8192 # Make the station tree visible
FLAG_ROBODK_REFERENCES_VISIBLE = 16384 # Make the reference frames visible
FLAG_ROBODK_NONE = 0 # Disable everything
FLAG_ROBODK_ALL = 0xFFFF # Enable everything
FLAG_ROBODK_MENU_ACTIVE_ALL # Enable the menu only
.. seealso:: :func:`~robolink.Robolink.setFlagsItem`, :func:`~robolink.Robolink.setWindowState`
"""
self._check_connection()
command = 'S_RoboDK_Rights'
self._send_line(command)
self._send_int(flags)
self._check_status()
def setFlagsItem(self, item, flags=FLAG_ITEM_ALL):
"""Update item flags. Item flags allow defining how much access the user has to item-specific features. Use FLAG_ITEM_* flags to set one or more flags.
:param item: item to set (set to 0 to apply to all items)
:type item: :class:`Item`
:param flags: set the item flags (FLAG_ITEM_*)
:type flags: int
.. seealso:: :func:`~robolink.Robolink.getFlagsItem`, :func:`~robolink.Robolink.setFlagsRoboDK`, :func:`~robolink.Robolink.setWindowState`"""
self._check_connection()
command = 'S_Item_Rights'
self._send_line(command)
self._send_item(item)
self._send_int(flags)
self._check_status()
def getFlagsItem(self, item):
"""Retrieve current item flags. Item flags allow defining how much access the user has to item-specific features. Use FLAG_ITEM_* flags to set one or more flags.
:param item: item to get flags
:type item: :class:`Item`
.. code-block:: python
:caption: Allowed RoboDK flags
FLAG_ITEM_SELECTABLE = 1 # Allow selecting the item
FLAG_ITEM_EDITABLE = 2 # Allow editing the item
FLAG_ITEM_DRAGALLOWED = 4 # Allow dragging the item
FLAG_ITEM_DROPALLOWED = 8 # Allow dropping nested items
FLAG_ITEM_ENABLED = 32 # Enable this item in the tree
FLAG_ITEM_NONE = 0 # Disable everything
FLAG_ITEM_ALL = 64+32+8+4+2+1 # Enable everything
.. seealso:: :func:`~robolink.Robolink.setFlagsItem`, :func:`~robolink.Robolink.setFlagsRoboDK`, :func:`~robolink.Robolink.setWindowState`
"""
self._check_connection()
command = 'S_Item_Rights'
self._send_line(command)
self._send_item(item)
        flags = self._rec_int()
self._check_status()
return flags
def ShowMessage(self, message, popup=True):
"""Show a message from the RoboDK window. By default, the message will be a blocking popup. Alternatively, it can be a message displayed at the bottom of RoboDK's main window.
:param str message: message to display
:param bool popup: Set to False to display the message in the RoboDK's status bar (not blocking)
"""
self._check_connection()
if popup:
command = 'ShowMessage'
self._send_line(command)
self._send_line(message)
self.COM.settimeout(3600) # wait up to 1 hour user to hit OK
self._check_status()
self.COM.settimeout(self.TIMEOUT)
else:
command = 'ShowMessageStatus'
self._send_line(command)
self._send_line(message)
self._check_status()
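    # Illustrative sketch (not part of the original module): the two ShowMessage
    # modes documented above:
    #   RDK.ShowMessage("Calibration complete")            # blocking popup
    #   RDK.ShowMessage("Processing step 3 of 10", False)  # status bar, non-blocking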
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def Copy(self, item):
"""Makes a copy of an item (same as Ctrl+C), which can be pasted (Ctrl+V) using Paste().
:param item: Item to copy to the clipboard
:type item: :class:`.Item`
.. seealso:: :func:`~robolink.Robolink.Paste`, Item. :func:`~robolink.Item.Copy`
Example:
.. code-block:: python
RDK = Robolink()
object = RDK.Item('My Object')
object.Copy() # same as RDK.Copy(object) also works
object_copy1 = RDK.Paste()
object_copy1.setName('My Object (copy 1)')
object_copy2 = RDK.Paste()
object_copy2.setName('My Object (copy 2)')
"""
self._check_connection()
command = 'Copy'
self._send_line(command)
self._send_item(item)
self._check_status()
def Paste(self, paste_to=0, paste_times=1):
"""Paste the copied item as a dependency of another item (same as Ctrl+V). Paste should be used after Copy(). It returns the newly created item.
:param paste_to: Item to attach the copied item (optional)
:type paste_to: :class:`.Item`
:param int paste_times: number of times to paste the item (returns a list if greater than 1)
:return: New item created
:rtype: :class:`.Item`
.. seealso:: :func:`~robolink.Robolink.Copy`
"""
if paste_times > 1:
self._require_build(10500)
self._check_connection()
command = 'PastN'
self._send_line(command)
self._send_item(paste_to)
self._send_int(paste_times)
ntimes = self._rec_int()
list_items = []
for i in range(ntimes):
newitem = self._rec_item()
list_items.append(newitem)
self._check_status()
return list_items
else:
self._check_connection()
command = 'Paste'
self._send_line(command)
self._send_item(paste_to)
newitem = self._rec_item()
self._check_status()
return newitem
def AddFile(self, filename, parent=0):
"""Load a file and attach it to parent (if provided). The call returns the newly added :class:`.Item`. If the new file is an object and it is attached to a robot it will be automatically converted to a tool.
:param str filename: any file to load, supported by RoboDK. Supported formats include STL, STEP, IGES, ROBOT, TOOL, RDK,... It is also possible to load supported robot programs, such as SRC (KUKA), SCRIPT (Universal Robots), LS (Fanuc), JBI (Motoman), MOD (ABB), PRG (ABB), ...
:param parent: item to attach the newly added object (optional)
:type parent: :class:`.Item`
Example:
.. code-block:: python
RDK = Robolink()
item = RDK.AddFile(r'C:\\Users\\Name\\Desktop\\object.step')
item.setPose(transl(100,50,500))
# Add a tool to an existing robot:
tool = RDK.AddFile(r'C:\\Users\\Name\\Desktop\\robot-tool.stl', robot)
tool.setPoseTool(transl(100,50,500))
# Add a reference frame, move it and add an object to that reference frame (locally):
            frame = RDK.AddFrame('Reference A')
            frame.setPose(transl(100,200,300))
            new_object = RDK.AddFile('path-to-object.stl', frame)
.. seealso:: :func:`~robolink.Robolink.Save`, :func:`~robolink.Robolink.AddFrame`, :func:`~robolink.Robolink.AddTool`, :func:`~robolink.Robolink.Copy`, :func:`~robolink.Robolink.Paste`
"""
self._check_connection()
command = 'Add'
self._send_line(command)
self._send_line(filename)
self._send_item(parent)
newitem = self._rec_item()
self._check_status()
return newitem
def AddShape(self, triangle_points, add_to=0, override_shapes = False):
"""Adds a shape provided triangle coordinates. Triangles must be provided as a list of vertices. A vertex normal can be provided optionally.
:param triangle_points: List of vertices grouped by triangles.
:type triangle_points: :class:`robodk.Mat` (3xN or 6xN matrix, N must be multiple of 3 because vertices must be stacked by groups of 3)
:param parent: item to attach the newly added geometry (optional)
:type parent: :class:`.Item`
:param override_shapes: Set to True to fill the object with a new shape
:type override_shapes: bool
:return: added object/shape (0 if failed)
:rtype: :class:`.Item`
.. seealso:: :func:`~robolink.Robolink.AddCurve`, :func:`~robolink.Robolink.AddPoints`
"""
if isinstance(triangle_points,list):
triangle_points = tr(Mat(triangle_points))
elif not isinstance(triangle_points, Mat):
raise Exception("triangle_points must be a 3xN or 6xN list or matrix")
self._check_connection()
command = 'AddShape2'
self._send_line(command)
self._send_matrix(triangle_points)
self._send_item(add_to)
self._send_int(1 if override_shapes else 0)
newitem = self._rec_item()
self._check_status()
return newitem
def AddCurve(self, curve_points, reference_object=0, add_to_ref=False, projection_type=PROJECTION_ALONG_NORMAL_RECALC):
"""Adds a curve provided point coordinates. The provided points must be a list of vertices. A vertex normal can be provided optionally.
:param curve_points: List of points defining the curve
:type curve_points: :class:`robodk.Mat` (3xN matrix, or 6xN to provide curve normals as ijk vectors)
:param reference_object: item to attach the newly added geometry (optional)
:type reference_object: :class:`.Item`
:param bool add_to_ref: If True, the curve will be added as part of the object in the RoboDK item tree (a reference object must be provided)
:param int projection_type: type of projection. Use the PROJECTION_* flags.
:return: added object/shape (0 if failed)
:rtype: :class:`.Item`
.. code-block:: python
:caption: Available projection types
PROJECTION_NONE = 0 # No projection
PROJECTION_CLOSEST = 1 # The projection will be the closest point on the surface
PROJECTION_ALONG_NORMAL = 2 # The projection will be done along the normal.
PROJECTION_ALONG_NORMAL_RECALC = 3 # The projection will be done along the normal. Furthermore, the normal will be recalculated according to the surface normal.
PROJECTION_CLOSEST_RECALC = 4 # The projection will be the closest point on the surface and the normals will be recalculated
PROJECTION_RECALC = 5 # The normals are recalculated according to the surface normal of the closest projection. The points are not changed.
.. seealso:: :func:`~robolink.Robolink.AddShape`, :func:`~robolink.Robolink.AddPoints`
"""
if isinstance(curve_points,list):
curve_points = Mat(curve_points).tr()
elif not isinstance(curve_points, Mat):
raise Exception("curve_points must be a 3xN or 6xN list or matrix")
self._check_connection()
command = 'AddWire'
self._send_line(command)
self._send_matrix(curve_points)
self._send_item(reference_object)
self._send_int(1 if add_to_ref else 0)
self._send_int(projection_type)
newitem = self._rec_item()
self._check_status()
return newitem
def AddPoints(self, points, reference_object=0, add_to_ref=False, projection_type=PROJECTION_ALONG_NORMAL_RECALC):
"""Adds a list of points to an object. The provided points must be a list of vertices. A vertex normal can be provided optionally.
:param points: list of points or matrix
:type points: :class:`robodk.Mat` (3xN matrix, or 6xN to provide point normals as ijk vectors)
:param reference_object: item to attach the newly added geometry (optional)
:type reference_object: :class:`.Item`
:param bool add_to_ref: If True, the points will be added as part of the object in the RoboDK item tree (a reference object must be provided)
:param int projection_type: type of projection. Use the PROJECTION_* flags.
:return: added object/shape (0 if failed)
:rtype: :class:`.Item`
.. seealso:: :func:`~robolink.Robolink.ProjectPoints`, :func:`~robolink.Robolink.AddShape`, :func:`~robolink.Robolink.AddCurve`
The difference between ProjectPoints and AddPoints is that ProjectPoints does not add the points to the RoboDK station.
"""
if isinstance(points,list):
points = Mat(points).tr()
elif not isinstance(points, Mat):
raise Exception("points must be a 3xN or 6xN list or matrix")
self._check_connection()
command = 'AddPoints'
self._send_line(command)
self._send_matrix(points)
self._send_item(reference_object)
self._send_int(1 if add_to_ref else 0)
self._send_int(projection_type)
newitem = self._rec_item()
self._check_status()
return newitem
def ProjectPoints(self, points, object_project, projection_type=PROJECTION_ALONG_NORMAL_RECALC):
"""Project a point or a list of points given its coordinates.
The provided points must be a list of [XYZ] coordinates. Optionally, a vertex normal can be provided [XYZijk].
It returns the projected points as a list of points (empty matrix if failed).
:param points: list of points to project
:type points: list of points (XYZ or XYZijk list of floats), or :class:`robodk.Mat` (3xN matrix, or 6xN to provide point normals as ijk vectors)
:param object_project: object to project the points
:type object_project: :class:`.Item`
:param projection_type: Type of projection. For example: PROJECTION_ALONG_NORMAL_RECALC will project along the point normal and recalculate the normal vector on the surface projected.
:type projection_type: int
The difference between ProjectPoints and AddPoints is that ProjectPoints does not add the points to the RoboDK station.
"""
islist = False
if isinstance(points,list):
islist = True
points = Mat(points).tr()
# Safety check for backwards compatibility
if points.size(0) != 6 and points.size(1) == 6:
points = points.tr()
elif not isinstance(points, Mat):
raise Exception("points must be a 3xN or 6xN list or matrix")
self._check_connection()
command = 'ProjectPoints'
self._send_line(command)
self._send_matrix(points)
self._send_item(object_project)
self._send_int(projection_type)
self.COM.settimeout(30) # 30 seconds timeout
projected_points = self._rec_matrix() # will wait here
self.COM.settimeout(self.TIMEOUT)
self._check_status()
if islist:
projected_points = list(projected_points)
return projected_points
def CloseStation(self):
"""Closes the current RoboDK station without suggesting to save"""
self._require_build(12938)
self._check_connection()
self._send_line('RemoveStn')
self._check_status()
def Save(self, filename, itemsave=0):
"""Save an item or a station to a file (formats supported include RDK, STL, ROBOT, TOOL, ...). If no item is provided, the open station is saved.
:param str filename: File path to save
:param itemsave: Item to save (leave at 0 to save the current RoboDK station as an RDK file)
:type itemsave: :class:`.Item`
.. seealso:: :func:`~robolink.Robolink.AddFile`
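Example (a minimal sketch; the file name 'backup.rdk' is an assumption):
.. code-block:: python
from robolink import * # API to communicate with RoboDK
RDK = Robolink()
# Save the currently open station next to the original RDK file
path_station = RDK.getParam('PATH_OPENSTATION')
RDK.Save(path_station + '/backup.rdk')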
"""
self._check_connection()
command = 'Save'
self._send_line(command)
self._send_line(filename)
self._send_item(itemsave)
self._check_status()
def AddStation(self, name='New Station'):
"""Add a new empty station. It returns the station :class:`.Item` created.
:param str name: name of the station
.. seealso:: :func:`~robolink.Robolink.AddFile`"""
self._check_connection()
command = 'NewStation'
self._send_line(command)
self._send_line(name)
newitem = self._rec_item()
self._check_status()
return newitem
def AddTarget(self, name, itemparent=0, itemrobot=0):
"""Add a new target that can be reached with a robot.
:param str name: Target name
:param itemparent: Reference frame to attach the target
:type itemparent: :class:`.Item`
:param itemrobot: Robot that will be used to go to self target (optional)
:type itemrobot: :class:`.Item`
:return: New target item created
:rtype: :class:`.Item`
.. seealso:: :func:`~robolink.Robolink.AddFrame`
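Example (a minimal sketch; the item names and coordinates are assumptions):
.. code-block:: python
from robolink import * # API to communicate with RoboDK
from robodk import * # basic matrix operations
RDK = Robolink()
# Create a reference frame and a target attached to it
frame = RDK.AddFrame('Frame Example')
target = RDK.AddTarget('Target Example', frame)
# Place the target 500 mm above the reference frame origin
target.setPose(xyzrpw_2_pose([0, 0, 500, 0, 0, 0]))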
"""
self._check_connection()
command = 'Add_TARGET'
self._send_line(command)
self._send_line(name)
self._send_item(itemparent)
self._send_item(itemrobot)
newitem = self._rec_item()
self._check_status()
return newitem
def AddFrame(self, name, itemparent=0):
"""Adds a new reference Frame. It returns the new :class:`.Item` created.
:param str name: name of the new reference frame
:param itemparent: Item to attach the new reference frame (such as another reference frame)
:type itemparent: :class:`.Item`
.. seealso:: :func:`~robolink.Robolink.AddTarget`"""
self._check_connection()
command = 'Add_FRAME'
self._send_line(command)
self._send_line(name)
self._send_item(itemparent)
newitem = self._rec_item()
self._check_status()
return newitem
def AddProgram(self, name, itemrobot=0):
"""Add a new program to the RoboDK station. Programs can be used to simulate a specific sequence, to generate vendor specific programs (Offline Programming) or to run programs on the robot (Online Programming).
It returns the new :class:`.Item` created.
Tip: Use the MoveRobotThroughLine.py macro to create programs in the RoboDK station (Option 2).
:param name: Name of the program
:type name: str
:param itemrobot: Robot that will be used for this program. It is not required to specify the robot if the station has only one robot or mechanism.
:type itemrobot: :class:`.Item`
:return: New program item
:rtype: :class:`.Item`
.. seealso:: :func:`~robolink.Robolink.AddTarget`, :func:`~robolink.Item.MoveJ`, :func:`~robolink.Item.MoveL`, :func:`~robolink.Item.setDO`, :func:`~robolink.Item.waitDI`, :func:`~robolink.Item.Pause`, :func:`~robolink.Item.RunCodeCustom`, :func:`~robolink.Item.RunInstruction`, :func:`~robolink.Item.ShowInstructions`, :func:`~robolink.Item.ShowTargets`, :func:`~robolink.Item.Update`
Example 1 - Generic program with movements:
.. code-block:: python
# Turn off rendering (faster)
RDK.Render(False)
prog = RDK.AddProgram('AutoProgram')
# Hide program instructions (optional, but faster)
prog.ShowInstructions(False)
# Retrieve the current robot position:
pose_ref = robot.Pose()
# Iterate through a number of points
for i in range(len(POINTS)):
# add a new target
ti = RDK.AddTarget('Auto Target %i' % (i+1))
# use the reference pose and update the XYZ position
pose_ref.setPos(POINTS[i])
ti.setPose(pose_ref)
# force to use the target as a Cartesian target (default)
ti.setAsCartesianTarget()
# Add the target as a Linear/Joint move in the new program
prog.MoveL(ti)
# Hide the target items from the tree: each movement still keeps a reference to its own target.
# Right click the movement instruction and select "Select Target" to see the target in the tree
prog.ShowTargets(False)
# Turn rendering ON before starting the simulation (automatic if we are done)
RDK.Render(True)
#--------------------------------------
# Update the program path to display the yellow path in RoboDK.
# Set collision checking ON or OFF
check_collisions = COLLISION_OFF
# Update the path (can take some time if collision checking is active)
update_result = prog.Update(check_collisions)
# Retrieve the result
n_insok = update_result[0]
time = update_result[1]
distance = update_result[2]
percent_ok = update_result[3]*100
str_problems = update_result[4]
if percent_ok < 100.0:
msg_str = "WARNING! Problems with <strong>%s</strong> (%.1f):<br>%s" % (program_name, percent_ok, str_problems)
else:
msg_str = "No problems found for program %s" % program_name
# Notify the user:
print(msg_str)
RDK.ShowMessage(msg_str)
Example 2 - Program flow, manage inputs/outputs and program calls:
.. code-block:: python
# Add a pause (in milliseconds)
program.Pause(1000) # pause motion 1 second
# Stop the program so that it can be resumed
# It provokes a STOP (pause until the operator desires to resume)
program.Pause()
# Add a program call or specific code in the program:
program.RunInstruction('ChangeTool(2)',INSTRUCTION_CALL_PROGRAM)
program.RunInstruction('ChangeTool(2);',INSTRUCTION_INSERT_CODE)
# Set a digital output
program.setDO('DO_NAME', 1)
# Wait for a digital input:
program.waitDI('DI_NAME', 1)
Example 3 - Add movements with external axes:
.. code-block:: python
# Add a new movement involving external axes:
# First: create a new target
target = RDK.AddTarget("T1", reference)
# Set the target as Cartesian (default)
target.setAsCartesianTarget()
# Specify the position of the external axes:
external_axes = [10, 20]
# The robot joints are calculated to reach the target
# given the position of the external axes
target.setJoints([0,0,0,0,0,0] + external_axes)
# Specify the pose (position with respect to the reference frame):
target.setPose(KUKA_2_Pose([x,y,z,w,p,r]))
# Add a new movement instruction linked to that target:
program.MoveJ(target)
Example 4 - Add a program call after each movement instruction inside a program:
.. code-block:: python
from robolink import * # API to communicate with RoboDK
from robodk import * # basic matrix operations
RDK = Robolink()
# Ask the user to select a program:
prog = RDK.ItemUserPick("Select a Program to modify", ITEM_TYPE_PROGRAM)
if not prog.Valid():
print("Operation cancelled or no programs available")
quit()
# Ask the user to enter a function call that will be added after each movement:
print("Program selected: " + prog.Name())
ins_call = mbox("Enter a program call to add after each movement", entry="SynchRobot")
if not ins_call:
print("Operation cancelled")
quit()
# Iterate through all the instructions in a program:
ins_id = 0
ins_count = prog.InstructionCount()
while ins_id < ins_count:
# Retrieve instruction
ins_nom, ins_type, move_type, isjointtarget, pose, joints = prog.Instruction(ins_id)
if ins_type == INS_TYPE_MOVE:
# Select the movement instruction as a reference
prog.InstructionSelect(ins_id)
# Add a new program call
prog.RunInstruction(ins_call, INSTRUCTION_CALL_PROGRAM)
# Advance one additional instruction as we just added another instruction
ins_id = ins_id + 1
ins_count = ins_count + 1
ins_id = ins_id + 1
More examples to generate programs directly from your script or move the robot directly from your program here:
:ref:`lbl-move-through-points`, or use the macro available in RoboDK/Library/Macros/MoveRobotThroughLine.py
"""
self._check_connection()
command = 'Add_PROG'
self._send_line(command)
self._send_line(name)
self._send_item(itemrobot)
newitem = self._rec_item()
self._check_status()
return newitem
def AddMillingProject(self, name='Milling settings', itemrobot=0):
"""Obsolete, use :func:`~robolink.Robolink.AddMachiningProject` instead"""
return self.AddMachiningProject(name, itemrobot)
def AddMachiningProject(self, name='Milling settings', itemrobot=0):
"""Add a new robot machining project. Machining projects can also be used for 3D printing, following curves and following points.
It returns the newly created :class:`.Item` containing the project settings.
Tip: Use the MoveRobotThroughLine.py macro to see an example that creates a new "curve follow project" given a list of points to follow (Option 4).
:param str name: Name of the project settings
:param itemrobot: Robot to use for the project settings (optional). It is not required to specify the robot if only one robot or mechanism is available in the RoboDK station.
:type itemrobot: :class:`.Item`
.. seealso:: :func:`~robolink.Item.setMachiningParameters`"""
self._check_connection()
command = 'Add_MACHINING'
self._send_line(command)
self._send_line(name)
self._send_item(itemrobot)
newitem = self._rec_item()
self._check_status()
return newitem
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def RunProgram(self, fcn_param, wait_for_finished = False):
"""Run a program (start a program). If the program exists in the RoboDK station it has the same behavior as right clicking a and selecting Run (or Run Python script for Python programs).
When generating a program offline (Offline Programming), the program call will be generated in the program output (RoboDK will handle the syntax when the code is generated for a specific robot using the post processor).
:param fcn_param: program name and parameters. Parameters can be provided for Python programs available in the RoboDK station as well.
:type fcn_param: str
:param bool wait_for_finished: Set to True to block execution during a simulation until the program finishes (skipped if the program does not exist or when the program is generated)
.. seealso:: :func:`~robolink.Robolink.Item`, :func:`~robolink.Robolink.AddProgram`, :func:`~robolink.Item.Busy`
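Example (a minimal sketch; the program name 'Prog1' is an assumption):
.. code-block:: python
from robolink import * # API to communicate with RoboDK
RDK = Robolink()
# Start the program and block until the simulation finishes
RDK.RunProgram('Prog1', wait_for_finished=True)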
"""
if wait_for_finished:
prog_item = self.Item(fcn_param, ITEM_TYPE_PROGRAM)
if not prog_item.Valid():
raise Exception('Invalid program %s' % fcn_param)
prog_status = prog_item.RunProgram()
prog_item.WaitFinished()
else:
prog_status = self.RunCode(fcn_param, True)
return prog_status
def RunCode(self, code, code_is_fcn_call=False):
"""Generate a program call or a customized instruction output in a program.
If code_is_fcn_call is set to True it has the same behavior as RDK.RunProgram(). In this case, when generating a program offline (offline programming), a function/procedure call will be generated in the program output (RoboDK will handle the syntax when the code is generated for a specific robot using the post processor).
If the program exists it will also run the program in simulate mode.
:param code: program name or code to generate
:type code: str
:param code_is_fcn_call: Set to True if the provided code corresponds to a function call (same as RunProgram()), if so, RoboDK will handle the syntax when the code is generated for a specific robot.
:type code_is_fcn_call: bool
Example to run an existing program in the RoboDK station:
.. code-block:: python
from robolink import * # import the robolink library
RDK = Robolink() # connect to the RoboDK API (RoboDK starts if it has not started)
RDK.RunCode("Prog1", True) # Run a program named Prog1 available in the RoboDK station
"""
self._check_connection()
command = 'RunCode'
self._send_line(command)
self._send_int(code_is_fcn_call)
self._send_line(code.replace('\r\n','<<br>>').replace('\n','<<br>>'))
prog_status = self._rec_int()
self._check_status()
return prog_status
def RunMessage(self, message, message_is_comment=False):
"""Show a message or a comment in the program generated offline (program generation). The message (or code) is displayed on the teach pendant of the robot.
:param str message: message or comment to display.
:param bool message_is_comment: Set to True to generate a comment in the generated code instead of displaying a message on the teach pendant of the robot.
"""
print('Message: ' + message)
self._check_connection()
command = 'RunMessage'
self._send_line(command)
self._send_int(message_is_comment)
self._send_line(message.replace('\r\n','<<br>>').replace('\n','<<br>>'))
self._check_status()
def Render(self, always_render=False):
"""Display/render the scene: update the display. This function turns default rendering (rendering after any modification of the station unless always_render is set to true).
Use Update to update the internal links of the complete station without rendering (when a robot or item has been moved).
:param bool always_render: Set to True to update the screen every time the station is modified (default behavior when Render() is not used).
.. seealso:: :func:`~robolink.Robolink.Update`
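Example (a minimal sketch showing how to speed up a batch of station changes):
.. code-block:: python
from robolink import * # API to communicate with RoboDK
RDK = Robolink()
RDK.Render(False) # Turn off automatic rendering (faster)
# ... modify the station here (add items, move robots, ...)
RDK.Render(True) # Turn rendering back on and update the screen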
"""
auto_render = not always_render;
self._check_connection()
command = 'Render'
self._send_line(command)
self._send_int(auto_render)
self._check_status()
def Update(self):
"""Update the screen. This updates the position of all robots and internal links according to previously set values.
This function is useful when Render is turned off (Example: "RDK.Render(False)"). Otherwise, by default RoboDK will update all links after any modification of the station (when robots or items are moved).
.. seealso:: :func:`~robolink.Robolink.Render`"""
self._check_connection()
command = 'Refresh'
self._send_line(command)
self._send_int(0)
self._check_status()
def IsInside(self, object_inside, object):
"""Return 1 (True) if object_inside is inside the object, otherwise, it returns 0 (False). Both objects must be of type :class:`.Item`"""
self._check_connection()
self._send_line('IsInside')
self._send_item(object_inside)
self._send_item(object)
inside = self._rec_int()
self._check_status()
return inside
def setCollisionActive(self, check_state = COLLISION_ON):
"""Set collision checking ON or OFF (COLLISION_ON/COLLISION_OFF) for a specific pair of objects (:class:`.Item`). This allows altering the collision map for Collision checking.
.. seealso:: :func:`~robolink.Robolink.setCollisionActivePair`, :func:`~robolink.Item.Visible`
"""
self._check_connection()
command = 'Collision_SetState'
self._send_line(command)
self._send_int(check_state)
ncollisions = self._rec_int()
self._check_status()
return ncollisions
def setCollisionActivePair(self, check_state, item1, item2, id1=0, id2=0):
"""Set collision checking ON or OFF (COLLISION_ON/COLLISION_OFF) for a specific pair of objects. Specify the link id for robots or moving mechanisms (id 0 is the base)
Returns 1 if succeeded. Returns 0 if setting the pair failed (for example, if a wrong id was provided).
.. seealso:: :func:`~robolink.Robolink.setCollisionActive`, :func:`~robolink.Robolink.Collisions`, :func:`~robolink.Item.Visible`
"""
self._check_connection()
command = 'Collision_SetPair'
self._send_line(command)
self._send_item(item1)
self._send_item(item2)
self._send_int(id1)
self._send_int(id2)
self._send_int(check_state)
success = self._rec_int()
self._check_status()
return success
def setCollisionActivePairList(self, list_check_state, list_item1, list_item2, list_id1=None, list_id2=None):
"""Set collision checking ON or OFF (COLLISION_ON/COLLISION_OFF) for a specific list of pairs of objects. This allows altering the collision map for Collision checking.
Specify the link id for robots or moving mechanisms (id 0 is the base).
.. seealso:: :func:`~robolink.Robolink.setCollisionActive`, :func:`~robolink.Robolink.Collisions`, :func:`~robolink.Item.setCollisionActivePair`
"""
npairs = min(len(list_check_state), min(len(list_item1), len(list_item2)))
self._check_connection()
self._send_line("Collision_SetPairList")
self._send_int(npairs)
for i in range(npairs):
self._send_item(list_item1[i])
self._send_item(list_item2[i])
id1 = 0
id2 = 0
if list_id1 is not None and len(list_id1) > i:
id1 = list_id1[i]
if list_id2 is not None and len(list_id2) > i:
id2 = list_id2[i]
self._send_int(id1)
self._send_int(id2)
self._send_int(list_check_state[i])
success = self._rec_int()
self._check_status()
return success
def Collisions(self):
"""Return the number of pairs of objects that are currently in a collision state.
.. seealso:: :func:`~robolink.Robolink.setCollisionActive`, :func:`~robolink.Robolink.Collisions`, :func:`~robolink.Robolink.CollisionItems`, :func:`~robolink.Item.Visible`
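Example (a minimal sketch; collision checking is turned on before querying):
.. code-block:: python
from robolink import * # API to communicate with RoboDK
RDK = Robolink()
RDK.setCollisionActive(COLLISION_ON)
print('Pairs of objects in a collision state: %i' % RDK.Collisions())
print(RDK.CollisionItems())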
"""
self._check_connection()
command = 'Collisions'
self._send_line(command)
ncollisions = self._rec_int()
self._check_status()
return ncollisions
def Collision(self, item1, item2):
"""Returns 1 if item1 and item2 collided. Otherwise returns 0.
.. seealso:: :func:`~robolink.Robolink.Collisions`, :func:`~robolink.Robolink.CollisionItems`, :func:`~robolink.Item.Visible`
"""
self._check_connection()
command = 'Collided'
self._send_line(command)
self._send_item(item1)
self._send_item(item2)
ncollisions = self._rec_int()
self._check_status()
return ncollisions
def CollisionItems(self):
"""Return the list of items that are in a collision state. This function can be used after calling Collisions() to retrieve the items that are in a collision state.
.. seealso:: :func:`~robolink.Robolink.Collisions`, :func:`~robolink.Item.Visible`
"""
self._check_connection()
command = 'Collision_Items'
self._send_line(command)
nitems = self._rec_int()
item_list = []
for i in range(nitems):
item_list.append(self._rec_item())
link_id = self._rec_int() # link id for robot items (ignored)
collision_times = self._rec_int() # number of objects it is in collisions with
self._check_status()
return item_list
def CollisionPairs(self):
"""Return the list of pairs of items that are in a collision state.
.. seealso:: :func:`~robolink.Robolink.Collisions`, :func:`~robolink.Item.Visible`
"""
self._check_connection()
command = 'Collision_Pairs'
self._send_line(command)
nitems = self._rec_int()
item_list = []
for i in range(nitems):
item_1 = self._rec_item()
id_1 = self._rec_int()
item_2 = self._rec_item()
id_2 = self._rec_int()
item_list.append([item_1, item_2, id_1, id_2])
self._check_status()
return item_list
def setSimulationSpeed(self, speed):
"""Set the simulation speed.
A simulation speed of 5 (default) means that 1 second of simulation time equals 5 seconds in a real application.
The slowest speed ratio allowed is 0.001. Set a large simulation ratio (>100) for fast simulation results.
:param speed: simulation ratio
:type speed: float
.. seealso:: :func:`~robolink.Robolink.SimulationSpeed`, :func:`~robolink.Robolink.SimulationTime`
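Example (a minimal sketch):
.. code-block:: python
from robolink import * # API to communicate with RoboDK
RDK = Robolink()
RDK.setSimulationSpeed(10) # simulate 10 times faster than real time
print(RDK.SimulationSpeed())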
"""
self._check_connection()
command = 'SimulateSpeed'
self._send_line(command)
self._send_int(speed*1000)
self._check_status()
def SimulationSpeed(self):
"""Return the simulation speed. A simulation speed of 1 means real-time simulation.
A simulation speed of 5 (default) means that 1 second of simulation time equals to 5 seconds in a real application.
.. seealso:: :func:`~robolink.Robolink.setSimulationSpeed`
"""
self._check_connection()
command = 'GetSimulateSpeed'
self._send_line(command)
speed = self._rec_int()/1000.0
self._check_status()
return speed
def SimulationTime(self):
"""Retrieve the simulation time (in seconds). Time of 0 seconds starts with the first time this function is called.
The simulation time changes depending on the simulation speed. The simulation time is usually faster than the real time (5 times by default).
.. seealso:: :func:`~robolink.Robolink.setSimulationSpeed`, :func:`~robolink.Robolink.SimulationSpeed`
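Example (a minimal sketch to estimate the cycle time of a simulated program; the program name 'Prog1' is an assumption):
.. code-block:: python
from robolink import * # API to communicate with RoboDK
RDK = Robolink()
t_start = RDK.SimulationTime()
RDK.RunProgram('Prog1', wait_for_finished=True)
t_end = RDK.SimulationTime()
print('Estimated cycle time: %.3f s' % (t_end - t_start))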
"""
self._check_connection()
command = 'GetSimTime'
self._send_line(command)
speed = self._rec_int()/1000.0
self._check_status()
return speed
def setRunMode(self, run_mode=1):
"""Set the run mode (behavior) of the script, for either simulation, offline programming or online programming.
By default, RoboDK shows the path simulation for movement instructions (run_mode=RUNMODE_SIMULATE).
.. code-block:: python
:caption: Available run modes
RUNMODE_SIMULATE=1 # performs the simulation moving the robot (default)
RUNMODE_QUICKVALIDATE=2 # performs a quick check to validate the robot movements
RUNMODE_MAKE_ROBOTPROG=3 # makes the robot program
RUNMODE_MAKE_ROBOTPROG_AND_UPLOAD=4 # makes the robot program and updates it to the robot
RUNMODE_MAKE_ROBOTPROG_AND_START=5 # makes the robot program and starts it on the robot (independently from the PC)
RUNMODE_RUN_ROBOT=6 # moves the real robot from the PC (PC is the client, the robot behaves like a server)
The following calls will alter the current run mode:
1- :func:`~robolink.Item.Connect` automatically sets RUNMODE_RUN_ROBOT. So it will use the robot driver together with the simulation.
2- :func:`~robolink.Robolink.ProgramStart` automatically sets the mode to RUNMODE_MAKE_ROBOTPROG. So it will generate the program
.. seealso:: :func:`~robolink.Robolink.RunMode`
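Example (a minimal sketch to generate a robot program instead of simulating; the program name 'Prog1' is an assumption):
.. code-block:: python
from robolink import * # API to communicate with RoboDK
RDK = Robolink()
RDK.setRunMode(RUNMODE_MAKE_ROBOTPROG)
RDK.ProgramStart('Prog1')
# ... robot movements requested here are generated as program code
RDK.Finish() # provokes the program generation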
"""
self._check_connection()
command = 'S_RunMode'
self._send_line(command)
self._send_int(run_mode)
self._check_status()
def RunMode(self):
"""Return the current run mode (behavior) of the script.
By default, RoboDK simulates any movement requested from the API (such as prog.MoveL), which corresponds to run_mode=RUNMODE_SIMULATE.
.. seealso:: :func:`~robolink.Robolink.setRunMode`
"""
self._check_connection()
command = 'G_RunMode'
self._send_line(command)
runmode = self._rec_int()
self._check_status()
return runmode
def getParams(self):
"""Get all the user parameters from the open RoboDK station.
Station parameters can also be modified manually by right clicking the station item and selecting "Station parameters"
:return: list of pairs of strings
:rtype: list of str
.. seealso:: :func:`~robolink.Robolink.getParam`, :func:`~robolink.Robolink.setParam`
"""
self._check_connection()
command = 'G_Params'
self._send_line(command)
nparam = self._rec_int()
params = []
for i in range(nparam):
param = self._rec_line()
value = self._rec_line()
if value.replace('.','',1).isnumeric():
value = float(value) # automatically convert int, long and float
params.append([param, value])
self._check_status()
return params
def getParam(self, param='PATH_OPENSTATION', str_type=True):
"""Get a global or a station parameter from the open RoboDK station.
Station parameters can also be modified manually by right clicking the station item and selecting "Station parameters"
:param str param: name of the parameter
:param bool str_type: True to retrieve a string parameter (False for binary/bytes type)
:return: value of the parameter.
:rtype: str, float or None if the parameter is unknown
.. code-block:: python
:caption: Available global parameters
PATH_OPENSTATION # Full path of the current station (.rdk file)
FILE_OPENSTATION # File name of the current station (name of the .rdk file)
PATH_DESKTOP # Full path to the desktop folder
.. seealso:: :func:`~robolink.Robolink.setParam`, :func:`~robolink.Robolink.getParams`
"""
self._check_connection()
if str_type:
command = 'G_Param'
self._send_line(command)
self._send_line(param)
value = self._rec_line()
self._check_status()
if value.startswith('UNKNOWN '):
return None
if value.replace('.','',1).isnumeric():
value = float(value) # automatically convert int, long and float
return value
else:
command = 'G_DataParam'
self._send_line(command)
self._send_line(param)
value = self._rec_bytes()
self._check_status()
return value
def setParam(self, param, value):
"""Set a station parameter. If the parameters exists, it will be updated. Otherwise, it will be added to the station.
:param str param: name of the parameter
:param str value: value of the parameter (value type can be str or bytes)
.. seealso:: :func:`~robolink.Robolink.getParam`
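Example (a minimal sketch; the parameter name 'MyCounter' is an assumption):
.. code-block:: python
from robolink import * # API to communicate with RoboDK
RDK = Robolink()
RDK.setParam('MyCounter', 25)
print(RDK.getParam('MyCounter')) # numeric values are returned as float (25.0)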
"""
self._check_connection()
if isinstance(value,bytes):
command = 'S_DataParam'
self._send_line(command)
self._send_line(str(param))
self._send_bytes(value)
else:
command = 'S_Param'
self._send_line(command)
self._send_line(str(param))
self._send_line(str(value).replace('\n',' '))
self._check_status()
def Command(self, cmd, value=''):
"""Send a special command. These commands are meant to have a specific effect in RoboDK, such as changing a specific setting or provoke specific events.
:param str cmd: Command name, such as Trace, Threads or Window.
:param str value: Command value (optional, not all commands require a value)
You can select Tools-Run Script-Show Commands to see all available commands.
.. image:: Commands.png
.. code-block:: python
:caption: Example commands
from robolink import *
RDK = Robolink() # Start the RoboDK API
# How to change the number of threads using by the RoboDK application:
RDK.Command("Threads", "4")
# How to change the default behavior of 3D view using the mouse:
RDK.Command("MouseClick_Left", "Select") # Set the left mouse click to select
RDK.Command("MouseClick_Mid", "Pan") # Set the mid mouse click to Pan the 3D view
RDK.Command("MouseClick_Right", "Rotate") # Set the right mouse click to Rotate the 3D view
RDK.Command("MouseClick", "Default") # Set the default mouse 3D navigation settings
# Provoke a resize event
RDK.Command("Window", "Resize")
# Reset the trace
RDK.Command("Trace", "Reset")
You can also pass commands through command line when starting RoboDK or when RoboDK is already running (add '-' to the command name).
More information about command line options available in the documentation: https://robodk.com/doc/en/RoboDK-API.html#CommandLine
.. code-block:: python
:caption: Example to start RoboDK in Chinese and white background using 6 threads and load a RoboDK project file
RoboDK -Lang=zh -ColorBgBottom=white -ColorBgTop=white -Threads=6 "path-to-file.rdk"
.. seealso:: :func:`~robolink.Robolink.setParam`
"""
self._check_connection()
command = 'SCMD'
self._send_line(command)
self._send_line(str(cmd))
self._send_line(str(value).replace('\n','<br>'))
self.COM.settimeout(3600)
line = self._rec_line()
self.COM.settimeout(self.TIMEOUT)
self._check_status()
return line
def getOpenStations(self):
"""Returns the list of open stations in RoboDK
.. seealso:: :func:`~robolink.Robolink.setActiveStation`, :func:`~robolink.Robolink.getParam`, :func:`~robolink.Item.Childs`, :func:`~robolink.Item.Save`, :func:`~robolink.Robolink.AddStation`
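Example (a minimal sketch listing the stations currently open):
.. code-block:: python
from robolink import * # API to communicate with RoboDK
RDK = Robolink()
stations = RDK.getOpenStations()
print('Open stations: ' + str([stn.Name() for stn in stations]))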
"""
self._check_connection()
command = 'G_AllStn'
self._send_line(command)
nstn = self._rec_int()
list_stn = []
for i in range(nstn):
list_stn.append(self._rec_item())
self._check_status()
return list_stn
def ActiveStation(self):
"""Returns the active station item (station currently visible)
.. seealso:: :func:`~robolink.Robolink.setActiveStation`, :func:`~robolink.Robolink.getParam`, :func:`~robolink.Item.Childs`, :func:`~robolink.Item.Save`, :func:`~robolink.Robolink.AddStation`
"""
self._check_connection()
command = 'G_ActiveStn'
self._send_line(command)
stn = self._rec_item()
self._check_status()
return stn
def setActiveStation(self, stn):
"""Set the active station (project currently visible)
:param stn: station item, it can be previously loaded as an RDK file
:type stn: :class:`.Item`
.. seealso:: :func:`~robolink.Robolink.ActiveStation`, :func:`~robolink.Robolink.getOpenStations`, :func:`~robolink.Robolink.getParam`, :func:`~robolink.Item.Childs`, :func:`~robolink.Robolink.AddFile`, :func:`~robolink.Robolink.AddStation`
"""
self._check_connection()
command = 'S_ActiveStn'
self._send_line(command)
self._send_item(stn)
self._check_status()
def ShowSequence(self, matrix):
"""Display a sequence of joints given a list of joints as a matrix.
This function can also display a sequence of instructions (RoKiSim format).
:param matrix: joint sequence as a 6xN matrix or instruction sequence as a 7xN matrix
:type matrix: :class:`.Mat`
Tip: use :func:`~robolink.Item.InstructionList` to retrieve the instruction list in RoKiSim format.
"""
Item(self, 0).ShowSequence(matrix)
def LaserTracker_Measure(self, estimate=[0,0,0], search=False):
"""Takes a laser tracker measurement with respect to its own reference frame. If an estimate point is provided, the laser tracker will first move to those coordinates. If search is True, the tracker will search for a target.
Returns the XYZ coordinates of the target if it was found. Otherwise it returns None."""
self._check_connection()
command = 'MeasLT'
self._send_line(command)
self._send_xyz(estimate)
self._send_int(1 if search else 0)
xyz = self._rec_xyz()
self._check_status()
if xyz[0]*xyz[0] + xyz[1]*xyz[1] + xyz[2]*xyz[2] < 0.0001:
return None
return xyz
def StereoCamera_Measure(self):
"""Takes a measurement with the C-Track stereocamera.
It returns two poses, the base reference frame and the measured object reference frame. Status is 0 if measurement succeeded."""
self._check_connection()
command = 'MeasPose'
self._send_line(command)
pose1 = self._rec_pose()
pose2 = self._rec_pose()
npoints1 = self._rec_int()
npoints2 = self._rec_int()
time = self._rec_int()
status = self._rec_int()
self._check_status()
return pose1, pose2, npoints1, npoints2, time, status
def Collision_Line(self, p1, p2, ref=eye(4)):
"""Checks the collision between a line and any objects in the station. The line is composed by 2 points.
:param p1: start point of the line
:type p1: list of float [x,y,z]
:param p2: end point of the line
:type p2: list of float [x,y,z]
:param ref: Reference of the two points with respect to the absolute station reference.
:type ref: :class:`.Mat`
:return: [collision (True or False), item (collided), point (point of collision with respect to the station)]
:rtype: [bool, :class:`.Item`, list of float as xyz]
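Example (a minimal sketch; the line coordinates are assumptions):
.. code-block:: python
from robolink import * # API to communicate with RoboDK
RDK = Robolink()
p1 = [0, 0, 0] # line start point, station coordinates in mm
p2 = [0, 0, 2000] # line end point, station coordinates in mm
collision, item, xyz = RDK.Collision_Line(p1, p2)
print(collision) # True if the line hits an object
print(xyz) # point of collision, if any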
"""
p1abs = ref*p1
p2abs = ref*p2
self._check_connection()
command = 'CollisionLine'
self._send_line(command)
self._send_xyz(p1abs)
self._send_xyz(p2abs)
itempicked = self._rec_item()
xyz = self._rec_xyz()
collision = itempicked.Valid()
self._check_status()
return collision, itempicked, xyz
def setPoses(self, items, poses):
"""Sets the relative positions (poses) of a list of items with respect to their parent. For example, the position of an object/frame/target with respect to its parent.
Use this function instead of setPose() for faster speed.
.. seealso:: :func:`~robolink.Item.setPose` (item), :func:`~robolink.Item.Pose` (item), :func:`~robolink.Robolink.setPosesAbs`
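Example (a minimal sketch; the target names and poses are assumptions):
.. code-block:: python
from robolink import * # API to communicate with RoboDK
from robodk import * # basic matrix operations
RDK = Robolink()
t1 = RDK.Item('Target 1', ITEM_TYPE_TARGET)
t2 = RDK.Item('Target 2', ITEM_TYPE_TARGET)
# Update both targets with respect to their parent reference frame in one call
poses = [xyzrpw_2_pose([0, 0, 100, 0, 0, 0]), xyzrpw_2_pose([0, 0, 200, 0, 0, 0])]
RDK.setPoses([t1, t2], poses)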
"""
if len(items) != len(poses):
raise Exception('The number of items must match the number of poses')
if len(items) == 0:
return
self._check_connection()
command = 'S_Hlocals'
self._send_line(command)
self._send_int(len(items))
for i in range(len(items)):
self._send_item(items[i])
self._send_pose(poses[i])
self._check_status()
def setPosesAbs(self, items, poses):
"""Set the absolute positions (poses) of a list of items with respect to the station reference. For example, the position of an object/frame/target with respect to its parent.
Use this function instead of setPose() for faster speed.
.. seealso:: :func:`~robolink.Item.setPoseAbs` (item), :func:`~robolink.Item.PoseAbs` (item), :func:`~robolink.Robolink.setPoses`
"""
if len(items) != len(poses):
raise Exception('The number of items must match the number of poses')
if len(items) == 0:
return
self._check_connection()
command = 'S_Hlocal_AbsS'
self._send_line(command)
self._send_int(len(items))
for i in range(len(items)):
self._send_item(items[i])
self._send_pose(poses[i])
self._check_status()
def Joints(self, robot_item_list):
"""Return the current joints of a list of robots.
.. seealso:: :func:`~robolink.Item.setJoints` (item), :func:`~robolink.Item.Joints` (item), :func:`~robolink.Robolink.setJoints`
"""
self._check_connection()
command = 'G_ThetasList'
self._send_line(command)
nrobs = len(robot_item_list)
self._send_int(nrobs)
joints_list = []
for i in range(nrobs):
self._send_item(robot_item_list[i])
joints_i = self._rec_array()
joints_list.append(joints_i)
self._check_status()
return joints_list
def setJoints(self, robot_item_list, joints_list):
"""Sets the current robot joints for a list of robot items and a list joints.
.. seealso:: :func:`~robolink.Item.setJoints` (item), :func:`~robolink.Item.Joints` (item), :func:`~robolink.Robolink.Joints`"""
nrobs = len(robot_item_list)
if nrobs != len(joints_list):
raise Exception('The size of the robot list does not match the size of the joints list')
self._check_connection()
command = 'S_ThetasList'
self._send_line(command)
self._send_int(nrobs)
for i in range(nrobs):
self._send_item(robot_item_list[i])
self._send_array(joints_list[i])
self._check_status()
def CalibrateTool(self, poses_xyzwpr, input_format=EULER_RX_RY_RZ, algorithm=CALIBRATE_TCP_BY_POINT, robot=None, tool=None):
"""Calibrate a TCP given a list of poses/joints and following a specific algorithm/method.
Tip: Provide the list of joints instead of poses to maximize accuracy for calibrated robots.
:param poses_xyzwpr: List of points or a list of robot joints (matrix 3xN or nDOFsxN)
:type poses_xyzwpr: :class:`.Mat` or a list of list of float
:param int input_format: Euler format. Optionally, use JOINT_FORMAT and provide the robot.
:param int algorithm: method/algorithm to use to calculate the new TCP. Tip: use CALIBRATE_TCP ...
:param robot: the robot must be provided to calculate the reference frame by joints
:type robot: :class:`.Item`
:param tool: provide a tool item to store the calibration data with that tool (the TCP is not updated, only the calibration joints)
:type tool: :class:`.Item`
:return: \n
[TCP, stats, errors]\n
Out 1 (TCP) - the TCP as a list [x,y,z] with respect to the robot flange\n
Out 2 (stats) - Statistics as [mean, standard deviation, max] - error stats summary\n
Out 3 (errors) - errors for each pose (array 1xN)\n
.. code-block:: python
:caption: Available Tool Calibration Algorithms
CALIBRATE_TCP_BY_POINT # Take the same point using different orientations
CALIBRATE_TCP_BY_PLANE # Take the same point on a plane
.. seealso:: :func:`~robolink.Robolink.CalibrateReference`
"""
self._check_connection()
command = 'CalibTCP3'
self._send_line(command)
self._send_matrix(poses_xyzwpr)
self._send_int(input_format)
if type(algorithm) != list:
algorithm = [algorithm]
self._send_array(algorithm)
self._send_item(robot)
self._send_item(tool)
self.COM.settimeout(3600)
TCPxyz = self._rec_array()
self.COM.settimeout(self.TIMEOUT)
errorstats = self._rec_array()
errors = self._rec_matrix()
self._check_status()
if errors.size(1) > 0:
errors = errors[:,1].list()
return TCPxyz.list(), errorstats.list(), errors
def CalibrateReference(self, joints_points, method=CALIBRATE_FRAME_3P_P1_ON_X, use_joints=False, robot=None):
"""Calibrate a reference frame given a number of points and following a specific algorithm/method.
Important: Provide the list of joints to maximize accuracy for calibrated robots.
:param joints_points: List of points or a list of robot joints (matrix 3xN or nDOFsxN)
:type joints_points: :class:`.Mat` or a list of list of float
:param int method: method/algorithm to use to calculate the new TCP. Tip: use CALIBRATE_FRAME ...
:param bool use_joints: use points or joint values (bool): Set to True if joints_points is a list of joints
:param robot: the robot must be provided to calculate the reference frame by joints
:type robot: :class:`.Item`
:return: The pose of the reference frame with respect to the robot base frame
:rtype: :class:`.Mat`
.. code-block:: python
:caption: Available Reference Frame Calibration Algorithms
CALIBRATE_FRAME_3P_P1_ON_X = 0 # Calibrate by 3 points: [X, X+, Y+] (p1 on X axis)
CALIBRATE_FRAME_3P_P1_ORIGIN = 1 # Calibrate by 3 points: [Origin, X+, XY+] (p1 is origin)
CALIBRATE_FRAME_6P = 2 # Calibrate by 6 points
CALIBRATE_TURNTABLE = 3 # Calibrate turntable
.. seealso:: :func:`~robolink.Robolink.CalibrateTool`
"""
self._check_connection()
command = 'CalibFrame'
self._send_line(command)
self._send_matrix(joints_points)
self._send_int(-1 if use_joints else 0)
self._send_int(method)
self._send_item(robot)
reference_pose = self._rec_pose()
errorstats = self._rec_array()
self._check_status()
return reference_pose
def ProgramStart(self, programname, folder='', postprocessor='', robot=None):
"""Defines the name of the program when the program is generated (offline programming).
It is also possible to specify the name of the post processor as well as the folder to save the program.
This method must be called before any program output is generated (before any robot movement or other instruction).
:param str programname: Name of the program
:param str folder: Folder to save the program, leave empty to use the default program folder (usually Desktop)
:param str postprocessor: Name of the post processor. For example, to select the post processor C:/RoboDK/Posts/Fanuc_RJ3.py, specify "Fanuc_RJ3.py" or simply "Fanuc_RJ3".
:param robot: Robot used for program generation
:type robot: :class:`.Item`
Example:
.. code-block:: python
from robolink import * # import the robolink library
RDK = Robolink() # connect to the RoboDK API (RoboDK starts if it has not started)
robot = RDK.Item('', ITEM_TYPE_ROBOT) # use the first available robot
RDK.ProgramStart('Prog1','C:/MyProgramFolder/', "ABB_RAPID_IRC5", robot) # specify the program name for program generation
# RDK.setRunMode(RUNMODE_MAKE_ROBOTPROG) # redundant
robot.MoveJ(target) # make a simulation
...
RDK.Finish() # Provokes the program generation (disconnects the API)
.. seealso:: :func:`~robolink.Robolink.setRunMode`, :func:`~robolink.Robolink.AddProgram`, :func:`~robolink.Robolink.Finish`
"""
self._check_connection()
command = 'ProgramStart'
self._send_line(command)
self._send_line(programname)
self._send_line(folder)
self._send_line(postprocessor)
if robot is None:
self._send_item(Item(None))
else:
self._send_item(robot)
errors = self._rec_int()
self._check_status()
return errors
def setViewPose(self, pose):
"""Set the pose of the wold reference frame with respect to the view (camera/screen)
:param pose: pose of the item with respect to its parent
:type pose: :class:`.Mat`
"""
self._check_connection()
command = 'S_ViewPose'
self._send_line(command)
self._send_pose(pose)
self._check_status()
def ViewPose(self):
"""Get the pose of the wold reference frame with respect to the view (camera/screen)"""
self._check_connection()
command = 'G_ViewPose'
self._send_line(command)
pose = self._rec_pose()
self._check_status()
return pose
def BuildMechanism(self, type, list_obj, parameters, joints_build, joints_home, joints_senses, joints_lim_low, joints_lim_high, base=eye(4), tool=eye(4), name="New robot", robot=None):
"""Create a new robot or mechanism.
:param int type: Type of the mechanism
:param list list_obj: list of object items that build the robot
:param list parameters: robot parameters in the same order as shown in the RoboDK menu: Utilities-Build Mechanism or robot
:param list joints_build: current state of the robot (joint axes) to build the robot
:param list joints_home: joints for the home position (it can be changed later)
:param robot: existing robot in the station to replace it (optional)
:type robot: :class:`.Item`
:param str name: robot name
Example:
.. code-block:: python
# Start the RoboDK API
from robolink import *
from robodk import *
RDK = Robolink()
# Define your new robot or mechanism
# Example to create a Fanuc LR Mate 200iD robot
robot_name = 'Fanuc LR Mate 200iD'
DOFs = 6
# Define the joints of the robot/mechanism
joints_build = [0, 0, 0, 0, 0, 0]
# Define the home position of the robot/mechanism (default position when you build the mechanism)
# This is also the position the robot goes to if you select "Home"
joints_home = [0, 0, 0, 0, 0, 0]
# Define the robot parameters. The parameters must be provided in the same order they appear
# in the menu Utilities-Model Mechanism or robot
# Some basic mechanisms such as 1 or 2 axis translation/rotation axes don't need any parameters
# (translation/rotation will happen around the Z axis)
#parameters = []
parameters = [330, 50, 0, 330, 35, 335, 80, 0, -90, 0, 0, 0, 0]
# Define the joint senses (set to +1 or -1 for each axis; +1 is used as a reference for the ABB IRB120 robot)
joints_senses = [+1, +1, -1, -1, -1, -1] # add -1 as 7th index to account for axis 2 and axis 3 coupling
# Joint limits (lower limits for each axis)
lower_limits = [-170, -100, -67, -190, -125, -360]
# Joint limits (upper limits for each axis)
upper_limits = [ 170, 145, 213, 190, 125, 360]
# Base frame pose (offset the model by applying a base frame transformation)
#base_pose = xyzrpw_2_pose([0, 0, 0, 0, 0, 0])
# Fanuc and Motoman robots have the base frame at the intersection of axes 1 and 2
base_pose = xyzrpw_2_pose([0, 0, -330, 0, 0, 0])
# Tool frame pose (offset the tool flange by applying a tool frame transformation)
tool_pose = xyzrpw_2_pose([0, 0, 0, 0, 0, 0])
# Retrieve all your items from RoboDK (they should be previously loaded manually or using the API's command RDK.AddFile())
list_objects = []
for i in range(DOFs + 1):
if i == 0:
itm = RDK.Item(robot_name + ' Base', ITEM_TYPE_OBJECT)
else:
itm = RDK.Item(robot_name + ' ' + str(i), ITEM_TYPE_OBJECT)
list_objects.append(itm)
# Create the robot/mechanism
new_robot = RDK.BuildMechanism(MAKE_ROBOT_6DOF, list_objects, parameters, joints_build, joints_home, joints_senses, lower_limits, upper_limits, base_pose, tool_pose, robot_name)
if not new_robot.Valid():
print("Failed to create the robot. Check input values.")
else:
print("Robot/mechanism created: " + new_robot.Name())
"""
# calculate the number of degrees of freedom
ndofs = len(list_obj) - 1
self._check_connection()
command = 'BuildMechanism'
self._send_line(command)
self._send_item(robot)
self._send_line(name)
self._send_int(type)
self._send_int(ndofs)
for i in range(ndofs+1):
self._send_item(list_obj[i])
self._send_pose(base)
self._send_pose(tool)
self._send_array(parameters)
if len(joints_build) < 12:
joints_build += [0]*(12-len(joints_build))
joints_data = Mat([joints_build, joints_home, joints_senses, joints_lim_low, joints_lim_high]).tr()
self._send_matrix(joints_data)
robot = self._rec_item()
self._check_status()
return robot
#------------------------------------------------------------------
#----------------------- CAMERA VIEWS ----------------------------
def Cam2D_Add(self, item_object, cam_params=""):
"""Open a simulated 2D camera view. Returns a handle pointer that can be used in case more than one simulated view is used.
:param item_object: object to attach the camera
:type item_object: :class:`.Item`
:param str cam_params: Camera parameters as a string. Add one or more commands as shown in the following example.
Example:
.. code-block:: python
from robolink import * # API to communicate with RoboDK
from robodk import * # library for basic matrix operations
RDK = Robolink()
# Close any open 2D camera views
RDK.Cam2D_Close()
camref = RDK.ItemUserPick('Select the Camera location (reference, tool or object)')
#camref = RDK.Item('Frame 7',ITEM_TYPE_FRAME)
# Set parameters in mm and degrees:
# FOV: Field of view in degrees (atan(0.5*height/distance) of the sensor)
# FOCAL_LENGHT: focal length in mm
# FAR_LENGHT: maximum working distance (in mm)
# SIZE: size of the window in pixels (fixed) (width x height)
# SNAPSHOT: size of the snapshot image in pixels (width x height)
# BG_COLOR: background color (rgb color or named color: AARRGGBB)
# LIGHT_AMBIENT: ambient color (rgb color or named color: AARRGGBB)
# LIGHT_SPECULAR: specular color (rgb color or named color: AARRGGBB)
# LIGHT_DIFFUSE: diffuse color (rgb color or named color: AARRGGBB)
# DEPTH: Add this flag to create a 32 bit depth map (white=close, black=far)
# NO_TASKBAR: Don't add the window to the task bar
# MINIMIZED: Show the window minimized
# ALWAYS_VISIBLE: Keep the window on top of all other windows
# SHADER_VERTEX: File to a vertex shader (GLSL file)
# SHADER_FRAGMENT: File to a fragment shader (GLSL file)
# Examples to call Camd2D_Add:
# Camera without a fixed window size and 1000 mm length
cam_id = RDK.Cam2D_Add(camref, 'FOCAL_LENGHT=6 FOV=32 FAR_LENGHT=1000')
# Camera with a fixed window size and 1000 mm length
cam_id = RDK.Cam2D_Add(camref, 'FOCAL_LENGHT=6 FOV=32 FAR_LENGHT=1000 SIZE=640x480')
# Camera with a black background
cam_id = RDK.Cam2D_Add(camref, 'FOCAL_LENGHT=6 FOV=32 FAR_LENGHT=1000 SIZE=640x480 BG_COLOR=black')
# Camera without a fixed window size and high resolution snapshot
cam_id = RDK.Cam2D_Add(camref, 'FOCAL_LENGHT=6 FOV=32 FAR_LENGHT=1000 SIZE=640x480')
# Depth view: 32 bit depth map (white=close, black=far)
cam_id = RDK.Cam2D_Add(camref, 'FOCAL_LENGHT=6 FOV=32 FAR_LENGHT=1000 SIZE=640x480 DEPTH')
# Minimized camera
cam_id = RDK.Cam2D_Add(camref, 'FOCAL_LENGHT=6 FOV=32 FAR_LENGHT=1000 SIZE=640x480 MINIMIZED')
# Do not show the camera window in the taskbar
cam_id = RDK.Cam2D_Add(camref, 'FOCAL_LENGHT=6 FOV=32 FAR_LENGHT=1000 SIZE=640x480 NO_TASKBAR')
# Customize the light
cam_id = RDK.Cam2D_Add(camref, 'FOCAL_LENGHT=6 FOV=32 FAR_LENGHT=1000 SIZE=640x480 BG_COLOR=black LIGHT_AMBIENT=red LIGHT_DIFFUSE=#FF00FF00 LIGHT_SPECULAR=black')
cam_id = RDK.Cam2D_Add(camref, 'FOCAL_LENGHT=6 FOV=32 FAR_LENGHT=600 SIZE=640x480 BG_COLOR=black LIGHT_AMBIENT=red LIGHT_DIFFUSE=black LIGHT_SPECULAR=white')
cam_id = RDK.Cam2D_Add(camref, 'FOCAL_LENGHT=6 FOV=32 FAR_LENGHT=1000 SIZE=640x480 LIGHT_AMBIENT=red')
# Provoke a popup and allow the user to enter some parameters
cam_id = RDK.Cam2D_Add(camref, 'POPUP')
# Example to take a snapshot from the camera
RDK.Cam2D_Snapshot(RDK.getParam('PATH_OPENSTATION') + "/sample_image.png", cam_id)
# Special command to retrieve the window ID:
win_id = RDK.Command("CamWinID", str(cam_id))
# print(str(win_id))
#-----------------------------------------------------------------------------------
# Example to use a customized shader to customize the effect of light
# Tip: Use the example: C:/RoboDK/Library/Example-Shader-Customized-Light.rdk
# Tip: If you need a fixed light source update the variable light_Position in the shader_fragment.glsl file
# Get the path to the RoboDK library (usually in C:/RoboDK/Library/)
path_library = RDK.getParam("PATH_LIBRARY")
file_shader_fragment = path_library + '/Macros/Camera-Shaders/shader_fragment.glsl'
file_shader_vertex = path_library + '/Macros/Camera-Shaders/shader_vertex.glsl'
cam_id = RDK.Cam2D_Add(camref, 'FOCAL_LENGHT=6 FOV=32 FAR_LENGHT=2500 SHADER_FRAGMENT=' + file_shader_fragment + ' SHADER_VERTEX=' + file_shader_vertex)
.. seealso:: :func:`~robolink.Robolink.Cam2D_Snapshot`, :func:`~robolink.Robolink.Cam2D_Close`, :func:`~robolink.Robolink.Cam2D_SetParams`
"""
self._check_connection()
command = 'Cam2D_Add'
self._send_line(command)
self._send_item(item_object)
self._send_line(cam_params)
cam_handle = self._rec_ptr()
self._check_status()
return cam_handle
def Cam2D_Snapshot(self, file_save_img, cam_handle=0):
"""Take a snapshot from a simulated camera view and save it to a file. Returns 1 if success, 0 otherwise.
:param str file_save_img: file path to save. Formats supported include PNG, JPEG, TIFF, ...
:param int cam_handle: camera handle (pointer returned by Cam2D_Add)
.. seealso:: :func:`~robolink.Robolink.Cam2D_Add`, :func:`~robolink.Robolink.Cam2D_Close`
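Example (a minimal sketch; the camera reference name is an assumption):
.. code-block:: python
from robolink import * # API to communicate with RoboDK
RDK = Robolink()
camref = RDK.Item('Camera Reference', ITEM_TYPE_FRAME)
cam_id = RDK.Cam2D_Add(camref, 'FOCAL_LENGHT=6 FOV=32 FAR_LENGHT=1000 SIZE=640x480')
# Save a snapshot next to the currently open station
RDK.Cam2D_Snapshot(RDK.getParam('PATH_OPENSTATION') + '/snapshot.png', cam_id)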
"""
self._check_connection()
command = 'Cam2D_Snapshot'
self._send_line(command)
self._send_ptr(int(cam_handle))
self._send_line(file_save_img)
success = self._rec_int()
self._check_status()
return success
def Cam2D_Close(self, cam_handle=0):
"""Closes all camera windows or one specific camera if the camera handle is provided. Returns 1 if success, 0 otherwise.
:param cam_handle: camera handle (pointer returned by Cam2D_Add). Leave to 0 to close all simulated views.
:type cam_handle: int
.. seealso:: :func:`~robolink.Robolink.Cam2D_Add`, :func:`~robolink.Robolink.Cam2D_Snapshot`"""
self._check_connection()
if cam_handle == 0:
command = 'Cam2D_CloseAll'
self._send_line(command)
else:
command = 'Cam2D_Close'
self._send_line(command)
self._send_ptr(cam_handle)
success = self._rec_int()
self._check_status()
return success
def Cam2D_SetParams(self, params, cam_handle=0):
"""Set the parameters of the simulated camera.
Returns 1 if success, 0 otherwise.
:param str params: parameter settings according to the parameters supported by Cam2D_Add
.. seealso:: :func:`~robolink.Robolink.Cam2D_Add`
"""
self._check_connection()
command = 'Cam2D_SetParams'
self._send_line(command)
self._send_ptr(int(cam_handle))
self._send_line(params)
success = self._rec_int()
self._check_status()
return success
#------------------------------------------------------------------
#----------------------- SPRAY GUN SIMULATION ----------------------------
def Spray_Add(self, item_tool=0, item_object=0, params="", points=None, geometry=None):
"""Add a simulated spray gun that allows projecting particles to a part. This is useful to simulate applications such as:
arc welding, spot welding, 3D printing, painting or inspection to verify the trace. The SprayOn.py macro shows an example to use this option.
It returns a pointer that can be used later for other operations, such as turning the spray ON or OFF.
:param str params: A string specifying the behavior of the simulated particles. The string can contain one or more of the following commands (separated by a space). See the allowed parameter options.
:param points: provide the volume as a list of points as described in the sample macro SprayOn.py
:type points: :class:`.Mat`
:param geometry: (optional) provide a list of points describing triangles to define a specific particle geometry. Use this option instead of the PARTICLE command.
:type geometry: :class:`.Mat`
.. code-block:: python
:caption: Allowed parameter options
STEP=AxB: Defines the grid to be projected. 1x1 means only one line of particle projection (for example, for welding)
PARTICLE: Defines the shape and size of the particle (sphere or cube), unless a specific geometry is provided:
a- SPHERE(radius, facets)
b- SPHERE(radius, facets, scalex, scaley, scalez)
c- CUBE(sizex, sizey, sizez)
RAND=factor: Defines a random factor. A factor of 0 means that the particles are not deposited randomly
ELLYPSE: defines the volume as an ellipse (default)
RECTANGLE: defines the volume as a rectangle
PROJECT: project the particles to the surface (default) (for welding, painting or scanning)
NO_PROJECT: does not project the particles to the surface (for example, for 3D printing)
.. seealso:: :func:`~robolink.Robolink.Spray_SetState`, :func:`~robolink.Robolink.Spray_GetStats`, :func:`~robolink.Robolink.Spray_Clear`
Example:
.. code-block:: python
tool = 0 # auto detect active tool
obj = 0 # auto detect object in active reference frame
options_command = "ELLYPSE PROJECT PARTICLE=SPHERE(4,8,1,1,0.5) STEP=8x8 RAND=2"
# define the ellipse volume as p0, pA, pB, colorRGBA (close and far), in mm
# coordinates must be provided with respect to the TCP
close_p0 = [ 0, 0, -200] # xyz in mm: Center of the conical ellipse (side 1)
close_pA = [ 5, 0, -200] # xyz in mm: First vertex of the conical ellipse (side 1)
close_pB = [ 0, 10, -200] # xyz in mm: Second vertex of the conical ellipse (side 1)
close_color = [ 1, 0, 0, 1] # RGBA (0-1)
far_p0 = [ 0, 0, 50] # xyz in mm: Center of the conical ellipse (side 2)
far_pA = [ 60, 0, 50] # xyz in mm: First vertex of the conical ellipse (side 2)
far_pB = [ 0, 120, 50] # xyz in mm: Second vertex of the conical ellipse (side 2)
far_color = [ 0, 0, 1, 0.2] # RGBA (0-1)
close_param = close_p0 + close_pA + close_pB + close_color
far_param = far_p0 + far_pA + far_pB + far_color
volume = Mat([close_param, far_param]).tr()
RDK.Spray_Add(tool, obj, options_command, volume)
RDK.Spray_SetState(SPRAY_ON)
"""
self._check_connection()
command = 'Gun_Add'
self._send_line(command)
self._send_item(item_tool)
self._send_item(item_object)
self._send_line(params)
self._send_matrix(points)
self._send_matrix(geometry)
id_spray = self._rec_int()
self._check_status()
return id_spray
def Spray_SetState(self, state=SPRAY_ON, id_spray=-1):
"""Sets the state of a simulated spray gun (ON or OFF)
:param int state: Set to ON or OFF. Use the defined constants: SPRAY_*
:param int id_spray: spray handle (pointer returned by Spray_Add). Leave to -1 to apply to all simulated sprays.
.. seealso:: :func:`~robolink.Robolink.Spray_Add`, :func:`~robolink.Robolink.Spray_GetStats`, :func:`~robolink.Robolink.Spray_Clear`
"""
self._check_connection()
command = 'Gun_SetState'
self._send_line(command)
self._send_int(id_spray)
self._send_int(state)
success = self._rec_int()
self._check_status()
return success
def Spray_GetStats(self, id_spray=-1):
"""Gets statistics from all simulated spray guns or a specific spray gun.
:param int id_spray: spray handle (pointer returned by Spray_Add). Leave to -1 to apply to all simulated sprays.
.. seealso:: :func:`~robolink.Robolink.Spray_Add`, :func:`~robolink.Robolink.Spray_SetState`, :func:`~robolink.Robolink.Spray_Clear`
"""
self._check_connection()
command = 'Gun_Stats'
self._send_line(command)
self._send_int(id_spray)
info = self._rec_line()
info = info.replace('<br>','\t')
print(info)
data = self._rec_matrix()
self._check_status()
return info, data
def Spray_Clear(self, id_spray=-1):
"""Stops simulating a spray gun. This will clear the simulated particles.
:param int id_spray: spray handle (pointer returned by Spray_Add). Leave the default -1 to apply to all simulated sprays.
.. seealso:: :func:`~robolink.Robolink.Spray_Add`, :func:`~robolink.Robolink.Spray_SetState`, :func:`~robolink.Robolink.Spray_GetStats`
"""
self._check_connection()
command = 'Gun_Clear'
self._send_line(command)
self._send_int(id_spray)
success = self._rec_int()
self._check_status()
return success
def License(self):
"""Get the license string"""
self._check_connection()
command = 'G_License2'
self._send_line(command)
lic_name = self._rec_line()
lic_cid = self._rec_line()
self._check_status()
return lic_name, lic_cid
def Selection(self):
"""Return the list of currently selected items
:return: List of items
:rtype: list of :class:`.Item`"""
self._check_connection()
command = 'G_Selection'
self._send_line(command)
nitems = self._rec_int()
item_list = []
for i in range(nitems):
item_list.append(self._rec_item())
self._check_status()
return item_list
def setSelection(self, list_items=[]):
"""Set the selection in the tree
:param list list_items: List of items to set as selected"""
self._require_build(8896)
self._check_connection()
command = 'S_Selection'
self._send_line(command)
self._send_int(len(list_items))
for itm in list_items:
self._send_item(itm)
self._check_status()
def MergeItems(self, list_items=[]):
"""Merge multiple object items as one. Source objects are not deleted and a new object is created.
:param list list_items: List of items to set as selected
:return: New object created
:rtype: :class:`.Item`"""
self._require_build(8896)
self._check_connection()
command = 'MergeItems'
self._send_line(command)
self._send_int(len(list_items))
for itm in list_items:
self._send_item(itm)
newitem = self._rec_item()
self._check_status()
return newitem
def Popup_ISO9283_CubeProgram(self, robot=0):
"""Popup the menu to create the ISO9283 cube program (Utilities-Create Cube ISO)
:return: Created program (the returned item may be invalid if the program could not be created).
:rtype: :class:`.Item`"""
self._require_build(5177)
self._check_connection()
command = 'Popup_ProgISO9283'
self._send_line(command)
self._send_item(robot)
self.COM.settimeout(3600)
iso_program = self._rec_item()
self.COM.settimeout(self.TIMEOUT)
self._check_status()
return iso_program
def setInteractiveMode(self, mode_type=SELECT_MOVE, default_ref_flags=DISPLAY_REF_DEFAULT, custom_objects=None, custom_ref_flags=None):
"""Set the interactive mode to define the behavior when navigating and selecting items in RoboDK's 3D view.
:param int mode_type: The mode type defines what action occurs when the 3D view is selected (Select object, Pan, Rotate, Zoom, Move Objects, ...)
:param int default_ref_flags: When a movement is specified, we can provide what motion we allow by default with respect to the coordinate system (set appropriate flags)
:param list custom_objects: Provide a list of optional items to customize the move behavior for these specific items (important: the length of custom_ref_flags must match)
:param list custom_ref_flags: Provide a matching list of flags to customize the movement behavior for specific items
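Example (a minimal sketch using the default reference flags):
.. code-block:: python
from robolink import * # API to communicate with RoboDK
RDK = Robolink()
# Let the user move items in the 3D view using the default reference flags
RDK.setInteractiveMode(SELECT_MOVE, DISPLAY_REF_DEFAULT)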
"""
self._check_connection()
command = 'S_InteractiveMode'
self._send_line(command)
self._send_int(mode_type)
self._send_int(default_ref_flags)
if custom_objects is None or custom_ref_flags is None:
self._send_int(-1)
else:
nitems = min(len(custom_objects),len(custom_ref_flags))
self._send_int(nitems)
for i in range(nitems):
self._send_item(custom_objects[i])
self._send_int(custom_ref_flags[i])
self._check_status()
def CursorXYZ(self, x_coord=-1, y_coord=-1):
"""Returns the position of the cursor as XYZ coordinates (by default), or the 3D position of a given set of 2D coordinates of the window (x & y coordinates in pixels from the top left corner)
The XYZ coordinates returned are given with respect to the RoboDK station (absolute reference).
If no coordinates are provided, the current position of the cursor is retrieved.
:param int x_coord: X coordinate in pixels
:param int y_coord: Y coordinate in pixels
.. code-block:: python
:caption: Example to retrieve the 3D point under the mouse cursor
RDK = Robolink()
while True:
xyz, item = RDK.CursorXYZ()
print(str(item) + " " + str(xyz))
"""
self._check_connection()
command = 'Proj2d3d'
self._send_line(command)
self._send_int(x_coord)
self._send_int(y_coord)
selection = self._rec_int()
item = self._rec_item()
xyz = self._rec_xyz()
self._check_status()
return xyz, item
def PluginLoad(self, plugin_name="", load=1):
"""Load or unload the specified plugin (path to DLL, dylib or SO file). If the plugin is already loaded it will unload the plugin and reload it. Pass an empty plugin_name to reload all plugins.
:param str plugin_name: name of the plugin or path (if it is not in the default directory).
:param int load: load the plugin (1/default) or unload (0)
.. code-block:: python
:caption: Example to load a plugin
RDK = Robolink()
RDK.PluginLoad("C:/RoboDK/bin/plugin/yourplugin.dll")
"""
self._check_connection()
command = 'PluginLoad'
self._send_line(command)
self._send_int(load)
self._check_status()
def PluginCommand(self, plugin_name, plugin_command="", value=""):
"""Send a specific command to a RoboDK plugin. The command and value (optional) must be handled by your plugin. It returns the result as a string.
:param str plugin_name: The plugin name must match the PluginName() implementation in the RoboDK plugin.
:param str plugin_command: Specific command handled by your plugin
:param str value: Specific value (optional) handled by your plugin
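Example (illustrative sketch; 'MyPlugin' and 'DoSomething' are hypothetical names that your plugin would need to handle):
.. code-block:: python
RDK = Robolink()
result = RDK.PluginCommand('MyPlugin', 'DoSomething', '42')
print(result)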
"""
self._check_connection()
command = 'PluginCommand'
self._send_line(command)
self._send_line(plugin_name)
self._send_line(plugin_command)
self._send_line(str(value))
self.COM.settimeout(3600*24*7)
result = self._rec_line()
self.COM.settimeout(self.TIMEOUT)
self._check_status()
return result
def EmbedWindow(self, window_name, docked_name=None, size_w=-1, size_h=-1, pid=0, area_add=1, area_allowed=15, timeout=500):
"""Embed a window from a separate process in RoboDK as a docked window. Returns True if successful.
:param str window_name: The name of the window currently open. Make sure the window name is unique and it is a top level window
:param str docked_name: Name of the docked tab in RoboDK (optional, if different from the window name)
:param int pid: Process ID (optional)
:param int area_add: Set to 1 (right) or 2 (left) (default is 1)
:param int area_allowed: Areas allowed (default is 15: no constraint)
:param int timeout: Timeout to abort attempting to embed the window
.. seealso:: Use the static function: :func:`~robolink.EmbedWindow` (this function should usually be called on a separate thread)
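Example (a minimal sketch using tkinter; the window title is an arbitrary placeholder, and EmbedWindow is called from a separate thread so it can wait for the window to exist):
.. code-block:: python
import threading
from tkinter import Tk, Label
from robolink import Robolink
window_title = 'My custom window'   # placeholder title: it must match the window you create
threading.Thread(target=lambda: Robolink().EmbedWindow(window_title)).start()
root = Tk()
root.title(window_title)
Label(root, text='Hello RoboDK').pack()
root.mainloop()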
"""
if not docked_name:
docked_name = window_name
self._check_connection()
command = 'WinProcDock'
self._send_line(command)
self._send_line(docked_name)
self._send_line(window_name)
self._send_array([size_w, size_h])
self._send_line(str(pid))
self._send_int(area_allowed)
self._send_int(area_add)
self._send_int(timeout)
result = self._rec_int()
self._check_status()
return result > 0
class Item():
"""The Item class represents an item in RoboDK station. An item can be a robot, a frame, a tool, an object, a target, ... any item visible in the station tree.
An item can also be seen as a node where other items can be attached to (child items).
Every item has one parent item/node and can have one or more child items/nodes.
RoboDK Items are automatically created and retrieved by :class:`.Robolink` methods such as :func:`~robolink.Robolink.Item` and :func:`~robolink.Robolink.ItemUserPick`
.. seealso:: :func:`~robolink.Robolink.Item`, :func:`~robolink.Robolink.ItemUserPick`
.. code-block:: python
from robolink import * # import the robolink library
RDK = Robolink() # connect to the RoboDK API (RoboDK starts if it has not started)
tool = RDK.Item('Tool') # Get an item named Tool (name in the RoboDK station tree)
robot = RDK.Item('', ITEM_TYPE_ROBOT) # Get the first available robot
target = RDK.Item('Target 1', ITEM_TYPE_TARGET) # Get a target called "Target 1"
frame = RDK.ItemUserPick('Select a reference frame', ITEM_TYPE_FRAME) # Prompt the user to select a reference frame
robot.setPoseFrame(frame)
robot.setPoseTool(tool)
robot.MoveJ(target) # Move the robot to the target using the selected reference frame
"""
def __init__(self, link, ptr_item=0, itemtype=-1):
self.item = ptr_item
self.link = link # it is recommended to keep the link as a reference and not a duplicate (otherwise it will establish a new connection at every call)
self.type = itemtype
def __repr__(self):
if self.Valid():
return ("RoboDK item (%i) of type %i" % (self.item, int(self.type)))
else:
return "RoboDK item (INVALID)"
def __eq__(self, other):
if other is None:
return False
return self.item == other.item
def __ne__(self, other):
if other is None:
return True
return self.item != other.item
def equals(self, item2):
"""Returns True if an item is the same as this item :class:`.Item`
:param item2: item to compare
:type item2: :class:`.Item`
"""
return self.item == item2.item
def RDK(self):
"""Returns the RoboDK link Robolink(). It is important to have different links (Robolink) for multithreaded applications.
.. seealso:: :func:`~robolink.Robolink.Finish`
"""
return self.link
#"""Generic item calls"""
def Type(self):
"""Return the type of the item (robot, object, tool, frame, ...).
Tip: Compare the returned value against ITEM_TYPE_* variables
.. seealso:: :func:`~robolink.Robolink.Item`
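Example (illustrative; assumes the user picks an item from the station):
.. code-block:: python
from robolink import *    # import the robolink library
RDK = Robolink()          # connect to the RoboDK API
item = RDK.ItemUserPick('Select an item')
if item.Type() == ITEM_TYPE_ROBOT:
    print('Selected a robot: ' + item.Name())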
"""
self.link._check_connection()
command = 'G_Item_Type'
self.link._send_line(command)
self.link._send_item(self)
itemtype = self.link._rec_int()
self.link._check_status()
return itemtype
def Copy(self):
"""Copy the item to the clipboard (same as Ctrl+C). Use together with Paste() to duplicate items.
.. seealso:: :func:`~robolink.Robolink.Copy`, :func:`~robolink.Item.Paste`
"""
self.link.Copy(self.item)
def Paste(self):
"""Paste the copied :class:`.Item` from the clipboard as a child of this item (same as Ctrl+V)
Returns the new item created (pasted)
.. seealso:: :func:`~robolink.Robolink.Copy`, :func:`~robolink.Item.Copy`, :func:`~robolink.Item.Paste`
"""
return self.link.Paste(self.item)
def AddFile(self, filename):
"""Adds an object attached to this object
:param str filename: file path
.. seealso:: :func:`~robolink.Robolink.AddFile`, :func:`~robolink.Item.Save`
"""
return self.link.AddFile(filename, self.item)
def Save(self, filename):
"""Save a station or object to a file
:param str filename: file to save. Use *.rdk name for RoboDK stations, *.stl file for objects, *.robot file for robots, ...
.. seealso:: :func:`~robolink.Robolink.AddFile`, :func:`~robolink.Item.AddFile`
"""
self.link.Save(filename, self.item)
def Collision(self, item_check):
"""Returns True if this item is in a collision state with another :class:`.Item`, otherwise it returns False.
:param item_check: item to check for collisions
:type item_check: :class:`.Item`
.. seealso:: :func:`~robolink.Robolink.Collision`
"""
return self.link.Collision(self.item, item_check)
def IsInside(self, object):
"""Return True if the object is inside the provided object
:param object: object to check
:type object: :class:`.Item`
.. seealso:: :func:`~robolink.Robolink.IsInside`
"""
return self.link.IsInside(self.item, object)
def AddGeometry(self, fromitem, pose):
"""Makes a copy of the geometry fromitem adding it at a given position (pose), relative to this item."""
self.link._check_connection()
command = 'CopyFaces'
self.link._send_line(command)
self.link._send_item(fromitem)
self.link._send_item(self)
self.link._send_pose(pose)
self.link._check_status()
def Delete(self):
"""Remove this item and all its children from the station.
.. seealso:: :func:`~robolink.Robolink.AddFile`, :func:`~robolink.Robolink.Item`
"""
if self.item == 0:
raise InputError("Item is not valid or was already deleted")
self.link._check_connection()
command = 'Remove'
self.link._send_line(command)
self.link._send_item(self)
self.link._check_status()
self.item = 0
def Valid(self):
"""Checks if the item is valid.
Returns True if the item is valid or False if the item is not valid.
An invalid item will be returned by an unsuccessful function call (wrong name or because an item was deleted)
.. seealso:: :func:`~robolink.Robolink.Item`
Example:
.. code-block:: python
from robolink import * # import the robolink library
RDK = Robolink() # connect to the RoboDK API (RoboDK starts if it has not started
tool = RDK.Item('Tool') # Retrieve an item named tool
if not tool.Valid():
print("The tool item does not exist!")
quit()
"""
if self.item == 0: return False
return True
def setParent(self, parent):
"""Attaches the item to a new parent while maintaining the relative position with its parent.
The absolute position is changed.
:param parent: parent to attach the item
:type parent: :class:`.Item`
.. seealso:: :func:`~robolink.Item.setParentStatic`
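Example (illustrative; assumes an existing connection RDK = Robolink(), and 'Frame 2' and 'Part 1' are placeholder item names):
.. code-block:: python
frame = RDK.Item('Frame 2', ITEM_TYPE_FRAME)
part = RDK.Item('Part 1', ITEM_TYPE_OBJECT)
# Attach the part to the frame: the relative pose is kept, so the part may jump in the 3D view
part.setParent(frame)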
"""
self.link._check_connection()
command = 'S_Parent'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_item(parent)
self.link._check_status()
return parent
def setParentStatic(self, parent):
"""Attaches the item to another parent while maintaining the current absolute position in the station.
The relationship between this item and its parent is changed to maintain the absolute position.
:param parent: parent to attach the item
:type parent: :class:`.Item`
.. seealso:: :func:`~robolink.Item.setParent`
"""
self.link._check_connection()
command = 'S_Parent_Static'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_item(parent)
self.link._check_status()
def AttachClosest(self):
"""Attach the closest object to the tool.
Returns the item that was attached.
Use item.Valid() to check if an object was attached to the tool.
.. seealso:: :func:`~robolink.Item.setParentStatic`
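Example (illustrative sketch; assumes an existing connection RDK = Robolink()):
.. code-block:: python
tool = RDK.Item('', ITEM_TYPE_TOOL)       # get the first tool available
attached = tool.AttachClosest()
if attached.Valid():
    print('Attached: ' + attached.Name())
else:
    print('No object is close enough to the tool')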
"""
self.link._check_connection()
command = 'Attach_Closest'
self.link._send_line(command)
self.link._send_item(self)
item_attached = self.link._rec_item()
self.link._check_status()
return item_attached
def DetachClosest(self, parent=0):
"""Detach the closest object attached to the tool (see also: setParentStatic).
:param parent: New parent item to attach, such as a reference frame (optional). If not provided, the items held by the tool will be placed at the station root.
:type parent: :class:`.Item`
.. seealso:: :func:`~robolink.Item.setParentStatic`
"""
self.link._check_connection()
command = 'Detach_Closest'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_item(parent)
item_detached = self.link._rec_item()
self.link._check_status()
return item_detached
def DetachAll(self, parent=0):
"""Detaches any object attached to a tool.
:param parent: New parent item to attach, such as a reference frame (optional). If not provided, the items held by the tool will be placed at the station root.
:type parent: :class:`.Item`
.. seealso:: :func:`~robolink.Item.setParentStatic`
"""
self.link._check_connection()
command = 'Detach_All'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_item(parent)
self.link._check_status()
def Parent(self):
"""Return the parent item of this item (:class:`.Item`)
.. seealso:: :func:`~robolink.Item.Childs`
"""
self.link._check_connection()
command = 'G_Parent'
self.link._send_line(command)
self.link._send_item(self)
parent = self.link._rec_item()
self.link._check_status()
return parent
def Childs(self):
"""Return a list of the childs items (list of :class:`.Item`) that are attached to this item.
Exceptionally, if Childs is called on a program it will return the list of subprograms called by this program.
.. seealso:: :func:`~robolink.Item.Parent`
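Example (illustrative; assumes an existing connection RDK = Robolink() and a reference frame named 'Frame 1'):
.. code-block:: python
frame = RDK.Item('Frame 1', ITEM_TYPE_FRAME)
for child in frame.Childs():
    print(child.Name())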
"""
self.link._check_connection()
command = 'G_Childs'
self.link._send_line(command)
self.link._send_item(self)
nitems = self.link._rec_int()
itemlist = []
for i in range(nitems):
itemlist.append(self.link._rec_item())
self.link._check_status()
return itemlist
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def Visible(self):
"""Returns 1 if the item is visible, otherwise it returns 0.
.. seealso:: :func:`~robolink.Item.setVisible`
"""
self.link._check_connection()
command = 'G_Visible'
self.link._send_line(command)
self.link._send_item(self)
visible = self.link._rec_int()
self.link._check_status()
return visible
def setVisible(self, visible, visible_frame=None):
"""Sets the item visiblity.
:param bool visible: Set the object as visible (1/True) or invisible (0/False)
:param bool visible_frame: Set the object reference frame as visible (1/True) or invisible (0/False). It is also possible to provide flags to control the visibility of each robot link (only for robot items). When the item is a robot, this variable can specify robot visibility using suitable flags (as shown in the example).
Example:
.. code-block:: python
:caption: Change robot visibility
# Retrieve the robot (first robot available)
robot = RDK.Item('', ITEM_TYPE_ROBOT)
# Show the robot with default settings:
robot.setVisible(True, VISIBLE_ROBOT_DEFAULT)
# Show the robot and hide all references:
robot.setVisible(1, VISIBLE_ROBOT_DEFAULT & ~VISIBLE_ROBOT_ALL_REFS)
# Show only references (hide the robot):
robot.setVisible(1, VISIBLE_ROBOT_ALL_REFS)
.. code-block:: python
:caption: Available Frame flags
# Default values for objects
VISIBLE_REFERENCE_DEFAULT = -1
VISIBLE_REFERENCE_ON = 1 # For objects and reference frames only
VISIBLE_REFERENCE_OFF = 0 # For objects and reference frames only
# Available flags to set robot visibility
VISIBLE_ROBOT_NONE = 0
VISIBLE_ROBOT_FLANGE = 0x01
VISIBLE_ROBOT_AXIS_Base_3D = 0x01 << 1
VISIBLE_ROBOT_AXIS_Base_REF = 0x01 << 2
VISIBLE_ROBOT_AXIS_1_3D = 0x01 << 3
VISIBLE_ROBOT_AXIS_1_REF = 0x01 << 4
VISIBLE_ROBOT_AXIS_2_3D = 0x01 << 5
VISIBLE_ROBOT_AXIS_2_REF = 0x01 << 6
VISIBLE_ROBOT_AXIS_3_3D = 0x01 << 7
VISIBLE_ROBOT_AXIS_3_REF = 0x01 << 8
VISIBLE_ROBOT_AXIS_4_3D = 0x01 << 9
VISIBLE_ROBOT_AXIS_4_REF = 0x01 << 10
VISIBLE_ROBOT_AXIS_5_3D = 0x01 << 11
VISIBLE_ROBOT_AXIS_5_REF = 0x01 << 12
VISIBLE_ROBOT_AXIS_6_3D = 0x01 << 13
VISIBLE_ROBOT_AXIS_6_REF = 0x01 << 14
VISIBLE_ROBOT_AXIS_7_3D = 0x01 << 15
VISIBLE_ROBOT_AXIS_7_REF = 0x02 << 16
VISIBLE_ROBOT_DEFAULT = 0x2AAAAAAB
VISIBLE_ROBOT_ALL = 0x7FFFFFFF
VISIBLE_ROBOT_ALL_REFS = 0x15555555
.. seealso:: :func:`~robolink.Item.Visible`
"""
if visible_frame is None:
visible_frame = -1
elif visible_frame is False:
visible_frame = -2
elif visible_frame is True:
visible_frame = -3
self.link._check_connection()
command = 'S_Visible'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_int(visible)
self.link._send_int(visible_frame)
self.link._check_status()
return self
def Name(self):
"""Returns the item name. The name of the item is always displayed in the RoboDK station tree.
Returns the name as a string (str)
:return: Item name
:rtype: str
.. seealso:: :func:`~robolink.Item.setName`
"""
self.link._check_connection()
command = 'G_Name'
self.link._send_line(command)
self.link._send_item(self)
name = self.link._rec_line()
self.link._check_status()
return name
def setName(self, name):
"""Set the name of the item. The name of the item will be displayed in the station tree.
:param str name: New item name
.. seealso:: :func:`~robolink.Item.Name`
"""
self.link._check_connection()
command = 'S_Name'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_line(name)
self.link._check_status()
return self
def setValue(self, varname, value):
"""Set a specific property name to a given value. This is reserved for internal purposes and future compatibility.
:param str varname: property name
:param str value: property value
.. seealso:: :func:`~robolink.Robolink.Command`, :func:`~robolink.Item.setParam`
"""
self.link._check_connection()
if isinstance(value, Mat):
command = 'S_Gen_Mat'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_line(varname)
self.link._send_matrix(value)
elif isinstance(value,str):
command = 'S_Gen_Str'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_line(varname)
self.link._send_line(value)
else:
raise Exception("Unsupported value type")
self.link._check_status()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def setPose(self, pose):
"""Set the position (pose) of the item with respect to its parent (item it is attached to).
For example, the position of an object, frame or target with respect to its parent reference frame.
:param pose: pose of the item with respect to its parent
:type pose: :class:`.Mat`
.. seealso:: :func:`~robolink.Item.Pose`, :func:`~robolink.Item.setPoseTool`, :func:`~robolink.Item.setPoseFrame`, :func:`~robolink.Robolink.Item`
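Example (a minimal sketch; 'Target 1' is a placeholder target name and the pose values are illustrative):
.. code-block:: python
from robolink import *    # import the robolink library
from robodk import *      # math functions such as transl and rotz
RDK = Robolink()
target = RDK.Item('Target 1', ITEM_TYPE_TARGET)
# Place the target 500 mm along Z of its parent reference, rotated 90 deg around Z
target.setPose(transl(0, 0, 500) * rotz(pi/2))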
"""
self.link._check_connection()
command = 'S_Hlocal'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_pose(pose)
self.link._check_status()
return self
def Pose(self):
"""Returns the relative position (pose) of an object, target or reference frame. For example, the position of an object, target or reference frame with respect to its parent.
If a robot is provided, it will provide the pose of the end effector with respect to the robot base (same as PoseTool())
Returns the pose as :class:`.Mat`.
Tip: Use a Pose_2_* function from the robodk module (such as :class:`robodk.Pose_2_KUKA`) to convert the pose to XYZABC (XYZ position in mm and ABC orientation in degrees), specific to a robot brand.
Example: :ref:`weldexample`
.. seealso:: :func:`~robolink.Item.Pose`, :func:`~robolink.Item.PoseTool`, :func:`~robolink.Item.PoseFrame`, :func:`~robolink.Robolink.Item`
"""
self.link._check_connection()
command = 'G_Hlocal'
self.link._send_line(command)
self.link._send_item(self)
pose = self.link._rec_pose()
self.link._check_status()
return pose
def setGeometryPose(self, pose):
"""Set the position (pose) the object geometry with respect to its own reference frame. This can be applied to tools and objects.
The pose must be a :class:`.Mat`"""
self.link._check_connection()
command = 'S_Hgeom'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_pose(pose)
self.link._check_status()
def GeometryPose(self):
"""Returns the position (pose as :class:`.Mat`) the object geometry with respect to its own reference frame. This procedure works for tools and objects.
"""
self.link._check_connection()
command = 'G_Hgeom'
self.link._send_line(command)
self.link._send_item(self)
pose = self.link._rec_pose()
self.link._check_status()
return pose
def setPoseAbs(self, pose):
"""Sets the position of the item given the pose (:class:`.Mat`) with respect to the absolute reference frame (station reference)
:param pose: pose of the item with respect to the station reference
:type pose: :class:`.Mat`
.. seealso:: :func:`~robolink.Item.PoseAbs`, :func:`~robolink.Item.setPose`
"""
self.link._check_connection()
command = 'S_Hlocal_Abs'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_pose(pose)
self.link._check_status()
return self
def PoseAbs(self):
"""Return the position (:class:`.Mat`) of this item given the pose with respect to the absolute reference frame (station reference)
For example, the position of an object/frame/target with respect to the origin of the station.
.. seealso:: :func:`~robolink.Item.setPoseAbs`, :func:`~robolink.Item.Pose`
"""
self.link._check_connection()
command = 'G_Hlocal_Abs'
self.link._send_line(command)
self.link._send_item(self)
pose = self.link._rec_pose()
self.link._check_status()
return pose
def Recolor(self, tocolor, fromcolor=None, tolerance=None):
"""Changes the color of an :class:`.Item` (object, tool or robot).
Colors must be in the format COLOR=[R,G,B,(A=1)] where all values range from 0 to 1.
Alpha (A) defaults to 1 (100% opaque). Set A to 0 to make an object transparent.
:param tocolor: color to set
:type tocolor: list of float
:param fromcolor: color to change
:type fromcolor: list of float
:param tolerance: tolerance to replace colors (set to 0 for exact match)
:type tolerance: float (defaults to 0.1)
.. seealso:: :func:`~robolink.Item.setColor`
"""
self.link._check_connection()
if not fromcolor:
fromcolor = [0,0,0,0]
tolerance = 2
elif not tolerance:
tolerance= 0.1
if not (isinstance(tolerance,int) or isinstance(tolerance,float)):
raise Exception("tolerance must be a scalar")
tocolor = self.link._check_color(tocolor)
fromcolor = self.link._check_color(fromcolor)
command = 'Recolor'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_array([tolerance] + fromcolor + tocolor)
self.link._check_status()
def setColor(self, tocolor):
"""Set the color of an object, tool or robot.
A color must be in the format COLOR=[R,G,B,(A=1)] where all values range from 0 to 1.
:param tocolor: color to set
:type tocolor: list of float
.. seealso:: :func:`~robolink.Item.Color`, :func:`~robolink.Item.Recolor`
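Example (illustrative; assumes an existing connection RDK = Robolink(), and 'Part 1' is a placeholder object name):
.. code-block:: python
part = RDK.Item('Part 1', ITEM_TYPE_OBJECT)
part.setColor([1, 0, 0, 1])      # opaque red
part.setColor([0, 0, 1, 0.5])    # semi-transparent blue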
"""
self.link._check_connection()
tocolor = self.link._check_color(tocolor)
command = 'S_Color'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_array(tocolor)
self.link._check_status()
def setColorShape(self, tocolor, shape_id):
"""Set the color of an object shape. It can also be used for tools.
A color must be in the format COLOR=[R,G,B,(A=1)] where all values range from 0 to 1.
:param tocolor: color to set
:type tocolor: list of float
:param int shape_id: ID of the shape: the ID is the order in which the shape was added using AddShape()
.. seealso:: :func:`~robolink.Item.Color`, :func:`~robolink.Item.Recolor`
"""
self.link._check_connection()
tocolor = self.link._check_color(tocolor)
command = 'S_ShapeColor'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_int(shape_id)
self.link._send_array(tocolor)
self.link._check_status()
def setColorCurve(self, tocolor, curve_id=-1):
"""Set the color of a curve object. It can also be used for tools.
A color must be in the format COLOR=[R,G,B,(A=1)] where all values range from 0 to 1.
:param tocolor: color to set
:type tocolor: list of float
:param int curve_id: ID of the curve: the ID is the order in which the shape was added using AddCurve()
.. seealso:: :func:`~robolink.Item.Color`, :func:`~robolink.Item.Recolor`
"""
self.link._check_connection()
tocolor = self.link._check_color(tocolor)
command = 'S_CurveColor'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_int(curve_id)
self.link._send_array(tocolor)
self.link._check_status()
def Color(self):
"""Return the color of an :class:`.Item` (object, tool or robot). If the item has multiple colors it returns the first color available).
A color is in the format COLOR=[R,G,B,(A=1)] where all values range from 0 to 1.
.. seealso:: :func:`~robolink.Item.setColor`, :func:`~robolink.Item.Recolor`
"""
self.link._check_connection()
command = 'G_Color'
self.link._send_line(command)
self.link._send_item(self)
color = self.link._rec_array()
self.link._check_status()
return color.tolist()
def Scale(self, scale, pre_mult=None, post_mult=None):
"""Apply a scale to an object to make it bigger or smaller.
The scale can be uniform (if scale is a float value) or per axis (if scale is an array/list [scale_x, scale_y, scale_z]).
:param scale: scale parameter (1 means no change)
:type scale: float or list of 3 float [scale_x, scale_y, scale_z]
:param pre_mult: pre multiplication to apply before the scaling (optional)
:param post_mult: post multiplication to apply after the scaling (optional)"""
if pre_mult is not None or post_mult is not None:
if pre_mult is None:
pre_mult = eye(4)
if post_mult is None:
post_mult = invH(pre_mult)
self.link._check_connection()
if isinstance(scale,float) or isinstance(scale,int):
scale = [scale, scale, scale]
elif len(scale) > 3:
scale = scale[:3]
elif len(scale) < 3:
raise Exception("scale must be a single value or a 3-vector value")
command = 'TrScale'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_array(scale)
self.link._send_pose(pre_mult)
self.link._send_pose(post_mult)
status = self.link._rec_int()
self.link._check_status()
return status > 0
else:
self.link._check_connection()
if isinstance(scale,float) or isinstance(scale,int):
scale = [scale, scale, scale]
elif len(scale) > 3:
scale = scale[:3]
elif len(scale) < 3:
raise Exception("scale must be a single value or a 3-vector value")
command = 'Scale'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_array(scale)
self.link._check_status()
return None
#"""Object specific calls"""
def AddShape(self, triangle_points):
"""Adds a shape to the object provided some triangle coordinates. Triangles must be provided as a list of vertices. A vertex normal can be optionally provided.
.. seealso:: :func:`~robolink.Robolink.AddShape`
"""
return self.link.AddShape(triangle_points, self)
def AddCurve(self, curve_points, add_to_ref=False, projection_type=PROJECTION_ALONG_NORMAL_RECALC):
"""Adds a curve provided point coordinates. The provided points must be a list of vertices. A vertex normal can be provided optionally.
.. seealso:: :func:`~robolink.Robolink.AddCurve`
"""
return self.link.AddCurve(curve_points, self, add_to_ref, projection_type)
def AddPoints(self, points, add_to_ref=False, projection_type=PROJECTION_ALONG_NORMAL_RECALC):
"""Adds a list of points to an object. The provided points must be a list of vertices. A vertex normal can be provided optionally.
.. seealso:: :func:`~robolink.Robolink.AddPoints`
"""
return self.link.AddPoints(points, self, add_to_ref, projection_type)
def ProjectPoints(self, points, projection_type=PROJECTION_ALONG_NORMAL_RECALC):
"""Projects a point or a list of points to the object given its coordinates. The provided points must be a list of [XYZ] coordinates. Optionally, a vertex normal can be provided [XYZijk].
.. seealso:: :func:`~robolink.Robolink.ProjectPoints`
"""
return self.link.ProjectPoints(points, self, projection_type)
def SelectedFeature(self):
"""Retrieve the currently selected feature for this object.
.. seealso:: :func:`~robolink.Robolink.GetPoints`
Example:
.. code-block:: python
# Show the point selected
object = RDK.Item('Object', ITEM_TYPE_OBJECT)
is_selected, feature_type, feature_id = object.SelectedFeature()
points, name_selected = object.GetPoints(feature_type, feature_id)
point = None
if len(points) > 1:
point = points[feature_id]
else:
point = points[0]
RDK.ShowMessage("Selected Point: %s = [%.3f, %.3f, %.3f]" % (name_selected, point[0], point[1], point[2]))
"""
self.link._check_connection()
command = 'G_ObjSelection'
self.link._send_line(command)
self.link._send_item(self)
is_selected = self.link._rec_int()
feature_type = self.link._rec_int()
feature_id = self.link._rec_int()
self.link._check_status()
return is_selected, feature_type, feature_id
def GetPoints(self, feature_type=FEATURE_SURFACE, feature_id=0):
"""Retrieves the point under the mouse cursor, a curve or the 3D points of an object. The points are provided in [XYZijk] format in relative coordinates. The XYZ are the local point coordinate and ijk is the normal of the surface.
:param int feature_type: set to FEATURE_SURFACE to retrieve the point under the mouse cursor, FEATURE_CURVE to retrieve the list of points for that wire, or FEATURE_POINT to retrieve the list of points.
:param int feature_id: used only if FEATURE_CURVE is specified, it allows retrieving the appropriate curve id of an object
:return: List of points
.. code-block:: python
# Example to display the XYZ position of a selected object
from robolink import * # Import the RoboDK API
RDK = Robolink() # Start RoboDK API
# Ask the user to select an object
OBJECT = RDK.ItemUserPick("Select an object", ITEM_TYPE_OBJECT)
while True:
is_selected, feature_type, feature_id = OBJECT.SelectedFeature()
if is_selected and feature_type == FEATURE_SURFACE:
point_mouse, name_feature = OBJECT.GetPoints(FEATURE_SURFACE)
print("Selected %i (%i): %s %s" % (feature_id, feature_type, str(point_mouse), name_feature))
else:
print("Object Not Selected. Select a point in the object surface...")
pause(0.1)
.. seealso:: :func:`~robolink.Item.SelectedFeature`
"""
self.link._check_connection()
command = 'G_ObjPoint'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_int(feature_type)
self.link._send_int(feature_id)
points = self.link._rec_matrix()
feature_name = self.link._rec_line()
self.link._check_status()
return list(points), feature_name
def setMillingParameters(self, ncfile='', part=0, params=''):
"""Obsolete, use :func:`~robolink.Item.setMachiningParameters` instead"""
newprog, status = self.setMachiningParameters(ncfile,part,params)
return newprog, status
def setMachiningParameters(self, ncfile='', part=0, params=''):
"""Update the robot milling path input and parameters. Parameter input can be an NC file (G-code or APT file) or an object item in RoboDK. A curve or a point follow project will be automatically set up for a robot manufacturing project.
Tip: Use getLink(), setPoseTool(), setPoseFrame() to get/set the robot tool, reference frame, robot and program linked to the project.
Tip: Use setPose() and setJoints() to update the path to tool orientation or the preferred start joints.
:param str ncfile: path to the NC file (G-code or APT) to be loaded (optional)
:param part: object holding curves or points to automatically set up a curve/point follow project (optional)
:type part: :class:`.Item`
:param params: Additional options
.. seealso:: :func:`~robolink.Robolink.AddMachiningProject`, :func:`~robolink.Item.Joints`, :func:`~robolink.Item.getLink`, :func:`~robolink.Item.setJoints`, :func:`~robolink.Item.setToolPose`, :func:`~robolink.Item.setFramePose`
Example:
.. code-block:: python
object_curve = RDK.AddCurve(POINTS)
object_curve.setName('AutoPoints n%i' % NUM_POINTS)
path_settings = RDK.AddMillingProject("AutoCurveFollow settings")
prog, status = path_settings.setMillingParameters(part=object_curve)
"""
self.link._check_connection()
command = 'S_MachiningParams'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_line(ncfile)
self.link._send_item(part)
self.link._send_line(params)
self.link.COM.settimeout(3600)
newprog = self.link._rec_item()
self.link.COM.settimeout(self.link.TIMEOUT)
status = self.link._rec_int()/1000.0
self.link._check_status()
return newprog, status
#"""Target item calls"""
def setAsCartesianTarget(self):
"""Sets a target as a cartesian target. A cartesian target moves to cartesian coordinates.
.. seealso:: :func:`~robolink.Robolink.AddTarget`, :func:`~robolink.Item.setPose`, :func:`~robolink.Item.setAsJointTarget`
"""
self.link._check_connection()
command = 'S_Target_As_RT'
self.link._send_line(command)
self.link._send_item(self)
self.link._check_status()
return self
def setAsJointTarget(self):
"""Sets a target as a joint target. A joint target moves to the joint position without taking into account the cartesian coordinates.
.. seealso:: :func:`~robolink.Robolink.AddTarget`, :func:`~robolink.Item.setPose`, :func:`~robolink.Item.setAsCartesianTarget`
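Example (illustrative sketch; assumes an existing connection RDK = Robolink(), and the target name and joint values are placeholders):
.. code-block:: python
target = RDK.AddTarget('Home')
target.setJoints([0, -90, 90, 0, 90, 0])   # preferred joint values, in degrees
target.setAsJointTarget()                  # the target will move to these joints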
"""
self.link._check_connection()
command = 'S_Target_As_JT'
self.link._send_line(command)
self.link._send_item(self)
self.link._check_status()
return self
def isJointTarget(self):
"""Returns True if a target is a joint target. A joint target moves to the joint position without taking into account the cartesian coordinates.
.. seealso:: :func:`~robolink.Robolink.AddTarget`, :func:`~robolink.Item.setPose`, :func:`~robolink.Item.setAsCartesianTarget`, :func:`~robolink.Item.setAsJointTarget`
"""
self.link._check_connection()
command = 'Target_Is_JT'
self.link._send_line(command)
self.link._send_item(self)
isjt = self.link._rec_int()
self.link._check_status()
return isjt > 0
#"""Robot item calls"""
def Joints(self):
"""Return the current joint position as a :class:`.Mat` of a robot or the joints of a target.
If the item is a cartesian target, it returns the preferred joints (configuration) to go to that cartesian position.
.. seealso:: :func:`~robolink.Item.setJoints`, :func:`~robolink.Item.MoveJ`
Example:
.. code-block:: python
from robolink import * # import the robolink library
RDK = Robolink() # connect to the RoboDK API (RoboDK starts if it has not started)
robot = RDK.Item('', ITEM_TYPE_ROBOT) # Retrieve the robot
joints = robot.Joints().list() # retrieve the current robot joints as a list
joints[5] = 0 # set joint 6 to 0 deg
robot.MoveJ(joints) # move the robot to the new joint position
"""
self.link._check_connection()
command = 'G_Thetas'
self.link._send_line(command)
self.link._send_item(self)
joints = self.link._rec_array()
self.link._check_status()
return joints
def SimulatorJoints(self):
"""Return the current joint position of a robot (only from the simulator, never from the real robot).
This should be used only when RoboDK is connected to the real robot and only the simulated robot needs to be retrieved (for example, if we want to move the robot using a spacemouse).
Note: Use robot.Joints() instead to retrieve the simulated and real robot position when connected.
.. seealso:: :func:`~robolink.Item.Joints`
"""
self.link._check_connection()
command = 'G_Thetas_Sim'
self.link._send_line(command)
self.link._send_item(self)
joints = self.link._rec_array()
self.link._check_status()
return joints.list()
def JointPoses(self, joints = None):
"""Returns the positions of the joint links for a provided robot configuration (joints). If no joints are provided it will return the poses for the current robot position.
Out 1 : 4x4 x n -> array of 4x4 homogeneous matrices. Index 0 is the base frame reference (it never moves when the joints move).
"""
self.link._check_connection()
command = 'G_LinkPoses'
self.link._send_line(command)
self.link._send_item(self)
if joints is None:
self.link._send_array([])
else:
self.link._send_array(joints)
nlinks = self.link._rec_int()
poses = []
for i in range(nlinks):
poses.append(self.link._rec_pose())
self.link._check_status()
return poses
def JointsHome(self):
"""Return the home joints of a robot.
The home joints can be manually set in the robot "Parameters" menu of the robot panel in RoboDK, then select "Set home position".
.. seealso:: :func:`~robolink.Item.Joints`
"""
self.link._check_connection()
command = 'G_Home'
self.link._send_line(command)
self.link._send_item(self)
joints = self.link._rec_array()
self.link._check_status()
return joints
def setJointsHome(self, joints):
"""Set the home position of the robot in the joint space.
:param joints: robot joints
:type joints: list of float or :class:`.Mat`
.. seealso:: :func:`~robolink.Item.setJoints`
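Example (illustrative joint values, in degrees; assumes an existing connection RDK = Robolink()):
.. code-block:: python
robot = RDK.Item('', ITEM_TYPE_ROBOT)
robot.setJointsHome([0, -90, 90, 0, 90, 0])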
"""
self.link._check_connection()
command = 'S_Home'
self.link._send_line(command)
self.link._send_array(joints)
self.link._send_item(self)
self.link._check_status()
return self
def ObjectLink(self, link_id=0):
"""Returns an item pointer (:class:`.Item`) to a robot link. This is useful to show/hide certain robot links or alter their geometry.
:param int link_id: link index (0 for the robot base, 1 for the first link, ...)
"""
self.link._check_connection()
command = 'G_LinkObjId'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_int(link_id)
item = self.link._rec_item()
self.link._check_status()
return item
def getLink(self, type_linked=ITEM_TYPE_ROBOT):
"""Returns an item pointer (:class:`.Item`) to a robot, object, tool or program. This is useful to retrieve the relationship between programs, robots, tools and other specific projects.
:param int type_linked: type of linked object to retrieve
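Example (illustrative; assumes an existing connection RDK = Robolink(), and 'Main Program' is a placeholder program name):
.. code-block:: python
program = RDK.Item('Main Program', ITEM_TYPE_PROGRAM)
robot = program.getLink(ITEM_TYPE_ROBOT)    # robot linked to the program
if robot.Valid():
    print('The program uses: ' + robot.Name())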
"""
self.link._check_connection()
command = 'G_LinkType'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_int(type_linked)
item = self.link._rec_item()
self.link._check_status()
return item
def setJoints(self, joints):
"""Set the current joints of a robot or a target. If robot joints are set, the robot position will be updated on the screen.
:param joints: robot joints
:type joints: list of float or :class:`.Mat`
.. seealso:: :func:`~robolink.Item.Joints`
"""
self.link._check_connection()
command = 'S_Thetas'
self.link._send_line(command)
self.link._send_array(joints)
self.link._send_item(self)
self.link._check_status()
return self
def JointLimits(self):
"""Retrieve the joint limits of a robot. Returns (lower limits, upper limits, joint type).
.. seealso:: :func:`~robolink.Item.setJointLimits`
"""
self.link._check_connection()
command = 'G_RobLimits'
self.link._send_line(command)
self.link._send_item(self)
lim_inf = self.link._rec_array()
lim_sup = self.link._rec_array()
joints_type = self.link._rec_int()/1000.0
self.link._check_status()
return lim_inf, lim_sup, joints_type
def setJointLimits(self, lower_limit, upper_limit):
"""Update the robot joint limits
:param lower_limit: lower joint limits
:type lower_limit: list of float
:param upper_limit: upper joint limits
:type upper_limit: list of float
.. seealso:: :func:`~robolink.Item.JointLimits`
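Example (a minimal sketch; assumes an existing connection RDK = Robolink(), and the limit values are illustrative and depend on your robot):
.. code-block:: python
robot = RDK.Item('', ITEM_TYPE_ROBOT)
lower, upper, joints_type = robot.JointLimits()
print(lower.list())
print(upper.list())
robot.setJointLimits([-90, -130, -200, -270, -120, -400], [90, 130, 70, 270, 120, 400])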
"""
self.link._check_connection()
command = 'S_RobLimits'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_array(lower_limit)
self.link._send_array(upper_limit)
self.link._check_status()
def setRobot(self, robot=None):
"""Assigns a specific robot to a program, target or robot machining project.
:param robot: robot to link
:type robot: :class:`.Item`
"""
if robot is None:
robot = Item(self.link)
self.link._check_connection()
command = 'S_Robot'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_item(robot)
self.link._check_status()
return self
def setPoseFrame(self, frame):
"""Sets the reference frame of a robot (user frame). The frame can be an item or a 4x4 Matrix
:param frame: robot reference frame as an item, or a pose
:type frame: :class:`.Mat` or :class:`.Item`
.. seealso:: :func:`~robolink.Item.PoseFrame`, :func:`~robolink.Item.setPose`, :func:`~robolink.Item.setPoseTool`
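Example (illustrative; assumes an existing connection RDK = Robolink(), and 'Frame 1' is a placeholder reference frame name):
.. code-block:: python
robot = RDK.Item('', ITEM_TYPE_ROBOT)
frame = RDK.Item('Frame 1', ITEM_TYPE_FRAME)
robot.setPoseFrame(frame)               # link the robot to a reference frame item
# Alternatively, provide a pose directly (not linked to any item):
# robot.setPoseFrame(transl(400, 0, 0))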
"""
self.link._check_connection()
if isinstance(frame,Item):
command = 'S_Frame_ptr'
self.link._send_line(command)
self.link._send_item(frame)
else:
command = 'S_Frame'
self.link._send_line(command)
self.link._send_pose(frame)
self.link._send_item(self)
self.link._check_status()
return self
def setPoseTool(self, tool):
"""Set the robot tool pose (TCP) with respect to the robot flange. The tool pose can be an item or a 4x4 Matrix
:param tool: robot tool as an item, or a pose
:type tool: :class:`.Mat` or :class:`.Item`
.. seealso:: :func:`~robolink.Item.PoseTool`, :func:`~robolink.Item.setPose`, :func:`~robolink.Item.setPoseFrame`"""
self.link._check_connection()
if isinstance(tool,Item):
command = 'S_Tool_ptr'
self.link._send_line(command)
self.link._send_item(tool)
else:
command = 'S_Tool'
self.link._send_line(command)
self.link._send_pose(tool)
self.link._send_item(self)
self.link._check_status()
return self
def PoseTool(self):
"""Returns the pose (:class:`.Mat`) of the robot tool (TCP) with respect to the robot flange
.. seealso:: :func:`~robolink.Item.setPoseTool`, :func:`~robolink.Item.Pose`, :func:`~robolink.Item.PoseFrame`
"""
self.link._check_connection()
command = 'G_Tool'
self.link._send_line(command)
self.link._send_item(self)
pose = self.link._rec_pose()
self.link._check_status()
return pose
def PoseFrame(self):
"""Returns the pose (:class:`.Mat`) of the robot reference frame with respect to the robot base
.. seealso:: :func:`~robolink.Item.setPoseFrame`, :func:`~robolink.Item.Pose`, :func:`~robolink.Item.PoseTool`
"""
self.link._check_connection()
command = 'G_Frame'
self.link._send_line(command)
self.link._send_item(self)
pose = self.link._rec_pose()
self.link._check_status()
return pose
# Obsolete methods -----------------------
def Htool(self):
"""Obsolete. Use :func:`~robolink.Item.PoseTool` instead. Returns the pose (:class:`.Mat`) of the robot tool (TCP) with respect to the robot flange"""
return self.PoseTool()
def Tool(self):
"""Obsolete. Use :func:`~robolink.Item.PoseTool` instead. Returns the pose (:class:`.Mat`) of the robot tool (TCP) with respect to the robot flange"""
return self.PoseTool()
def Frame(self):
"""Obsolete. Use :func:`~robolink.Item.PoseFrame` instead. Returns the pose (:class:`.Mat`) of the robot reference frame with respect to the robot base"""
return self.PoseFrame()
def setHtool(self, tool):
"""Obsolete. :func:`~robolink.Item.setPoseTool` instead. Sets the robot tool pose (TCP) with respect to the robot flange. The tool pose can be an item or a 4x4 Matrix
"""
self.setPoseTool(tool)
def setTool(self, tool):
"""Obsolete. Use :func:`~robolink.Item.setPoseTool` instead. Sets the robot tool pose (TCP) with respect to the robot flange. The tool pose can be an item or a 4x4 Matrix
"""
self.setPoseTool(tool)
def setFrame(self, frame):
"""Obsolete. Use :func:`~robolink.Item.setPoseFrame` instead. Sets the reference frame of a robot (user frame). The frame can be an item or a 4x4 Matrix
"""
self.setPoseFrame(frame)
# -----------------------
def AddTool(self, tool_pose, tool_name = 'New TCP'):
"""Add a tool to a robot given the tool pose and the tool name. It returns the tool as an :class:`.Item`.
:param tool_pose: Tool pose (TCP) of the tool with respect to the robot flange
:type tool_pose: :class:`.Mat`
:param str tool_name: name of the tool
.. seealso:: :func:`~robolink.Robolink.AddFrame`, :func:`~robolink.Item.PoseTool`, :func:`~robolink.Item.setPoseTool`
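Example (a minimal sketch; assumes an existing connection RDK = Robolink(), and the TCP offset and tool name are illustrative):
.. code-block:: python
from robodk import *      # math functions such as transl
robot = RDK.Item('', ITEM_TYPE_ROBOT)
tcp_pose = transl(0, 0, 150)            # TCP located 150 mm along the flange Z axis
tool = robot.AddTool(tcp_pose, 'TCP 1')
robot.setPoseTool(tool)                 # use the new tool as the active TCP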
"""
self.link._check_connection()
command = 'AddToolEmpty'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_pose(tool_pose)
self.link._send_line(tool_name)
newtool = self.link._rec_item()
self.link._check_status()
return newtool
def SolveFK(self, joints, tool=None, reference=None):
"""Calculate the forward kinematics of the robot for the provided joints.
Returns the pose of the robot flange with respect to the robot base reference (:class:`.Mat`).
:param joints: robot joints
:type joints: list of float or :class:`.Mat`
:param tool: Optionally provide the tool used to calculate the forward kinematics. If this parameter is ignored it will use the robot flange.
:type tool: :class:`.Mat`
:param reference: Optionally provide the reference frame used to calculate the forward kinematics. If this parameter is ignored it will use the robot base frame.
:type reference: :class:`.Mat`
.. seealso:: :func:`~robolink.Item.SolveIK`, :func:`~robolink.Item.SolveIK_All`, :func:`~robolink.Item.JointsConfig`
Example:
.. code-block:: python
from robolink import * # import the robolink library
RDK = Robolink() # connect to the RoboDK API (RoboDK starts if it has not started
robot = RDK.Item('', ITEM_TYPE_ROBOT) # Retrieve the robot
# get the current robot joints
robot_joints = robot.Joints()
# get the robot position from the joints (calculate forward kinematics)
robot_position = robot.SolveFK(robot_joints)
# get the robot configuration (robot joint state)
robot_config = robot.JointsConfig(robot_joints)
# calculate the new robot position
new_robot_position = transl([x_move,y_move,z_move])*robot_position
# calculate the new robot joints
new_robot_joints = robot.SolveIK(new_robot_position)
if len(new_robot_joints.tolist()) < 6:
print("No robot solution!! The new position is too far, out of reach or close to a singularity")
quit()
# calculate the robot configuration for the new joints
new_robot_config = robot.JointsConfig(new_robot_joints)
if robot_config[0] != new_robot_config[0] or robot_config[1] != new_robot_config[1] or robot_config[2] != new_robot_config[2]:
print("Warning! Robot configuration changed: this may lead to unextected movements!")
print(robot_config)
print(new_robot_config)
# move the robot to the new position
robot.MoveJ(new_robot_joints)
#robot.MoveL(new_robot_joints)
"""
self.link._check_connection()
command = 'G_FK'
self.link._send_line(command)
self.link._send_array(joints)
self.link._send_item(self)
pose = self.link._rec_pose()
self.link._check_status()
if tool is not None:
pose = pose*tool
if reference is not None:
pose = invH(reference)*pose
return pose
def JointsConfig(self, joints):
"""Returns the robot configuration state for a set of robot joints.
The configuration state is defined as: [REAR, LOWERARM, FLIP]
:param joints: robot joints
:type joints: list of float
.. seealso:: :func:`~robolink.Item.SolveFK`, :func:`~robolink.Item.SolveIK`
"""
self.link._check_connection()
command = 'G_Thetas_Config'
self.link._send_line(command)
self.link._send_array(joints)
self.link._send_item(self)
config = self.link._rec_array()
self.link._check_status()
return config
def SolveIK(self, pose, joints_approx = None, tool=None, reference=None):
"""Calculates the inverse kinematics for the specified pose.
It returns the joints solution as a list of floats which are the closest match to the current robot configuration (see SolveIK_All()).
Optionally, specify a preferred robot position using the parameter joints_approx.
:param pose: pose of the robot flange with respect to the robot base frame
:type pose: :class:`.Mat`
:param joints_approx: approximate solution. Leave blank to return the closest match to the current robot position.
:type joints_approx: list of float
.. seealso:: :func:`~robolink.Item.SolveFK`, :func:`~robolink.Item.SolveIK_All`, :func:`~robolink.Item.JointsConfig`
"""
if tool is not None:
pose = pose*invH(tool)
if reference is not None:
pose = reference*pose
self.link._check_connection()
if joints_approx is None:
command = 'G_IK'
self.link._send_line(command)
self.link._send_pose(pose)
self.link._send_item(self)
joints = self.link._rec_array()
else:
command = 'G_IK_jnts'
self.link._send_line(command)
self.link._send_pose(pose)
self.link._send_array(joints_approx)
self.link._send_item(self)
joints = self.link._rec_array()
self.link._check_status()
return joints
def SolveIK_All(self, pose, tool=None, reference=None):
"""Calculates the inverse kinematics for the specified robot and pose. The function returns all available joint solutions as a 2D matrix.
Returns a list of joints as a 2D matrix (float x n x m)
:param pose: pose of the robot flange with respect to the robot base frame
:type pose: :class:`.Mat`
.. seealso:: :func:`~robolink.Item.SolveFK`, :func:`~robolink.Item.SolveIK`, :func:`~robolink.Item.JointsConfig`"""
if tool is not None:
pose = pose*invH(tool)
if reference is not None:
pose = reference*pose
self.link._check_connection()
command = 'G_IK_cmpl'
self.link._send_line(command)
self.link._send_pose(pose)
self.link._send_item(self)
joints_list = self.link._rec_matrix()
self.link._check_status()
return joints_list
def FilterTarget(self, pose, joints_approx=None):
"""Filters a target to improve accuracy. This option requires a calibrated robot.
:param pose: pose of the robot TCP with respect to the robot reference frame
:type pose: :class:`.Mat`
:param joints_approx: approximated desired joints to define the preferred configuration
:type joints_approx: list of float or :class:`.Mat`"""
self.link._check_connection()
command = 'FilterTarget'
self.link._send_line(command)
self.link._send_pose(pose)
if joints_approx is None:
joints_approx = [0,0,0,0,0,0]
self.link._send_array(joints_approx)
self.link._send_item(self)
pose_filtered = self.link._rec_pose()
joints_filtered = self.link._rec_array()
self.link._check_status()
return pose_filtered, joints_filtered
def Connect(self, robot_ip = '', blocking = True):
"""Connect to a real robot and wait for a connection to succeed. Returns 1 if connection succeeded, or 0 if it failed.
:param robot_ip: Robot IP. Leave blank to use the IP selected in the connection panel of the robot.
:type robot_ip: str
.. seealso:: :func:`~robolink.Item.ConnectSafe`, :func:`~robolink.Item.ConnectedState`, :func:`~robolink.Item.Disconnect`, :func:`~robolink.Robolink.setRunMode`
"""
self.link._check_connection()
command = 'Connect2'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_line(robot_ip)
self.link._send_int(1 if blocking else 0)
status = self.link._rec_int()
self.link._check_status()
return status
def ConnectSafe(self, robot_ip = '', max_attempts=5, wait_connection=4, callback_abort=None):
"""Connect to a real robot and wait for a connection to succeed. Returns 1 if connection succeeded 0 if it failed.
:param robot_ip: Robot IP. Leave blank to use the IP selected in the connection panel of the robot.
:type robot_ip: str
:param max_attempts: maximum connection attempts before reporting an unsuccessful connection
:type max_attempts: int
:param wait_connection: time to wait in seconds between connection attempts
:type wait_connection: float
:param callback_abort: function pointer that returns true if we should abort the connection operation
:type callback_abort: function
.. seealso:: :func:`~robolink.Item.Connect`, :func:`~robolink.Item.ConnectedState`, :func:`~robolink.Robolink.setRunMode`
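Example (illustrative sketch; assumes an existing connection RDK = Robolink() and that the robot connection is configured in the connection panel):
.. code-block:: python
robot = RDK.Item('', ITEM_TYPE_ROBOT)
state = robot.ConnectSafe()
if state != ROBOTCOM_READY:
    raise Exception('Unable to connect to the robot')
RDK.setRunMode(RUNMODE_RUN_ROBOT)   # move the real robot from now on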
"""
trycount = 0
refresh_rate = 0.2
self.Connect(blocking=False)
tic()
timer1 = toc()
pause(refresh_rate)
while True:
# Wait up to 2 seconds to see the connected state
for i in range(10):
con_status, status_msg = self.ConnectedState()
print(status_msg)
if con_status == ROBOTCOM_READY:
return con_status
if callback_abort is not None and callback_abort():
return con_status
pause(refresh_rate)
if con_status < 0:
print('Trying to reconnect...')
self.Disconnect()
if callback_abort is not None and callback_abort():
return con_status
pause(refresh_rate)
self.Connect()
if toc() - timer1 > wait_connection:
timer1 = toc()
trycount = trycount + 1
if trycount >= max_attempts:
print('Failed to connect: Timed out')
break
if callback_abort is not None and callback_abort():
return con_status
pause(refresh_rate)
return con_status
def ConnectionParams(self):
"""Returns the robot connection parameters
:return: [robotIP (str), port (int), remote_path (str), FTP_user (str), FTP_pass (str)]
.. seealso:: :func:`~robolink.Item.setConnectionParams`, :func:`~robolink.Item.Connect`, :func:`~robolink.Item.ConnectSafe`
"""
self.link._check_connection()
command = 'ConnectParams'
self.link._send_line(command)
self.link._send_item(self)
robot_ip = self.link._rec_line()
port = self.link._rec_int()
remote_path = self.link._rec_line()
ftp_user = self.link._rec_line()
ftp_pass = self.link._rec_line()
self.link._check_status()
return robot_ip, port, remote_path, ftp_user, ftp_pass
def setConnectionParams(self, robot_ip, port, remote_path, ftp_user, ftp_pass):
"""Retrieve robot connection parameters
:param robot_ip: robot IP
:type robot_ip: str
:param port: robot communication port
:type port: int
:param remote_path: path to transfer files on the robot controller
:type remote_path: str
:param ftp_user: user name for the FTP connection
:type ftp_user: str
:param ftp_pass: password credential for the FTP connection
:type ftp_pass: str
.. seealso:: :func:`~robolink.Item.ConnectionParams`, :func:`~robolink.Item.Connect`, :func:`~robolink.Item.ConnectSafe`
"""
self.link._check_connection()
command = 'setConnectParams'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_line(robot_ip)
self.link._send_int(port)
self.link._send_line(remote_path)
self.link._send_line(ftp_user)
self.link._send_line(ftp_pass)
self.link._check_status()
return self
def ConnectedState(self):
"""Check connection status with a real robobt
Out 1 : status code -> (int) ROBOTCOM_READY if the robot is ready to move, otherwise, status message will provide more information about the issue
Out 2 : status message -> Message description of the robot status
.. seealso:: :func:`~robolink.Item.ConnectionParams`, :func:`~robolink.Item.Connect`, :func:`~robolink.Item.ConnectSafe`
Example:
.. code-block:: python
from robolink import * # import the robolink library
robot = RDK.Item('', ITEM_TYPE_ROBOT) # Get the first robot available
state = robot.Connect()
print(state)
# Check the connection status and message
state, msg = robot.ConnectedState()
print(state)
print(msg)
if state != ROBOTCOM_READY:
print('Problems connecting: ' + robot.Name() + ': ' + msg)
quit()
# Move the robot (real robot if we are connected)
robot.MoveJ(jnts, False)
"""
self.link._check_connection()
command = 'ConnectedState'
self.link._send_line(command)
self.link._send_item(self)
robotcom_status = self.link._rec_int()
status_msg = self.link._rec_line()
self.link._check_status()
return robotcom_status, status_msg
def Disconnect(self):
"""Disconnect from a real robot (when the robot driver is used)
Returns 1 if it disconnected successfully, 0 if it failed. It can fail if it was previously disconnected manually for example.
.. seealso:: :func:`~robolink.Item.Connect`, :func:`~robolink.Item.ConnectedState`
"""
self.link._check_connection()
command = 'Disconnect'
self.link._send_line(command)
self.link._send_item(self)
status = self.link._rec_int()
self.link._check_status()
return status
def MoveJ(self, target, blocking=True):
"""Move a robot to a specific target ("Move Joint" mode). This function waits (blocks) until the robot finishes its movements.
If this is used with a program item, a new joint movement instruction will be added to the program.
Important note when adding new movement instructions to programs: only target items are supported, not poses.
:param target: Target to move to. It can be the robot joints (Nx1 or 1xN), the pose (4x4) or a target (item pointer)
:type target: :class:`.Mat`, list of joints or :class:`.Item`
:param blocking: Set to True to wait until the robot finished the movement (default=True). Set to false to make it a non blocking call. Tip: If set to False, use robot.Busy() to check if the robot is still moving.
:type blocking: bool
.. seealso:: :func:`~robolink.Item.MoveL`, :func:`~robolink.Item.MoveC`, :func:`~robolink.Item.SearchL`, :func:`~robolink.Robolink.AddTarget`
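Example (illustrative; assumes an existing connection RDK = Robolink(), and 'Target 1' and the joint values are placeholders):
.. code-block:: python
robot = RDK.Item('', ITEM_TYPE_ROBOT)
target = RDK.Item('Target 1', ITEM_TYPE_TARGET)
robot.MoveJ(target)                       # joint move to a target item
robot.MoveJ([10, -90, 90, 0, 90, 0])      # joint move to joint values, in degrees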
"""
if self.type == ITEM_TYPE_PROGRAM:
if type(target) != Item:
raise Exception("Adding a movement instruction to a program given joints or a pose is not supported. Use a target item instead, for example, add a target as with RDK.AddTarget(...) and set the pose or joints.")
self.addMoveJ(target)
else:
self.link._moveX(target, self, 1, blocking)
def MoveL(self, target, blocking=True):
"""Moves a robot to a specific target ("Move Linear" mode). This function waits (blocks) until the robot finishes its movements. This function can also be called on Programs and a new movement instruction will be added at the end of the program.
If this is used with a program item, a new linear movement instruction will be added to the program.
Important note when adding new movement instructions to programs: only target items are supported, not poses.
:param target: Target to move to. It can be the robot joints (Nx1 or 1xN), the pose (4x4) or a target (item pointer)
:type target: :class:`.Mat`, list of joints or :class:`.Item`
:param blocking: Set to True to wait until the robot finished the movement (default=True). Set to false to make it a non blocking call. Tip: If set to False, use robot.Busy() to check if the robot is still moving.
:type blocking: bool
.. seealso:: :func:`~robolink.Item.MoveJ`, :func:`~robolink.Item.MoveC`, :func:`~robolink.Item.SearchL`, :func:`~robolink.Robolink.AddTarget`
"""
if self.type == ITEM_TYPE_PROGRAM:
if type(target) != Item:
raise Exception("Adding a movement instruction to a program given joints or a pose is not supported. Use a target item instead, for example, add a target as with RDK.AddTarget(...) and set the pose or joints.")
self.addMoveL(target)
else:
self.link._moveX(target, self, 2, blocking)
def SearchL(self, target, blocking=True):
"""Moves a robot to a specific target and stops when a specific input switch is detected ("Search Linear" mode). This function waits (blocks) until the robot finishes its movements.
:param target: Target to move to. It can be the robot joints (Nx1 or 1xN), the pose (4x4) or a target (item pointer)
:type target: :class:`.Mat`, list of joints or :class:`.Item`
:param blocking: Set to True to wait until the robot finished the movement (default=True). Set to false to make it a non blocking call. Tip: If set to False, use robot.Busy() to check if the robot is still moving.
:type blocking: bool
.. seealso:: :func:`~robolink.Item.MoveJ`, :func:`~robolink.Item.MoveL`, :func:`~robolink.Item.MoveC`, :func:`~robolink.Robolink.AddTarget`
"""
self.link._moveX(target, self, 5, blocking)
return self.SimulatorJoints()
def MoveC(self, target1, target2, blocking=True):
"""Move a robot to a specific target ("Move Circular" mode). By default, this procedure waits (blocks) until the robot finishes the movement.
:param target1: intermediate pose along the circular movement
:type target1: :class:`.Mat`, list of joints or :class:`.Item`
:param target2: final circle target
:type target2: :class:`.Mat`, list of joints or :class:`.Item`
:param blocking: True if the instruction should wait until the robot finished the movement (default=True)
:type blocking: bool
.. seealso:: :func:`~robolink.Item.MoveL`, :func:`~robolink.Item.MoveC`, :func:`~robolink.Item.SearchL`, :func:`~robolink.Robolink.AddTarget`
"""
if self.type == ITEM_TYPE_PROGRAM:
if type(target1) != Item or type(target2) != Item:
raise Exception("Adding a movement instruction to a program given joints or a pose is not supported. Use a target item instead, for example, add a target as with RDK.AddTarget(...) and set the pose or joints.")
self.addMoveC(target1, target2)
else:
self.link.MoveC(target1, target2, self, blocking)
def MoveJ_Test(self, j1, j2, minstep_deg=-1):
"""Checks if a joint movement is feasible and free of collision (if collision checking is activated).
:param j1: start joints
:type j1: list of float
:param j2: end joints
:type j2: list of float
:param float minstep_deg: joint step in degrees
:return: returns 0 if the movement is free of collision or any other issues. Otherwise it returns the number of pairs of objects that collided if there was a collision.
:rtype: int
.. seealso:: :func:`~robolink.Item.MoveL_Test`, :func:`~robolink.Robolink.setCollisionActive`, :func:`~robolink.Item.MoveJ`, :func:`~robolink.Robolink.AddTarget`
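Example (a minimal sketch; assumes collision checking has been set up in the station and the end joints are hypothetical):
.. code-block:: python
    robot = RDK.Item('', ITEM_TYPE_ROBOT)
    j_start = robot.Joints().list()             # current joints as a list
    j_end = [10, -40, 80, 0, 60, 0]             # hypothetical joint values in degrees
    issues = robot.MoveJ_Test(j_start, j_end)
    if issues == 0:
        robot.MoveJ(j_end)                      # the move is feasible and collision-free
    else:
        print("Joint move blocked: %i collision pair(s)" % issues)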
"""
self.link._check_connection()
command = 'CollisionMove'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_array(j1)
self.link._send_array(j2)
self.link._send_int(minstep_deg*1000)
self.link.COM.settimeout(3600) # wait up to 1 hour
collision = self.link._rec_int()
self.link.COM.settimeout(self.link.TIMEOUT)
self.link._check_status()
return collision
def MoveL_Test(self, j1, pose, minstep_mm=-1):
"""Checks if a linear movement is feasible and free of collision (if collision checking is activated).
:param j1: start joints
:type j1: list of float
:param pose: end pose (position of the active tool with respect to the active reference frame)
:type pose: :class:`.Mat`
:param float minstep_mm: linear step in mm
:return: returns 0 if the movement is free of collision or any other issues.
:rtype: int
If the robot cannot reach the target pose it returns -2. If the robot can reach the target but it cannot make a linear movement it returns -1.
.. seealso:: :func:`~robolink.Item.MoveJ_Test`, :func:`~robolink.Item.setPoseFrame`, :func:`~robolink.Item.setPoseTool`, :func:`~robolink.Robolink.setCollisionActive`, :func:`~robolink.Item.MoveL`, :func:`~robolink.Robolink.AddTarget`
"""
self.link._check_connection()
command = 'CollisionMoveL'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_array(j1)
self.link._send_pose(pose)
self.link._send_int(minstep_mm*1000)
self.link.COM.settimeout(3600) # wait up to 1 hour
collision = self.link._rec_int()
self.link.COM.settimeout(self.link.TIMEOUT)
self.link._check_status()
return collision
def setSpeed(self, speed_linear, speed_joints=-1, accel_linear=-1, accel_joints=-1):
"""Sets the linear speed of a robot. Additional arguments can be provided to set linear acceleration or joint speed and acceleration.
:param float speed_linear: linear speed -> speed in mm/s (-1 = no change)
:param float speed_joints: joint speed (optional) -> speed in deg/s (-1 = no change)
:param float accel_linear: linear acceleration (optional) -> acceleration in mm/s2 (-1 = no change)
:param float accel_joints: joint acceleration (optional) -> acceleration in deg/s2 (-1 = no change)
.. seealso:: :func:`~robolink.Item.setAcceleration`, :func:`~robolink.Item.setSpeedJoints`, :func:`~robolink.Item.setAccelerationJoints`
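Example (a minimal sketch with typical values; speeds and accelerations depend on your robot and application):
.. code-block:: python
    robot = RDK.Item('', ITEM_TYPE_ROBOT)
    robot.setSpeed(200)                  # linear speed of 200 mm/s
    robot.setSpeed(200, 90)              # linear speed 200 mm/s and joint speed 90 deg/s
    robot.setSpeed(-1, -1, 600, 360)     # keep speeds and set linear/joint accelerations only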
"""
self.link._check_connection()
command = 'S_Speed4'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_array([float(speed_linear), float(speed_joints), float(accel_linear), float(accel_joints)])
self.link._check_status()
return self
def setAcceleration(self, accel_linear):
"""Sets the linear acceleration of a robot in mm/s2
:param float accel_linear: acceleration in mm/s2
.. seealso:: :func:`~robolink.Item.setSpeed`, :func:`~robolink.Item.setSpeedJoints`, :func:`~robolink.Item.setAccelerationJoints`
"""
self.setSpeed(-1,-1,accel_linear,-1)
return self
def setSpeedJoints(self, speed_joints):
"""Sets the joint speed of a robot in deg/s for rotary joints and mm/s for linear joints
:param float speed_joints: speed in deg/s for rotary joints and mm/s for linear joints
.. seealso:: :func:`~robolink.Item.setSpeed`, :func:`~robolink.Item.setAcceleration`, :func:`~robolink.Item.setAccelerationJoints`
"""
self.setSpeed(-1,speed_joints,-1,-1)
return self
def setAccelerationJoints(self, accel_joints):
"""Sets the joint acceleration of a robot
:param float accel_joints: acceleration in deg/s2 for rotary joints and mm/s2 for linear joints
.. seealso:: :func:`~robolink.Item.setSpeed`, :func:`~robolink.Item.setAcceleration`, :func:`~robolink.Item.setSpeedJoints`
"""
self.setSpeed(-1,-1,-1,accel_joints)
return self
def setRounding(self, rounding_mm):
"""Sets the rounding accuracy to smooth the edges of corners. In general, it is recommended to allow a small approximation near the corners to maintain a constant speed.
Setting a rounding value greater than 0 helps avoid jerky movements caused by constant accelerations and decelerations.
:param float rounding_mm: rounding accuracy in mm. Set to -1 (default) for best accuracy and to have point to point movements (might have a jerky behavior)
This rounding parameter is also known as ZoneData (ABB), CNT (Fanuc), C_DIS/ADVANCE (KUKA), cornering (Mecademic) or blending (Universal Robots)
.. seealso:: :func:`~robolink.Item.setSpeed`
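Example (a minimal sketch; the rounding value is application specific):
.. code-block:: python
    prog = RDK.AddProgram('RoundingDemo')   # hypothetical program name
    prog.setSpeed(150)                      # 150 mm/s linear speed
    prog.setRounding(5)                     # blend corners within 5 mm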
"""
self.link._check_connection()
command = 'S_ZoneData'
self.link._send_line(command)
self.link._send_int(rounding_mm*1000)
self.link._send_item(self)
self.link._check_status()
return self
def setZoneData(self, zonedata):
"""Obsolete. Use :func:`~robolink.Item.setRounding` instead."""
self.setRounding(zonedata)
def ShowSequence(self, matrix, display_type=-1, timeout=-1):
"""Displays a sequence of joints or poses in RoboDK.
:param matrix: list of joints as a matrix or as a list of joint arrays. A sequence of instructions is also supported (same sequence that was supported with RoKiSim).
:type matrix: list of list of float or a matrix of joints as a :class:`.Mat`"""
if type(matrix) == list and (len(matrix) == 0 or type(matrix[0]) == Mat):
# poses assumed
self.link._check_connection()
command = 'Show_SeqPoses'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_array([display_type, timeout])
self.link._send_int(len(matrix))
for pose in matrix:
self.link._send_pose(pose)
self.link._check_status()
else:
# list of joints as a Mat assumed
self.link._check_connection()
command = 'Show_Seq'
self.link._send_line(command)
self.link._send_matrix(matrix)
self.link._send_item(self)
self.link._check_status()
def Busy(self):
"""Checks if a robot or program is currently running (busy or moving).
Returns a busy status (1=moving, 0=stopped)
.. seealso:: :func:`~robolink.Item.WaitMove`, :func:`~robolink.Item.RunProgram`, :func:`~robolink.Item.RunCodeCustom`, :func:`~robolink.Item.RunInstruction`
Example:
.. code-block:: python
from robolink import * # import the robolink library
RDK = Robolink() # Connect to the RoboDK API
prog = RDK.Item('MainProgram', ITEM_TYPE_PROGRAM)
prog.RunProgram()
while prog.Busy():
pause(0.1)
print("Program done")
"""
self.link._check_connection()
command = 'IsBusy'
self.link._send_line(command)
self.link._send_item(self)
busy = self.link._rec_int()
self.link._check_status()
return busy
def Stop(self):
"""Stop a program or a robot
.. seealso:: :func:`~robolink.Item.RunProgram`, :func:`~robolink.Item.MoveJ`
"""
self.link._check_connection()
command = 'Stop'
self.link._send_line(command)
self.link._send_item(self)
self.link._check_status()
def WaitMove(self, timeout=360000):
"""Waits (blocks) until the robot finishes its movement.
:param float timeout: Maximum time to wait for robot to finish its movement (in seconds)
.. seealso:: :func:`~robolink.Item.Busy`, :func:`~robolink.Item.MoveJ`
"""
self.link._check_connection()
command = 'WaitMove'
self.link._send_line(command)
self.link._send_item(self)
self.link._check_status()
self.link.COM.settimeout(timeout)
self.link._check_status()#will wait here
self.link.COM.settimeout(self.link.TIMEOUT)
#busy = self.link.Is_Busy(self.item)
#while busy:
# busy = self.link.Is_Busy(self.item)
def WaitFinished(self):
"""Wait until a program finishes or a robot completes its movement
.. seealso:: :func:`~robolink.Item.Busy`
"""
while self.Busy():
pause(0.05)
def ProgramStart(self, programname, folder='', postprocessor=''):
"""Defines the name of the program when a program must be generated.
It is possible to specify the name of the post processor as well as the folder to save the program.
This method must be called before any program output is generated (before any robot movement or other program instructions).
:param str programname: name of the program
:param str folder: folder to save the program, leave empty to use the default program folder
:param str postprocessor: name of the post processor (for a post processor in C:/RoboDK/Posts/Fanuc_post.py it is possible to provide "Fanuc_post.py" or simply "Fanuc_post")
.. seealso:: :func:`~robolink.Robolink.setRunMode`
"""
return self.link.ProgramStart(programname, folder, postprocessor, self)
def setAccuracyActive(self, accurate = 1):
"""Sets the accuracy of the robot active or inactive. A robot must have been calibrated to properly use this option.
:param int accurate: set to 1 to use the accurate model or 0 to use the nominal model
.. seealso:: :func:`~robolink.Item.AccuracyActive`
"""
self.link._check_connection()
command = 'S_AbsAccOn'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_int(accurate)
self.link._check_status()
def AccuracyActive(self, accurate = 1):
"""Returns True if the accurate kinematics are being used. Accurate kinematics are available after a robot calibration.
.. seealso:: :func:`~robolink.Item.setAccuracyActive`
"""
self.link._check_connection()
command = 'G_AbsAccOn'
self.link._send_line(command)
self.link._send_item(self)
isaccurate = self.link._rec_int()
self.link._check_status()
return isaccurate > 0
def setParamRobotTool(self, tool_mass=5, tool_cog=None):
"""Sets the tool mass and center of gravity. This is only used with accurate robots to improve accuracy.
:param float tool_mass: tool weight in kg
:param list tool_cog: tool center of gravity as [x,y,z] with respect to the robot flange
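Example (a minimal sketch with hypothetical tool data; only meaningful for calibrated/accurate robots):
.. code-block:: python
    robot = RDK.Item('', ITEM_TYPE_ROBOT)
    robot.setParamRobotTool(tool_mass=2.5, tool_cog=[0, 0, 120])   # 2.5 kg tool, CoG 120 mm along the flange Z axis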
"""
self.link._check_connection()
command = 'S_ParamCalibTool'
self.link._send_line(command)
self.link._send_item(self)
values = []
values.append(tool_mass)
if tool_cog is not None:
values += tool_cog
self.link._send_array(values)
self.link._check_status()
def FilterProgram(self, filestr):
"""Filter a program file to improve accuracy for a specific robot. The robot must have been previously calibrated.
It returns 0 if the filter succeeded, or a negative value if there are filtering problems. It also returns a summary of the filtering.
:param str filestr: File path of the program. Formats supported include: JBI (Motoman), SRC (KUKA), MOD (ABB), PRG (ABB), LS (FANUC).
"""
self.link._check_connection()
command = 'FilterProg2'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_line(filestr)
filter_status = self.link._rec_int()
filter_msg = self.link._rec_line()
self.link._check_status()
return filter_status, filter_msg
#"""Program item calls"""
def MakeProgram(self, filestr='', run_mode = RUNMODE_MAKE_ROBOTPROG):
"""Generate the program file. Returns True if the program was successfully generated.
:param str filestr: Path to save the program ending with a slash (not including the file name and extension). Make sure the folder ends with a slash. You can use backslashes or forward slashes to define the path. In most cases, the file name is defined by the program name (visible in the RoboDK tree) and the extension is defined by the Post Processor (the file extension must match the extension supported by your robot controller). It can be left empty to use the default action (save to the default programs location)
:param run_mode: RUNMODE_MAKE_ROBOTPROG to generate the program file. Alternatively, Use RUNMODE_MAKE_ROBOTPROG_AND_UPLOAD or RUNMODE_MAKE_ROBOTPROG_AND_START to transfer the program through FTP and execute the program.
:return: [success (True or False), log (str), transfer_succeeded (True/False)]
Transfer succeeded is True if there was a successful program transfer (if RUNMODE_MAKE_ROBOTPROG_AND_UPLOAD or RUNMODE_MAKE_ROBOTPROG_AND_START are used)
.. seealso:: :func:`~robolink.Robolink.setRunMode`
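Example (a minimal sketch; the program name and output folder are hypothetical and the file extension depends on the selected post processor):
.. code-block:: python
    prog = RDK.Item('MainProgram', ITEM_TYPE_PROGRAM)
    success, log_msg, transfer_ok = prog.MakeProgram('C:/Programs/')   # folder ending with a slash
    if not success:
        print("Program generation failed: " + log_msg)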
"""
self.link._check_connection()
command = 'MakeProg2'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_line(filestr)
self.link._send_int(run_mode)
self.link.COM.settimeout(300) # wait up to 5 minutes for the program to generate
prog_status = self.link._rec_int()
self.link.COM.settimeout(self.link.TIMEOUT)
prog_log_str = self.link._rec_line()
transfer_status = self.link._rec_int()
self.link._check_status()
success = False
if prog_status > 0:
success = True
transfer_ok = False
if transfer_status > 0:
transfer_ok = True
self.LAST_STATUS_MESSAGE = prog_log_str
return success, prog_log_str, transfer_ok
def setRunType(self, program_run_type):
"""Set the Run Type of a program to specify if a program made using the GUI will be run in simulation mode or on the real robot ("Run on robot" option).
:param int program_run_type: Use "PROGRAM_RUN_ON_SIMULATOR" to set the program to run on the simulator only or "PROGRAM_RUN_ON_ROBOT" to force the program to run on the robot
.. seealso:: :func:`~robolink.Robolink.setRunMode` :func:`~robolink.Item.RunType`
"""
self.link._check_connection()
command = 'S_ProgRunType'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_int(program_run_type)
self.link._check_status()
def RunType(self):
"""Get the Run Type of a program to specify if a program made using the GUI will be run in simulation mode or on the real robot ("Run on robot" option).
.. seealso:: :func:`~robolink.Robolink.setRunMode` :func:`~robolink.Item.setRunType`
"""
self.link._check_connection()
command = 'G_ProgRunType'
self.link._send_line(command)
self.link._send_item(self)
program_run_type = self.link._rec_int()
self.link._check_status()
return program_run_type
def RunProgram(self, prog_parameters=None):
"""Obsolete. Use :func:`~robolink.Item.RunCode` instead. RunProgram is available for backwards compatibility."""
return self.RunCode(prog_parameters)
def RunCode(self, prog_parameters=None):
"""Run a program. It returns the number of instructions that can be executed successfully (a quick program check is performed before the program starts)
This is a non-blocking call. Use program.Busy() to check if the program execution finished, or program.WaitFinished() to wait until the program finishes.
:param prog_parameters: Program parameters can be provided for Python programs as a string
:type prog_parameters: list of str
.. seealso:: :func:`~robolink.Item.RunCodeCustom`, :func:`~robolink.Item.Busy`, :func:`~robolink.Robolink.AddProgram`
If setRunMode(RUNMODE_SIMULATE) is used: the program will be simulated (default run mode)
If setRunMode(RUNMODE_RUN_ROBOT) is used: the program will run on the robot (default when RUNMODE_RUN_ROBOT is used)
If setRunMode(RUNMODE_RUN_ROBOT) is used together with program.setRunType(PROGRAM_RUN_ON_ROBOT) -> the program will run sequentially on the robot the same way as if we right clicked the program and selected "Run on robot" in the RoboDK GUI
"""
self.link._check_connection()
if type(prog_parameters) == list:
command = 'RunProgParam'
self.link._send_line(command)
self.link._send_item(self)
parameters = ''
if type(prog_parameters) is list:
parameters = '<br>'.join(str(param_i) for param_i in prog_parameters)
else:
parameters = str(prog_parameters)
self.link._send_line(parameters)
else:
command = 'RunProg'
self.link._send_line(command)
self.link._send_item(self)
prog_status = self.link._rec_int()
self.link._check_status()
return prog_status
def RunCodeCustom(self, code, run_type=INSTRUCTION_CALL_PROGRAM):
"""Obsolete, use RunInstruction instead. Adds a program call, code, message or comment to the program. Returns 0 if succeeded.
.. seealso:: :func:`~robolink.Item.RunInstruction`
"""
return self.RunInstruction(code, run_type)
def RunInstruction(self, code, run_type=INSTRUCTION_CALL_PROGRAM):
"""Adds a program call, code, message or comment to the program. Returns 0 if succeeded.
:param str code: The code to insert, program to run, or comment to add.
:param int run_type: Use INSTRUCTION_* variable to specify if the code is a program call or just a raw code insert. For example, to add a line of customized code use:
.. code-block:: python
:caption: Available Instruction Types
INSTRUCTION_CALL_PROGRAM = 0 # Program call
INSTRUCTION_INSERT_CODE = 1 # Insert raw code in the generated program
INSTRUCTION_START_THREAD = 2 # Start a new process
INSTRUCTION_COMMENT = 3 # Add a comment in the code
INSTRUCTION_SHOW_MESSAGE = 4 # Add a message
.. seealso:: :func:`~robolink.Item.RunCode`, :func:`~robolink.Robolink.AddProgram`
Example:
.. code-block:: python
program.RunInstruction('Setting the spindle speed', INSTRUCTION_COMMENT)
program.RunInstruction('SetRPM(25000)', INSTRUCTION_INSERT_CODE)
program.RunInstruction('Done setting the spindle speed. Ready to start!', INSTRUCTION_SHOW_MESSAGE)
program.RunInstruction('Program1', INSTRUCTION_CALL_PROGRAM)
"""
self.link._check_connection()
command = 'RunCode2'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_line(code.replace('\r\n','<<br>>').replace('\n','<<br>>'))
self.link._send_int(run_type)
prog_status = self.link._rec_int()
self.link._check_status()
return prog_status
def Pause(self, time_ms = -1):
"""Pause instruction for a robot or insert a pause instruction to a program (when generating code offline -offline programming- or when connected to the robot -online programming-).
:param float time_ms: time in milliseconds. Do not provide a value (leave the default -1) to pause until the user decides to resume the execution of the program.
.. seealso:: :func:`~robolink.Robolink.AddProgram`
"""
self.link._check_connection()
command = 'RunPause'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_int(time_ms*1000.0)
self.link._check_status()
def setDO(self, io_var, io_value):
"""Set a Digital Output (DO). This command can also be used to set any generic variables to a desired value.
:param io_var: Digital Output (string or number)
:type io_var: str or int
:param io_value: value
:type io_value: str, int or float
.. seealso:: :func:`~robolink.Robolink.AddProgram`, :func:`~robolink.Item.setAO`, :func:`~robolink.Item.getDI`, :func:`~robolink.Item.getAI`
"""
self.link._check_connection()
command = 'setDO'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_line(str(io_var))
self.link._send_line(str(io_value))
self.link._check_status()
def setAO(self, io_var, io_value):
"""Set an Analog Output (AO).
:param io_var: Analog Output (string or number)
:type io_var: str or int
:param io_value: value
:type io_value: str, int or float
.. seealso:: :func:`~robolink.Robolink.AddProgram`, :func:`~robolink.Item.setDO`, :func:`~robolink.Item.getDI`, :func:`~robolink.Item.getAI`
"""
self.link._check_connection()
command = 'setAO'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_line(str(io_var))
self.link._send_line(str(io_value))
self.link._check_status()
def getDI(self, io_var):
"""Get a Digital Input (DI). This function is only useful when connected to a real robot using the robot driver. It returns a string related to the state of the Digital Input (1=True, 0=False). This function returns an empty string if the script is not executed on the robot.
:param io_var: Digital Input (string or number)
:type io_var: str or int
.. seealso:: :func:`~robolink.Robolink.AddProgram`, :func:`~robolink.Item.getAI`, :func:`~robolink.Item.setDO`, :func:`~robolink.Item.setAO`
"""
self.link._check_connection()
command = 'getDI'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_line(str(io_var))
io_value = self.link._rec_line()
self.link._check_status()
return io_value
def getAI(self, io_var):
"""Get an Analog Input (AI). This function is only useful when connected to a real robot using the robot driver. It returns a string related to the state of the Digital Input (0-1 or other range depending on the robot driver). This function returns an empty string if the script is not executed on the robot.
:param io_var: Analog Input (string or number)
:type io_var: str or int
.. seealso:: :func:`~robolink.Robolink.AddProgram`, :func:`~robolink.Item.getDI`, :func:`~robolink.Item.setDO`, :func:`~robolink.Item.setAO`
"""
self.link._check_connection()
command = 'getAI'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_line(str(io_var))
io_value = self.link._rec_line()
self.link._check_status()
return io_value
def waitDI(self, io_var, io_value, timeout_ms=-1):
"""Wait for an digital input io_var to attain a given value io_value. Optionally, a timeout can be provided.
:param io_var: digital input (string or number)
:type io_var: str or int
:param io_value: value
:type io_value: str, int or float
:param timeout_ms: timeout in milliseconds
:type timeout_ms: int or float
.. seealso:: :func:`~robolink.Robolink.AddProgram`
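Example (a minimal sketch; the I/O names and values are hypothetical and depend on your controller):
.. code-block:: python
    robot = RDK.Item('', ITEM_TYPE_ROBOT)
    robot.setDO('DO_GRIPPER', 1)                 # close a hypothetical gripper output
    robot.waitDI('DI_PART_PRESENT', 1, 5000)     # wait up to 5 s for a hypothetical part-present input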
"""
self.link._check_connection()
command = 'waitDI'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_line(str(io_var))
self.link._send_line(str(io_value))
self.link._send_int(timeout_ms*1000)
self.link._check_status()
def customInstruction(self, name, path_run, path_icon="", blocking=1, cmd_run_on_robot=""):
"""Add a custom instruction. This instruction will execute a Python file or an executable file.
:param name: name of the custom instruction
:type name: str
:param path_run: path to run (relative to RoboDK/bin folder or absolute path)
:type path_run: str
:param path_icon: icon path (relative to RoboDK/bin folder or absolute path)
:type path_icon: str
:param blocking: 1 if blocking, 0 if it is a non blocking executable trigger
:type blocking: int
:param cmd_run_on_robot: Command to run through the driver when connected to the robot
:type cmd_run_on_robot: str
.. seealso:: :func:`~robolink.Robolink.AddProgram`
"""
self.link._check_connection()
command = 'InsCustom2'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_line(name)
self.link._send_line(path_run)
self.link._send_line(path_icon)
self.link._send_line(cmd_run_on_robot)
self.link._send_int(blocking)
self.link._check_status()
def addMoveJ(self, itemtarget):
"""Adds a new robot joint move instruction to a program. This function is obsolete. Use MoveJ instead.
:param itemtarget: target item to move to
:type itemtarget: :class:`.Item`
.. seealso:: :func:`~robolink.Robolink.AddProgram`, :func:`~robolink.Item.MoveJ`
"""
self.link._check_connection()
command = 'Add_INSMOVE'
self.link._send_line(command)
self.link._send_item(itemtarget)
self.link._send_item(self)
self.link._send_int(1)
self.link._check_status()
def addMoveL(self, itemtarget):
"""Adds a new linear move instruction to a program. This function is obsolete. Use MoveL instead.
:param itemtarget: target item to move to
:type itemtarget: :class:`.Item`
.. seealso:: :func:`~robolink.Robolink.AddProgram`, :func:`~robolink.Item.MoveL`
"""
self.link._check_connection()
command = 'Add_INSMOVE'
self.link._send_line(command)
self.link._send_item(itemtarget)
self.link._send_item(self)
self.link._send_int(2)
self.link._check_status()
def addMoveC(self, itemtarget1, itemtarget2):
"""Adds a new circular move instruction to a program (This function is obsolete. Use MoveL instead.)
:param itemtarget: target item to move to
:type itemtarget: :class:`.Item`
.. seealso:: :func:`~robolink.Robolink.AddProgram`, :func:`~robolink.Item.MoveL`, :func:`~robolink.Item.MoveC`
"""
self.link._check_connection()
command = 'Add_INSMOVEC'
self.link._send_line(command)
self.link._send_item(itemtarget1)
self.link._send_item(itemtarget2)
self.link._send_item(self)
self.link._check_status()
def ShowInstructions(self, show=True):
"""Show or hide instruction items of a program in the RoboDK tree
:param show: Set to True to show the instruction nodes, otherwise, set to False
:type show: bool
.. seealso:: :func:`~robolink.Robolink.AddProgram`, :func:`~robolink.Item.ShowTargets`
"""
self.link._check_connection()
command = 'Prog_ShowIns'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_int(1 if show else 0)
self.link._check_status()
def ShowTargets(self, show=True):
"""Show or hide targets of a program in the RoboDK tree
:param show: Set to False to remove the target item (the target is not deleted as it remains inside the program), otherwise, set to True to show the targets
:type show: bool
.. seealso:: :func:`~robolink.Robolink.AddProgram`, :func:`~robolink.Item.ShowInstructions`
"""
self.link._check_connection()
command = 'Prog_ShowTargets'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_int(1 if show else 0)
self.link._check_status()
def InstructionCount(self):
"""Return the number of instructions of a program.
.. seealso:: :func:`~robolink.Robolink.AddProgram`
"""
self.link._check_connection()
command = 'Prog_Nins'
self.link._send_line(command)
self.link._send_item(self)
nins = self.link._rec_int()
self.link._check_status()
return nins
def InstructionSelect(self, ins_id=-1):
"""Select an instruction in the program as a reference to add new instructions. New instructions will be added after the selected instruction. If no instruction ID is specified, the active instruction will be selected and returned.
.. seealso:: :func:`~robolink.Robolink.AddProgram`
"""
self.link._check_connection()
command = 'Prog_SelIns'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_int(ins_id)
ins_id = self.link._rec_int()
self.link._check_status()
return ins_id
def InstructionDelete(self, ins_id=0):
"""Delete an instruction of a program
.. seealso:: :func:`~robolink.Robolink.AddProgram`
"""
self.link._check_connection()
command = 'Prog_DelIns'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_int(ins_id)
success = self.link._rec_int() > 0
self.link._check_status()
return success
def Instruction(self, ins_id=-1):
"""Return the current program instruction or the instruction given the instruction id (if provided).
It returns the following information about an instruction:
name: name of the instruction (displayed in the RoboDK tree)
instype: instruction type (INS_TYPE_*). For example, INS_TYPE_MOVE for a movement instruction.
movetype: type of movement for INS_TYPE_MOVE instructions: MOVE_TYPE_JOINT for joint moves, or MOVE_TYPE_LINEAR for linear moves
isjointtarget: 1 if the target is specified in the joint space, otherwise, the target is specified in the cartesian space (by the pose)
target: pose of the target as :class:`.Item`
joints: robot joints for that target
:param ins_id: instruction id to return
:type ins_id: int
.. seealso:: :func:`~robolink.Robolink.AddProgram`, :func:`~robolink.Robolink.setInstruction`
"""
self.link._check_connection()
command = 'Prog_GIns'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_int(ins_id)
name = self.link._rec_line()
instype = self.link._rec_int()
movetype = None
isjointtarget = None
target = None
joints = None
if instype == INS_TYPE_MOVE:
movetype = self.link._rec_int()
isjointtarget = self.link._rec_int()
target = self.link._rec_pose()
joints = self.link._rec_array()
self.link._check_status()
return name, instype, movetype, isjointtarget, target, joints
def setInstruction(self, ins_id, name, instype, movetype, isjointtarget, target, joints):
"""Update a program instruction.
:param ins_id: index of the instruction (0 for the first instruction, 1 for the second, and so on)
:type ins_id: int
:param name: Name of the instruction (displayed in the RoboDK tree)
:type name: str
:param instype: Type of instruction. INS_TYPE_*
:type instype: int
:param movetype: Type of movement if the instruction is a movement (MOVE_TYPE_JOINT or MOVE_TYPE_LINEAR)
:type movetype: int
:param isjointtarget: 1 if the target is defined in the joint space, otherwise it means it is defined in the cartesian space (by the pose)
:type isjointtarget: int
:param target: target pose
:type target: :class:`.Mat`
:param joints: robot joints for the target
:type joints: list of float
.. seealso:: :func:`~robolink.Robolink.AddProgram`, :func:`~robolink.Robolink.Instruction`
"""
self.link._check_connection()
command = 'Prog_SIns'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_int(ins_id)
self.link._send_line(name)
self.link._send_int(instype)
if instype == INS_TYPE_MOVE:
self.link._send_int(movetype)
self.link._send_int(isjointtarget)
self.link._send_pose(target)
self.link._send_array(joints)
self.link._check_status()
def Update(self, check_collisions=COLLISION_OFF, timeout_sec = 3600, mm_step=-1, deg_step=-1):
"""Updates a program and returns the estimated time and the number of valid instructions.
An update can also be applied to a robot machining project. The update is performed on the generated program.
:param int check_collisions: Check collisions (COLLISION_ON -yes- or COLLISION_OFF -no-)
:param int timeout_sec: Maximum time to wait for the update to complete (in seconds)
:param float mm_step: Step in mm to split the program (-1 means default, as specified in Tools-Options-Motion)
:param float deg_step: Step in deg to split the program (-1 means default, as specified in Tools-Options-Motion)
:return: [valid_instructions, program_time, program_distance, valid_ratio, readable_msg]
valid_instructions: The number of valid instructions
program_time: Estimated cycle time (in seconds)
program_distance: Estimated distance that the robot TCP will travel (in mm)
valid_ratio: This is a ratio from [0.00 to 1.00] showing whether the path can be fully completed without any problems (1.0 means the path is 100% feasible); valid_ratio is <1.0 if there were problems along the path.
valid_ratio will be < 0 if Update is called on a machining project and the machining project cannot be achieved successfully.
readable_msg: a readable message as a string
.. seealso:: :func:`~robolink.Robolink.AddProgram`
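Example (a minimal sketch; the program name is hypothetical and collision checking makes the update slower):
.. code-block:: python
    prog = RDK.Item('MainProgram', ITEM_TYPE_PROGRAM)
    valid_ins, cycle_time, travel_mm, ratio, msg = prog.Update(COLLISION_ON)
    print("Estimated cycle time: %.1f s (%s)" % (cycle_time, msg))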
"""
self.link._check_connection()
command = 'Update2'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_array([check_collisions, mm_step, deg_step])
self.link.COM.settimeout(timeout_sec) # wait up to timeout_sec (1 hour by default)
values = self.link._rec_array().tolist()
self.link.COM.settimeout(self.link.TIMEOUT)
readable_msg = self.link._rec_line()
self.link._check_status()
valid_instructions = values[0]
program_time = values[1]
program_distance = values[2]
valid_program = values[3]
self.LAST_STATUS_MESSAGE = readable_msg
return valid_instructions, program_time, program_distance, valid_program, readable_msg
def InstructionList(self):
"""Returns the list of program instructions as an MxN matrix, where N is the number of instructions and M equals to 1 plus the number of robot axes. This is the equivalent sequence that used to be supported by RoKiSim.
Tip: Use RDK.ShowSequence(matrix) to dipslay a joint list or a RoKiSim sequence of instructions.
Out 1: Returns the matrix
Out 2: Returns 0 if the program has no issues
.. seealso:: :func:`~robolink.Item.ShowSequence`, :func:`~robolink.Item.InstructionListJoints`
"""
self.link._check_connection()
command = 'G_ProgInsList'
self.link._send_line(command)
self.link._send_item(self)
insmat = self.link._rec_matrix()
errors = self.link._rec_int()
self.link._check_status()
return insmat, errors
def InstructionListJoints(self, mm_step=10, deg_step=5, save_to_file = None, collision_check = COLLISION_OFF, flags = 0, time_step=0.1):
"""Returns a list of joints an MxN matrix, where M is the number of robot axes plus 4 columns. Linear moves are rounded according to the smoothing parameter set inside the program.
:param float mm_step: step in mm to split the linear movements
:param float deg_step: step in deg to split the joint movements
:param str save_to_file: (optional) save the result to a file as Comma Separated Values (CSV). If the file name is not provided it will return the matrix. If step values are very small, the returned matrix can be very large.
:param int collision_check: (optional) check for collisions
:param int flags: (optional) set to 1 to include the timings between movements, set to 2 to also include the joint speeds (deg/s), set to 3 to also include the accelerations, set to 4 to include all previous information and make the splitting time-based.
:param float time_step: (optional) set the time step in seconds for time based calculation
:return: [message (str), joint_list (:class:`~robodk.Mat`), status (int)]
Outputs:
message (str): Returns a human readable error message (if any).
joint_list (:class:`~robodk.Mat`): 2D matrix with all the joint information and corresponding information such as step, time stamp and speeds. Each entry is one column.
It also returns the list of joints as [J1, J2, ..., Jn, ERROR, MM_STEP, DEG_STEP, MOVE_ID, TIME, X,Y,Z] or the file name if a file path is provided to save the result. Default units are MM and DEG.
Use list(:class:`~robodk.Mat`) to extract each column in a list. The ERROR is returned as an int but it needs to be interpreted as a binary number.
status (int): Status is 0 if no problems arose. Otherwise it returns the number of instructions that can be successfully executed. If status is negative it means that one or more targets are not defined (missing target item).
.. code-block:: python
:caption: Error bit masks
# If error is not 0, check the binary error using the following bit masks
error_bin = int(str(ERROR),2)
ERROR_KINEMATIC = 0b001 # One or more points in the path is not reachable
ERROR_PATH_LIMIT = 0b010 # The path reached a joint axis limit
ERROR_PATH_NEARSINGULARITY = 0b1000 # The robot is too close to a wrist singularity (J5). Lower the singularity tolerance to allow the robot to continue.
ERROR_PATH_SINGULARITY = 0b100 # The robot reached a singularity point
ERROR_COLLISION = 0b100000 # Collision detected
.. seealso:: :func:`~robolink.Item.ShowSequence`
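Example (a minimal sketch; the program name is hypothetical and small steps can return very large matrices):
.. code-block:: python
    prog = RDK.Item('MainProgram', ITEM_TYPE_PROGRAM)
    msg, joint_list, status = prog.InstructionListJoints(2, 2)   # split moves every 2 mm / 2 deg
    if status == 0:
        print("Program is feasible")
    else:
        print("Issues found: " + msg)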
"""
self.link._check_connection()
command = 'G_ProgJointList'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_array([mm_step, deg_step, float(collision_check), float(flags), float(time_step)])
joint_list = save_to_file
self.link.COM.settimeout(3600)
if save_to_file is None:
self.link._send_line('')
joint_list = self.link._rec_matrix()
else:
self.link._send_line(save_to_file)
error_code = self.link._rec_int()
self.link.COM.settimeout(self.link.TIMEOUT)
error_msg = self.link._rec_line()
self.link._check_status()
return error_msg, joint_list, error_code
def setParam(self, param, value=''):
"""Send a specific parameter for an item.
:param str command: Command name
:param str value: Comand value (optional, not all commands require a value)
.. code-block:: python
:caption: Example to expand or collapse an item in the tree
from robolink import *
RDK = Robolink() # Start the RoboDK API
# How to expand or collapse an item in the tree:
item = RDK.ItemUserPick("Select an item")
item.setParam("Tree", "Expand")
pause(2)
item.setParam("Tree", "Collapse")
.. code-block:: python
:caption: Example to change the post processor
robot = RDK.ItemUserPick("Select a robot", ITEM_TYPE_ROBOT)
# Set the robot post processor (name of the py file in the posts folder)
robot.setParam("PostProcessor", "Fanuc_RJ3")
.. code-block:: python
:caption: Example to change display style
# How to change the display style of an object (color as AARRGGBB):
obj = RDK.ItemUserPick('Select an object to change the style', ITEM_TYPE_OBJECT)
# Display points as simple dots given a certain size (suitable for fast rendering or large point clouds)
# Color is defined as AARRGGBB
obj.setValue('Display', 'POINTSIZE=4 COLOR=#FF771111')
# Display each point as a cube of a given size in mm
obj.setValue('Display','PARTICLE=CUBE(0.2,0.2,0.2) COLOR=#FF771111')
# Another way to change display style of points to display as a sphere (size,rings):
obj.setValue('Display','PARTICLE=SPHERE(4,8) COLOR=red')
# Example to change the size of displayed curves:
obj.setValue('Display','LINEW=4')
.. seealso:: :func:`~robolink.Robolink.setParam`
"""
self.link._check_connection()
command = 'ICMD'
self.link._send_line(command)
self.link._send_item(self)
self.link._send_line(str(param))
self.link._send_line(str(value).replace('\n','<br>'))
line = self.link._rec_line()
self.link._check_status()
return line
#if __name__ == "__main__":
# RDK = Robolink()
# r = RDK.Item('',ITEM_TYPE_ROBOT)
# p = r.Pose()
# prog = RDK.AddProgram('Test')
# prog.MoveL(RDK.AddTarget("First Point").setAsCartesianTarget().setPose(p))
# p1 = p*transl(50,0,0)
# p2 = p*transl(50,50,0)
# prog.MoveC(RDK.AddTarget("Second Point").setAsCartesianTarget().setPose(p1), RDK.AddTarget("Second Point").setAsCartesianTarget().setPose(p2))
# #pass
|
chunked_upload.py
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import next
from builtins import range
from builtins import *
from past.utils import old_div
from builtins import object
import json
import os
import re
import sys
import time
from datetime import datetime
from io import BytesIO
from threading import Condition, Lock, Thread
from OpenSSL.SSL import SysCallError
from requests.exceptions import RequestException
from genestack_client import GenestackException
from genestack_client.utils import isatty
RETRY_ATTEMPTS = 5
RETRY_INTERVAL = 2 # seconds
NUM_THREADS = 5
CHUNK_SIZE = 1024 * 1024 * 5 # 5mb
class Chunk(object):
def __init__(self, number, start, size, chunk_size, total_size, token, filename, path, chunk_count, launch_time):
self.data = {
'resumableChunkSize': chunk_size,
'resumableType': '',
'resumableTotalSize': total_size,
'resumableIdentifier': token,
'resumableFilename': filename,
'resumableRelativePath': path,
'resumableTotalChunks': chunk_count,
'launchTime': launch_time,
'resumableChunkNumber': number,
'resumableCurrentChunkSize': size
}
self.start = start
self.size = size
def __str__(self):
return "Chunk %s %s bytes for %s" % (self.data['resumableChunkNumber'], self.size,
self.data['resumableRelativePath'])
def get_file(self):
container = BytesIO()
with open(self.data['resumableRelativePath'], 'rb') as f:
f.seek(self.start)
container.write(f.read(self.size))
container.seek(0)
return container
class PermanentError(GenestackException):
"""
If this exception is raised, the upload cannot be resumed.
"""
pass
def with_lock(method):
"""
Execute the method while holding the instance lock. The instance must have a Lock object in its ``lock`` attribute.
"""
def wrapper(self, *args):
with self.lock:
return method(self, *args)
return wrapper
class ChunkedUpload(object):
def __init__(self, application, path, chunk_size=None):
if chunk_size is None:
chunk_size = CHUNK_SIZE
if chunk_size <= 0:
raise GenestackException("Chunk size should be positive")
self.chunk_upload_url = '/application/uploadChunked/%s/unusedToken' % application.application_id
self.connection = application.connection
self.lock = Lock()
self.__iterator_lock = Lock()
self.__output_lock = Lock()
self.__application_result = None
self.__has_application_result = False
self.__finished = False
self.__error = None
self.thread_counter = 0
self.condition = Condition()
modified = datetime.fromtimestamp(os.path.getmtime(path))
total_size = os.path.getsize(path)
# TODO change according to javascript token
token = '{total_size}-{name}-{date}'.format(total_size=total_size,
name=re.sub('[^A-z0-9_-]', '_', os.path.basename(path)),
date=modified.strftime('%a_%b_%d_%Y_%H_%M_%S'))
self.token = token
self.path = path
# The last chunk can be larger than CHUNK_SIZE but smaller than two chunks.
# Example: CHUNK_SIZE = 2
#   file size 2 -> 1 chunk
#   file size 3 -> 1 chunk
#   file size 4 -> 2 chunks
#   file size 5 -> 2 chunks
if total_size < chunk_size * 2:
chunk_count = 1
else:
chunk_count = old_div(total_size, chunk_size)
self.total_size = total_size
self.filename = os.path.basename(path)
self.path = path
self.chunk_count = chunk_count
launch_time = int(time.time() * 1000)
# import from here to avoid circular imports
# TODO move progress functions to other module.
if isatty():
from .genestack_connection import TTYProgress
self.progress = TTYProgress()
else:
from .genestack_connection import DottedProgress
self.progress = DottedProgress(40)
def _iterator():
start = 0
info = [chunk_size, total_size, token, self.filename, path, chunk_count, launch_time]
for x in range(1, chunk_count + 1):
if x == chunk_count:
current_chunk_size = self.total_size - start
else:
current_chunk_size = chunk_size
yield Chunk(x, start, current_chunk_size, *info)
start += current_chunk_size
self.iterator = _iterator()
@property
@with_lock
def application_result(self):
return self.__application_result
@application_result.setter
@with_lock
def application_result(self, value):
self.__application_result = value
@property
@with_lock
def has_application_result(self):
return self.__has_application_result
@has_application_result.setter
@with_lock
def has_application_result(self, value):
self.__has_application_result = value
@property
@with_lock
def finished(self):
return self.__finished
@finished.setter
@with_lock
def finished(self, value):
self.__finished = value
@property
@with_lock
def error(self):
return self.__error
@error.setter
def error(self, value):
self.__error = value
def __update_progress(self, update_size):
with self.__output_lock:
self.progress(self.filename, update_size, self.total_size)
def __process_chunk(self, chunk):
"""
Try to upload a chunk of data in several attempts.
:param chunk: :class:`Chunk` to upload
:return: None
"""
file_cache = None
upload_checked = False
error = None
for attempt in range(RETRY_ATTEMPTS):
# Check if chunk is already uploaded
if not upload_checked:
try:
response = self.connection.get_request(self.chunk_upload_url, params=chunk.data, follow=False)
except RequestException as e:
error = str(e)
time.sleep(RETRY_INTERVAL)
continue
if response.status_code == 200:
self.__update_progress(chunk.size)
return
else:
upload_checked = True
# try to upload chunk
if file_cache is None:
file_cache = chunk.get_file()
file_cache.seek(0)
try:
response = self.connection.post_multipart(self.chunk_upload_url,
data=chunk.data,
files={'file': file_cache},
follow=False)
except (RequestException, SysCallError) as e:
# check that any type of connection error occurred and retry.
time.sleep(RETRY_INTERVAL)
error = str(e)
if self.connection.debug:
sys.stderr.write('%s/%s attempt to upload %s failed. Connection error: %s\n' %
(attempt + 1, RETRY_ATTEMPTS, chunk, error))
continue
# done without errors
if response.status_code == 200:
self.__update_progress(chunk.size)
data = json.loads(response.text)
if data.get('lastChunkUploaded', False):
self.application_result = data['result']
self.has_application_result = True
self.finished = True
return
error = "Got response with status code: %s" % response.status_code
# permanent errors
if 400 <= response.status_code < 600:
self.finished = True
try:
data = json.loads(response.text)
if isinstance(data, dict) and 'error' in data:
error = data['error']
except ValueError:
pass
self.error = error
return
# other network errors, try again
time.sleep(RETRY_INTERVAL)
continue
self.error = error
self.finished = True
def upload(self):
def do_stuff():
"""
Each daemon thread pulls chunks from the shared iterator and uploads them.
The daemon will quit if one of the following conditions is met:
- all chunks have been processed
- someone set self.finished to True
- the server said that the file upload was complete
- a permanent error was raised (4xx, 5xx)
- the number of RETRY_ATTEMPTS was exceeded for a single chunk
"""
with self.condition:
self.thread_counter += 1
try:
while not self.finished: # daemon working cycle
try:
with self.__iterator_lock:
chunk = next(self.iterator)
except StopIteration:
return
self.__process_chunk(chunk)
except Exception as e:
self.error = str(e)
finally:
with self.condition:
self.thread_counter -= 1
self.condition.notify()
threads = [Thread(target=do_stuff) for _ in range(min(NUM_THREADS, self.chunk_count))]
for thread in threads:
    thread.daemon = True
    thread.start()
with self.condition:
while True:
try:
self.condition.wait()
except (KeyboardInterrupt, SystemExit):
self.error = 'Interrupted by user'
self.finished = True
break
if not self.thread_counter:
break
if self.has_application_result:
return self.application_result
else:
error_message = self.error or 'file has been uploaded from another session'
raise GenestackException('Failed to upload %s: %s' % (self.path, error_message))
def upload_by_chunks(application, path, chunk_size=None):
return ChunkedUpload(application, path, chunk_size=chunk_size).upload()
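# Example usage (a minimal sketch; assumes a valid genestack_client connection and the application id is hypothetical):
# if __name__ == "__main__":
#     from genestack_client import get_connection
#     connection = get_connection()                              # reads credentials from the default config
#     application = connection.application('genestack/upload')   # hypothetical application id
#     result = upload_by_chunks(application, '/path/to/data.fastq.gz')
#     print(result)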
|
dist_autograd_test.py
|
import sys
import threading
import time
from enum import Enum
import random
import torch
import torch.nn as nn
from datetime import timedelta
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.testing._internal.dist_utils
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.distributed.rpc import RRef
from torch.testing._internal.common_utils import IS_MACOS, sandcastle_skip_if
from torch.testing._internal.dist_utils import (
dist_init,
initialize_pg,
wait_until_node_failure,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
# Right now we test up to 3-layer nested rpc calls.
# rpc_done[1] and ctx_ids[1] indicate that the RPC is done in the previous rank, and hold the context id
# sent from the previous rank, respectively.
# rpc_done[2] and ctx_ids[2] do the same for the rank two hops back (prev of prev).
# rpc_done[3] and ctx_ids[3] do the same for the rank three hops back.
# rpc_done[0] and ctx_ids[0] refer to the current rank and are mostly unused.
rpc_done = [False, False, False, False]
ctx_ids = [-1, -1, -1, -1]
known_context_ids = set()
requires_grad_tensor = torch.ones(3, 3, requires_grad=True)
# Send rpc done info and context_id to
# dst_rank = (self.rank + rank_distance) % self.world_size
# we don't need a lock here since the GIL is held while executing remote
# python UDFs, so access is serialized across several workers.
def _set_rpc_done(ctx_id, rank_distance):
global rpc_done
global ctx_ids
global known_context_ids
rpc_done[rank_distance] = True
ctx_ids[rank_distance] = ctx_id
known_context_ids.add(ctx_id)
def _check_rpc_done(rank_distance):
while not rpc_done[rank_distance]:
time.sleep(0.1)
def _torch_ones(sizes, requires_grad=False):
return torch.ones(sizes, requires_grad=requires_grad)
# This method must be called on the rref owner, and verifies that the grad of
# the rref tensor equals the given grad.
def _compare_owner_value(context_id, rref, grad):
grads = dist_autograd.get_gradients(context_id)
x = grads[rref.local_value()]
if x.is_sparse:
assert grad.is_sparse
x = x.to_dense()
grad = grad.to_dense()
else:
assert not grad.is_sparse
return torch.equal(x, grad)
def create_tensor():
return torch.ones((3, 3), requires_grad=True)
def build_sparse_tensor(coalesce=False, requires_grad=True, dtype=torch.float32):
i = [[0, 1, 1], [2, 0, 2]]
v = [3.2, 4.1, 5.3]
tensor = torch.sparse_coo_tensor(i, v, (3, 3), requires_grad=requires_grad, dtype=dtype)
if coalesce:
tensor = tensor.coalesce()
return tensor
@torch.jit.script
def create_torchscript_tensor() -> torch.Tensor:
return torch.ones((3, 3)).requires_grad_()
def my_py_add(t1, t2):
return torch.add(t1, t2)
def my_scalar_add(a, b):
return a + b
def my_rref_add(rref_t1, t2):
ret = torch.add(rref_t1.local_value(), t2)
return ret
@torch.jit.script
def my_script_add(t1, t2):
return torch.add(t1, t2)
@torch.jit.script
def my_script_ref_add(ref_t1: RRef[torch.Tensor], t2: torch.Tensor) -> torch.Tensor:
t1 = ref_t1.to_here()
return torch.add(t1, t2)
def my_nested_rref_add(dst, rref_t1, t2):
return rpc.rpc_sync(dst, my_rref_add, args=(rref_t1, t2))
def ret_requires_grad():
return requires_grad_tensor
def my_py_nested_call(t1, t2, dst, world_size, hops):
next_dst = (dst + 1) % world_size
if hops > 0:
return rpc.rpc_sync(
worker_name(next_dst),
my_py_nested_call,
args=(t1, t2, next_dst, world_size, hops - 1),
)
else:
return rpc.rpc_sync(worker_name(next_dst), my_py_add, args=(t1, t2))
# After a dist autograd context is cleaned up, it should also be cleaned up on
# the other nodes. This helper allows timeout_seconds for those RPCs to complete and
# ensures that all known contexts have been cleaned up within that timeframe.
def _all_contexts_cleaned_up(timeout_seconds=10):
global known_context_ids
start = time.time()
context_id_to_raised = set()
while (
time.time() - start < timeout_seconds
and context_id_to_raised != known_context_ids
):
for context_id in known_context_ids:
try:
dist_autograd._retrieve_context(context_id)
except RuntimeError:
context_id_to_raised.add(context_id)
# all contexts have been cleaned up if trying to retrieve any context resulted in a RuntimeError.
success = context_id_to_raised == known_context_ids
return success
# This function creates a dist autograd context, runs rpc_sync on the given ps,
# and then blocks until the ps has verified that the grads are correctly accumulated.
def _run_trainer(rref_t1, t2, ps, rank_diff, sparse):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(ps, my_rref_add, args=(rref_t1, t2))
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
dist_autograd.backward(context_id, [loss])
# prevent deleting dist autograd context
rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
rpc.rpc_sync(ps, _check_rpc_done, args=(0,))
# This function is the same as _run_trainer, except that the rpc calls the TorchScript
# function "my_script_ref_add" instead of the Python function "my_rref_add"
def _run_trainer_torchscript(rref_t1, t2, ps, rank_diff, sparse):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(ps, my_script_ref_add, args=(rref_t1, t2))
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
dist_autograd.backward(context_id, [loss])
# prevent deleting dist autograd context
rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
rpc.rpc_sync(ps, _check_rpc_done, args=(0,))
class SimulateBackwardError(Function):
_simulate_error = True
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if SimulateBackwardError._simulate_error:
raise Exception("Simulate error on backward pass")
else:
return input
class ExecMode(Enum):
LOCAL = 1 # Run the operation locally.
RPC_SYNC = 2 # Run the operation using rpc_sync
REMOTE = 3 # Run the operation using remote.
RPC_ASYNC = 4 # Run the operation using rpc_async
# Common utils for both CPU and CUDA test suites
class CommonDistAutogradTest(RpcAgentTestFixture):
def _exec_func_with_dst(self, dst, exec_mode, method, *args):
if ExecMode.LOCAL == exec_mode:
if len(args) == 1 and isinstance(args[0], list):
return method(*args[0])
return method(*args)
elif ExecMode.RPC_SYNC == exec_mode:
return rpc.rpc_sync(worker_name(dst), method, args=(args))
elif ExecMode.REMOTE == exec_mode:
return rpc.remote(worker_name(dst), method, args=(args)).to_here()
elif ExecMode.RPC_ASYNC == exec_mode:
fut = rpc.rpc_async(worker_name(dst), method, args=(args))
return fut.wait()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
def _exec_func(self, exec_mode, method, *args):
return self._exec_func_with_dst(
self._next_rank(), exec_mode, method, *args
)
def _next_rank(self):
if hasattr(self, "dst_rank"):
self.dst_rank = (self.dst_rank + 1) % self.world_size
if self.dst_rank == self.rank:
return self._next_rank()
else:
self.dst_rank = (self.rank + 1) % self.world_size
return self.dst_rank
def _check_rpc_done(self, rank_distance):
_check_rpc_done(rank_distance)
def _verify_backwards(self, exec_mode, tensors, context_id, local_grads, *args):
if exec_mode == ExecMode.LOCAL:
torch.autograd.backward(tensors)
return [arg.grad for arg in args]
else:
self._verify_backwards_remote(tensors, context_id, local_grads, *args)
def _verify_backwards_remote(self, tensors, context_id, local_grads, *args):
dist_autograd.backward(context_id, tensors)
# Verify grads were accumulated appropriately.
grads = dist_autograd.get_gradients(context_id)
nargs = len(args)
ngrads = 0
for i in range(0, nargs):
if local_grads[i] is not None:
self.assertIn(args[i], grads)
self.assertEqual(local_grads[i], grads[args[i]])
ngrads += 1
else:
self.assertNotIn(args[i], grads)
self.assertEqual(ngrads, len(grads))
def _test_graph(self, fn, exec_mode, sparse):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
if sparse:
t1 = build_sparse_tensor()
t2 = build_sparse_tensor()
else:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(worker_name(dst_rank), fn, args=(t1, t2))
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), fn, args=(t1, t2)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# Verify graph for current context id.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(1, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[0],
t1,
t2,
ret,
)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
# Verify graph for previous context id.
ctx = dist_autograd._retrieve_context(ctx_ids[1])
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[0])
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
# autograd context should be cleaned up by now.
with self.assertRaises(RuntimeError):
ctx = dist_autograd._retrieve_context(context_id)
# No autograd context available.
with self.assertRaises(RuntimeError):
ctx = dist_autograd._current_context()
# 3-layer nested calls
def _test_graph_for_py_nested_call(self, exec_mode, sparse):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
if sparse:
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
else:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
nest_dst_rank = (dst_rank + 1) % self.world_size
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_py_nested_call,
args=(t1, t2, dst_rank, self.world_size, 1),
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank),
my_py_nested_call,
args=(t1, t2, dst_rank, self.world_size, 1),
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
# Barrier to ensure all RPCs are done.
dist.barrier()
for rd in [1, 2, 3]:
rpc.rpc_sync(
worker_name((self.rank + rd) % self.world_size),
_set_rpc_done,
args=(context_id, rd),
)
# Barrier to ensure all set_rpc_done have completed.
dist.barrier()
# For self.rank, it has 4 graphs to verify
# One is for current context id when this rank send first rpc call.
# Second one is for prev context id when this rank make 1st nested
# call.
# Third one is for prev prev context id when this rank make
# 2nd nested call.
# Last one is for prev prev prev context id when this rank
# execute the torch.add() operator.
# Verify first graph for current context id.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(1, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[0],
t1,
t2,
ret,
)
# Verify second graph for 1st nested call.
ctx = dist_autograd._retrieve_context(ctx_ids[1])
self._verify_graph_for_nested_rpc_call(ctx)
# Verify third graph for 2nd nested call.
ctx = dist_autograd._retrieve_context(ctx_ids[2])
self._verify_graph_for_nested_rpc_call(ctx)
# verify last graph for rpc call execution.
ctx = dist_autograd._retrieve_context(ctx_ids[3])
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[0])
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
# Rank0->Rank1->Rank0
def _test_graph_for_py_nested_call_itself(self, exec_mode, sparse):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
if sparse:
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
else:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_py_nested_call,
args=(
t1,
t2,
(self.rank - 1 + self.world_size) % self.world_size,
self.world_size,
0,
),
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank),
my_py_nested_call,
args=(
t1,
t2,
(self.rank - 1 + self.world_size) % self.world_size,
self.world_size,
0,
),
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name((self.rank + 1) % self.world_size),
_set_rpc_done,
args=(context_id, 1),
)
# For self.rank, there are 2 graphs to verify.
# One is for the current context id, when this rank sends the first rpc
# call and executes the torch.add() operator.
# The other is for the prev context id, when this rank makes the
# nested call.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(2, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(2, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[1],
t1,
t2,
ret,
)
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[1])
# Verify two pairs of send and recv functions for nested
# call
self._check_rpc_done(1)
ctx = dist_autograd._retrieve_context(ctx_ids[1])
self._verify_graph_for_nested_rpc_call(ctx)
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
def _test_no_graph_with_tensors_not_require_grad(self, exec_mode, sparse):
initialize_pg(self.file_init_method, self.rank, self.world_size)
dst_rank = (self.rank + 1) % self.world_size
with dist_autograd.context() as context_id:
if sparse:
t1 = build_sparse_tensor(requires_grad=False)
t2 = build_sparse_tensor(requires_grad=False)
else:
t1 = torch.ones(3, 3, requires_grad=False)
t2 = torch.zeros(3, 3, requires_grad=False)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(t1, t2)
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), torch.add, args=(t1, t2)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
ctx = dist_autograd._current_context()
send_functions = ctx._send_functions()
self.assertEqual(len(send_functions), 0)
recv_functions = ctx._recv_functions()
self.assertEqual(len(recv_functions), 0)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
# NB: RRef.to_here() always passes the autograd context to the
# callee, as the caller does not know whether the return
# value will contain a requires_grad tensor or not.
#
# rpc/remote with a udf (_set_rpc_done here) also always passes the
# autograd context to the callee for the same reason.
self.assertNotEqual(-1, dist_autograd._retrieve_context(ctx_ids[1]))
dist.barrier()
def _test_rpc_complex_args(self, exec_mode, sparse):
with dist_autograd.context() as context_id:
num_tensors = 10
tensors = []
for i in range(num_tensors):
if sparse:
tensor = build_sparse_tensor(requires_grad=(i % 2 == 0))
else:
tensor = torch.ones(3, 3, requires_grad=(i % 2 == 0))
tensors.append(tensor)
dst_rank = self._next_rank()
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.stack, args=(tensors,)
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), torch.stack, args=(tensors,)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
self.assertEqual(torch.stack(tensors), ret)
# Verify the appropriate tensors have been attached to the autograd graph.
next_funcs = list(
dist_autograd._current_context()._send_functions().values()
)[0].next_functions
idx = 0
for i in range(len(next_funcs)):
self.assertEqual(
"torch::autograd::AccumulateGrad", next_funcs[i][0].name()
)
self.assertEqual(tensors[i], next_funcs[i][0].variable)
# Verify that the worker id has been recorded in the context
ctx = dist_autograd._current_context()
worker_ids = ctx._known_worker_ids()
self.assertEqual(len(worker_ids), 1)
self.assertEqual(worker_ids, {dst_rank})
def context_cleanup_test_helper(self, rpc_args, func, nested=False):
initialize_pg(self.file_init_method, self.rank, self.world_size)
# test that in dist autograd, even when the tensors communicated over RPC do
# NOT require grad, we still clean up the dist autograd contexts created
# on other nodes. This is because the autograd context is still
# communicated over RPC even if the tensor arguments do not require grad, as
# it is possible that the response could.
if nested:
dst_rank = (self.rank + 1) % self.world_size
nested_dst_rank = (dst_rank + 1) % self.world_size
dst_ranks = {dst_rank}
else:
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
if nested:
rpc.rpc_sync(
worker_name(nested_dst_rank),
_set_rpc_done,
args=(context_id, 2),
)
# the thread's context id should be cleaned up
with self.assertRaises(RuntimeError):
dist_autograd._retrieve_context(context_id)
# Ensure all peers have finished mutating the
# `known_context_ids` set.
dist.barrier()
# check that all contexts have been cleaned up.
success = _all_contexts_cleaned_up()
self.assertTrue(success)
def _backward_no_grad_on_tensor(self, t1, t2, sparse):
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
torch.add,
args=(t1, t2))
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
self.assertIsNone(t1.grad)
self.assertIsNone(t2.grad)
# Now populate .grad with local autograd engine and
# verify dist autograd doesn't mess with it.
loss_local = torch.add(t1, t2)
if sparse:
loss_local = torch.sparse.sum(loss_local)
else:
loss_local = loss_local.sum()
loss_local.backward()
self.assertIsNotNone(t1.grad)
self.assertIsNotNone(t2.grad)
t1_grad_before = t1.grad
t2_grad_before = t2.grad
dist_autograd.backward(context_id, [loss])
self.assertEqual(t1_grad_before, t1.grad)
self.assertEqual(t2_grad_before, t2.grad)
# The current rank first creates a tensor on the rref_owner, and then passes
# the rref along with another tensor to the callee to run either my_rref_add or
# my_nested_rref_add, depending on whether the callee is the rref owner.
# The grad of the local tensor lives on the current rank, and the grad of the
# rref tensor lives on the rref owner.
def _backward_rref(self, callee, rref_owner, t1, t2, local_grads, sparse):
local_ret = torch.add(t1, t2)
if sparse:
local_ret = torch.sparse.sum(local_ret)
else:
local_ret = local_ret.sum()
local_ret.backward()
with dist_autograd.context() as context_id:
if sparse:
rref_t1 = rpc.remote(
rref_owner, build_sparse_tensor, args=(False, True,)
)
else:
rref_t1 = rpc.remote(
rref_owner, _torch_ones, args=((3, 3),), kwargs={"requires_grad": True}
)
if callee == rref_owner:
rref = rpc.remote(callee, my_rref_add, args=(rref_t1, t2))
else:
rref = rpc.remote(
callee, my_nested_rref_add, args=(rref_owner, rref_t1, t2)
)
ret = rref.to_here()
if sparse:
ret = torch.sparse.sum(ret)
else:
ret = ret.sum()
dist_autograd.backward(context_id, [ret])
# verify grads on caller
grads = dist_autograd.get_gradients(context_id)
self.assertIn(t2, grads)
self.assertEqual(grads[t2], t2.grad)
# verify grads on rref owner
self.assertTrue(
rpc.rpc_sync(
rref_owner,
_compare_owner_value,
args=(context_id, rref_t1, t1.grad),
)
)
# In this test, every rank serves as a parameter server (ps) and a
# driver, and kicks off trainers on the other three ranks. So we have:
# ps = rank0 with trainers = rank1/2/3
# ps = rank1 with trainers = rank2/3/0
# ps = rank2 with trainers = rank3/0/1
# ps = rank3 with trainers = rank0/1/2
#
# These four ps-trainer groups run on completely separate autograd
# graphs, but they share the same set of underlying RpcAgents.
def _test_trainer_ps(self, create_ref_fn, trainer_fn, sparse):
if sparse:
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
else:
t1 = torch.ones((3, 3), requires_grad=True)
t2 = torch.zeros((3, 3), requires_grad=True)
local_ret = torch.add(t1, t2)
if sparse:
torch.sparse.sum(local_ret).backward()
else:
local_ret.sum().backward()
# create rref on self
rref_t1 = rpc.remote(
worker_name(self.rank),
create_ref_fn,
args=())
# kick off forward and backward pass on three other workers (trainers)
rank_diffs = [1, 2, 3]
futures = []
for rank_diff in rank_diffs:
futures.append(
rpc.rpc_async(
worker_name((self.rank + rank_diff) % self.world_size),
trainer_fn,
args=(rref_t1, t2, worker_name(self.rank), rank_diff, sparse),
)
)
# check that the trainers are done with their backward pass
for rank_diff in rank_diffs:
self._check_rpc_done(rank_diff)
# trainers are done and holding the context for verification
accumulate_grad_func = None
for rank_diff in rank_diffs:
# make sure grads are accumulated for the same tensors and values
# are all correct
ctx_id = ctx_ids[rank_diff]
grads = dist_autograd.get_gradients(ctx_id)
local_t1 = rref_t1.to_here()
self.assertIn(local_t1, grads)
self.assertEqual(grads[local_t1], t1.grad)
# unblock trainers
_set_rpc_done(None, 0)
# wait until all trainers are done
torch.futures.wait_all(futures)
def _backward_multiple_round_trips(self, t1, t2, t3, t4, t5, local_grads, sparse):
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
# Multiple RPCs between different nodes.
val = self._exec_func(exec_mode, torch.add, t1, t2)
val = self._exec_func(exec_mode, torch.mul, t3, val)
s1 = self._exec_func(exec_mode, torch.stack, (t4, val))
s2 = self._exec_func(exec_mode, torch.stack, (t5, val))
if sparse:
val = self._exec_func(exec_mode, torch.mul, s1, s2)
val = self._exec_func(exec_mode, torch.mul, val, val)
loss = torch.sparse.sum(val)
else:
val = self._exec_func(exec_mode, torch.bmm, s1, s2)
val = self._exec_func(exec_mode, torch.matmul, val, val)
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t3, t4, t5
)
local_grads = ret if ret else local_grads
def _backward_different_dtypes(self, t1, t2, sparse):
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
loss = self._exec_func(exec_mode, torch.add, t1, t2)
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
# Run the same code locally and with dist autograd and verify gradients
# are the same.
def _backward_simple_python_udf(self, t1, t2, sparse):
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(exec_mode, my_py_add, t1, t2)
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
# Run the same code locally and with dist autograd and verify gradients
# are the same.
def _backward_simple_script_call(self, t1, t2, sparse):
local_grads = None
for exec_mode in [
ExecMode.LOCAL,
ExecMode.RPC_SYNC,
ExecMode.RPC_ASYNC,
ExecMode.REMOTE,
]:
with dist_autograd.context() as context_id:
forward_ret = self._exec_func(exec_mode, my_script_add, t1, t2)
if sparse:
loss = torch.sparse.sum(forward_ret)
else:
loss = forward_ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
def _nested_backward_accumulate_grads(self, t1, t2, sparse):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._test_nested_backward_accumulate_grads,
args=(t1, t2, self._next_rank()),
)
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
# Run backward twice.
dist_autograd.backward(context_id, [loss], retain_graph=True)
dist_autograd.backward(context_id, [loss])
def _backwards_nested_python_udf(self, t1, t2, sparse):
t3 = t1 * t2
t4 = t1 + t2
res = t3 + t4
loss = t1 * t2 * t3 * t4 * res
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
torch.autograd.backward([loss])
# Now run distributed autograd.
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._nested_python_udf,
args=(t1, t2, self._next_rank()),
)
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
dist_autograd.backward(context_id, [loss])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
def _mixed_requires_grad(self, t1, t2, sparse):
for exec_mode in [ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(
exec_mode, DistAutogradTest._mixed_requires_grad_operaton, t1, t2
)
self.assertEqual(t1 * t2, ret)
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
dist_autograd.backward(context_id, [loss])
self.assertTrue(t1.requires_grad)
self.assertFalse(t2.requires_grad)
grads = dist_autograd.get_gradients(context_id)
self.assertIn(t1, grads)
self.assertNotIn(t2, grads)
self.assertEqual(t2, grads[t1])
def _multiple_backward(self, t1, t2, sparse):
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
torch.add,
args=(t1, t2))
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
# Run backward in a loop multiple times.
for i in range(1000):
dist_autograd.backward(context_id, [loss], retain_graph=True)
# For the current context, this rank sends the t1 and t2 tensors to dst_rank,
# then gets back the result tensor t3 = torch.add(t1, t2).
# For the current context on this rank, it expects a graph like this:
# send function:
# rpcSendBackward
# / \
# t1.AccumulateGrad t2.AccumulateGrad
#
# recv function:
#
# |
# t3.rpcRecvBackward
#
def _verify_graph_for_first_rpc_call(
self, send_function, recv_function, t1, t2, ret
):
# Retrieve the next functions in the graph.
next_funcs = send_function.next_functions
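# Note: next_functions is a tuple of (grad_fn, input_index) pairs; the
# integer (expected to be 0 below) is the index of the input on that
# grad_fn which this edge feeds into.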
self.assertEqual(2, len(next_funcs))
# We should now hit t1 and t2 in the autograd graph.
self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[0][0].name())
self.assertEqual(t1, next_funcs[0][0].variable)
self.assertEqual(0, next_funcs[0][1])
self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[1][0].name())
self.assertEqual(t2, next_funcs[1][0].variable)
self.assertEqual(0, next_funcs[1][1])
# Test recv functions.
self.assertEqual(ret.grad_fn, recv_function)
# Run the same code locally and with dist autograd and verify gradients
# are the same.
def _backward_simple(self, dst, t1, t2, local_grads, sparse):
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func_with_dst(
dst, exec_mode, torch.add, t1, t2
)
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
# For a context passed down from previous nested chain calls, this rank
# receives two tensors t1 and t2, executes torch.add(t1, t2), and sends
# the result tensor t3 back.
# For this context on this rank, it expects a graph like this:
# send and recv functions:
# rpcSendBackward
# |
# t3.AddBackward0
# / \
# t1.recvRpcBackward t2.recvRpcBackward
def _verify_graph_for_rpc_call_exec(self, send_function):
# Verify next function is AddBackward0
next_funcs = send_function.next_functions
self.assertEqual(1, len(next_funcs))
add_backward_fn = next_funcs[0][0]
self.assertEqual("AddBackward0", add_backward_fn.name())
# Verify the next two functions are the same recv backward function.
next_funcs = add_backward_fn.next_functions
self.assertEqual(2, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
)
self.assertEqual(next_funcs[0][0], next_funcs[1][0])
# For a context passed down from previous nested chain calls, this rank
# receives two tensors t1 and t2 and forwards them via a
# nested rpc call to the next dst. On the return route, it receives the result
# tensor t3 from the next dst and forwards t3 back to the previous caller.
# For this context on this rank, it expects a graph like this:
# send and recv functions for receiving and forwarding t1 and t2:
# rpcSendBackward
# / \
# t1.recvRpcBackward t2.recvRpcBackward
# send and recv functions for receiving and forwarding t3:
# rpcSendBackward
# |
# t3.recvRpcBackward
def _verify_graph_for_nested_rpc_call(self, ctx):
send_functions = ctx._send_functions()
self.assertEqual(2, len(send_functions))
# For the send function created when making the nested rpc call,
# the next functions of the send function are the two recv functions
# for the two tensors received from the previous call.
next_funcs = list(send_functions.values())[0].next_functions
self.assertEqual(2, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
)
self.assertEqual(next_funcs[0][0], next_funcs[1][0])
# For the send function created when returning the response to the previous
# call, the next function of the send function is the recv function
# for the result tensor returned from the nested call.
next_funcs = list(send_functions.values())[1].next_functions
self.assertEqual(1, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
class TensorPipeAgentDistAutogradTest(CommonDistAutogradTest):
# Sparse tests only work with TensorPipeAgent.
@dist_init
def test_graph_for_builtin_call_sparse(self):
self._test_graph(torch.add, ExecMode.RPC_SYNC, True)
@dist_init
def test_graph_for_python_call_sparse(self):
self._test_graph(my_py_add, ExecMode.RPC_SYNC, True)
@dist_init
def test_graph_for_builtin_remote_call_sparse(self):
self._test_graph(torch.add, ExecMode.REMOTE, True)
@dist_init
def test_graph_for_python_remote_call_sparse(self):
self._test_graph(my_py_add, ExecMode.REMOTE, True)
@dist_init
def test_graph_for_py_nested_call_sparse(self):
self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC, True)
@dist_init
def test_graph_for_py_nested_remote_call_sparse(self):
self._test_graph_for_py_nested_call(ExecMode.REMOTE, True)
@dist_init
def test_graph_for_py_nested_call_itself_sparse(self):
self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC, True)
@dist_init
def test_graph_for_py_nested_remote_call_itself_sparse(self):
self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE, True)
@dist_init
def test_no_graph_with_tensors_not_require_grad_sparse(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC, True)
@dist_init
def test_no_graph_with_tensors_not_require_grad_remote_sparse(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE, True)
@dist_init
def test_rpc_complex_args_sparse(self):
self._test_rpc_complex_args(ExecMode.RPC_SYNC, True)
@dist_init
def test_remote_complex_args_sparse(self):
self._test_rpc_complex_args(ExecMode.REMOTE, True)
@dist_init
def test_context_cleanup_tensor_with_grad_sparse(self):
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_context_cleanup_tensor_no_grad_sparse(self):
t1 = build_sparse_tensor(requires_grad=False)
self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add)
@dist_init
def test_context_cleanup_nested_rpc_sparse(self):
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
dst_rank = (self.rank + 1) % self.world_size
args = (t1, t2, dst_rank, self.world_size, 0)
self.context_cleanup_test_helper(
rpc_args=args, func=my_py_nested_call, nested=True
)
@dist_init
def test_backward_no_grad_on_tensor_sparse(self):
self._backward_no_grad_on_tensor(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True
)
@dist_init
def test_backward_simple_sparse(self):
self._backward_simple(
self._next_rank(),
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True
)
@dist_init
def test_backward_simple_self_sparse(self):
self._backward_simple(
self.rank,
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True
)
@dist_init
def test_backward_rref_multi_sparse(self):
if self.rank > 0:
callee = "worker0"
rref_owner = callee
self._backward_rref(
callee,
rref_owner,
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True
)
@dist_init
def test_backward_rref_sparse(self):
callee = worker_name(self._next_rank())
rref_owner = callee
self._backward_rref(
callee,
rref_owner,
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True
)
@dist_init
def test_backward_rref_nested_sparse(self):
callee = worker_name((self.rank + 1) % self.world_size)
rref_owner = worker_name((self.rank + 2) % self.world_size)
self._backward_rref(
callee,
rref_owner,
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True
)
@dist_init
def test_trainer_ps_sparse(self):
self._test_trainer_ps(
build_sparse_tensor,
_run_trainer,
True
)
@dist_init
def test_backward_multiple_round_trips_sparse(self):
self._backward_multiple_round_trips(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=False),
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=False),
build_sparse_tensor(requires_grad=True),
None,
True
)
@dist_init
def test_backward_different_dtypes_sparse(self):
self._backward_different_dtypes(
build_sparse_tensor(requires_grad=True, dtype=torch.float32),
build_sparse_tensor(requires_grad=True, dtype=torch.float64),
True
)
@dist_init
def test_backward_simple_python_udf_sparse(self):
self._backward_simple_python_udf(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True
)
@dist_init
def test_backward_simple_script_call_sparse(self):
self._backward_simple_script_call(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True
)
@dist_init
def test_nested_backward_accumulate_grads_sparse(self):
self._nested_backward_accumulate_grads(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True
)
@dist_init
def test_backwards_nested_python_udf_sparse(self):
# Run equivalent of _nested_python_udf locally.
self._backwards_nested_python_udf(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True
)
@dist_init
def test_mixed_requires_grad_sparse(self):
self._mixed_requires_grad(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=False),
True
)
@dist_init
def test_multiple_backward_sparse(self):
self._multiple_backward(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True
)
class DistAutogradTest(CommonDistAutogradTest):
@dist_init
def test_autograd_context(self):
# Verify max possible id.
max_auto_increment = 281474976710655
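# As the assertions below imply, a dist autograd context id packs the
# 16-bit worker_id into the high bits and a 48-bit auto-increment counter
# into the low bits, so the largest counter value is
# 2**48 - 1 = 281474976710655 (checked together with `worker_id << 48`).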
self.assertEqual(
max_auto_increment + (self.worker_id << 48), dist_autograd._get_max_id()
)
context_ids = []
for i in range(200):
with dist_autograd.context() as context_id:
self.assertEqual(
context_id,
dist_autograd._retrieve_context(context_id)._context_id(),
)
# First 16 bits should be worker_id.
self.assertEqual(self.worker_id, context_id >> 48)
context_ids.append(context_id)
for context_id in context_ids:
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
dist_autograd._retrieve_context(context_id)
@dist_init
def test_nested_context(self):
with dist_autograd.context() as context_id:
# Nested contexts not supported.
with self.assertRaisesRegex(
RuntimeError, "Already have an autograd context id for this thread"
):
with dist_autograd.context() as context_id:
pass
@dist_init
def test_graph_for_builtin_call(self):
self._test_graph(torch.add, ExecMode.RPC_SYNC, False)
@dist_init
def test_graph_for_python_call(self):
self._test_graph(my_py_add, ExecMode.RPC_SYNC, False)
@dist_init
def test_graph_for_builtin_remote_call(self):
self._test_graph(torch.add, ExecMode.REMOTE, False)
@dist_init
def test_graph_for_python_remote_call(self):
self._test_graph(my_py_add, ExecMode.REMOTE, False)
@dist_init
def test_graph_for_py_nested_call(self):
self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC, False)
@dist_init
def test_graph_for_py_nested_remote_call(self):
self._test_graph_for_py_nested_call(ExecMode.REMOTE, False)
@dist_init
def test_graph_for_py_nested_call_itself(self):
self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC, False)
@dist_init
def test_graph_for_py_nested_remote_call_itself(self):
self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE, False)
@dist_init
def test_no_graph_with_tensors_not_require_grad(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC, False)
@dist_init
def test_no_graph_with_tensors_not_require_grad_remote(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE, False)
def _test_grad_only_on_return_value(self, exec_mode):
initialize_pg(self.file_init_method, self.rank, self.world_size)
dst_rank = (self.rank + 1) % self.world_size
with dist_autograd.context() as context_id:
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(worker_name(dst_rank), ret_requires_grad)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), ret_requires_grad
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
dist_autograd.backward(context_id, [ret.sum()])
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
grads = dist_autograd.get_gradients(ctx_ids[1])
self.assertEqual(1, len(grads))
self.assertIn(requires_grad_tensor, grads)
self.assertEqual(torch.ones_like(ret), grads[requires_grad_tensor])
# due to the above get_gradients call, ensure that dist autograd
# contexts aren't cleaned up until all workers exit context managers
dist.barrier()
@dist_init
def test_grad_only_on_return_value(self):
self._test_grad_only_on_return_value(ExecMode.RPC_SYNC)
@dist_init
def test_grad_only_on_return_value_remote(self):
self._test_grad_only_on_return_value(ExecMode.REMOTE)
@dist_init
def test_rpc_complex_args(self):
self._test_rpc_complex_args(ExecMode.RPC_SYNC, False)
@dist_init
def test_remote_complex_args(self):
self._test_rpc_complex_args(ExecMode.REMOTE, False)
@dist_init
def test_context_cleanup_tensor_with_grad(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_context_cleanup_tensor_no_grad(self):
t1 = torch.ones(3, 3, requires_grad=False)
self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add)
@dist_init
def test_context_cleanup_no_tensors(self):
self.context_cleanup_test_helper(rpc_args=(1, 1), func=my_scalar_add)
@dist_init
def test_context_cleanup_nested_rpc(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
dst_rank = (self.rank + 1) % self.world_size
args = (t1, t2, dst_rank, self.world_size, 0)
self.context_cleanup_test_helper(
rpc_args=args, func=my_py_nested_call, nested=True
)
@dist_init
def test_worker_ids_recorded(self):
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
# if no tensors require grad, we should still record worker_ids, as
# the autograd context ID is still passed to other workers.
t1 = torch.ones(3, 3, requires_grad=False)
t2 = torch.zeros(3, 3, requires_grad=False)
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# all worker_ids in dst_ranks should be recorded.
ctx = dist_autograd._current_context()
worker_ids = ctx._known_worker_ids()
self.assertEqual(worker_ids, dst_ranks)
# worker_ids should be recorded when tensors do require grad
t1.requires_grad = True
t2.requires_grad = True
for dst_rank in dst_ranks:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(t1, t2)
)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# all worker_ids in dst_ranks should be recorded.
worker_ids = ctx._known_worker_ids()
self.assertEqual(worker_ids, dst_ranks)
@dist_init
def test_dist_autograd_profiling(self):
with dist_autograd.context() as context_id:
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(3, 3, requires_grad=True)
loss = rpc.rpc_sync(worker_name(self._next_rank()), torch.add, args=(t1, t2)).sum()
with torch.autograd.profiler.profile() as p:
dist_autograd.backward(context_id, [loss])
function_events = p.function_events
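# Helper: return the first profiler event whose name contains partial_key.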
def get_event(partial_key):
return [event for event in function_events if partial_key in event.name][0]
send_event = get_event("SendRpcBackward")
recv_event = get_event("RecvRpcBackward")
backward_event = get_event("torch::distributed::autograd::backward")
# There should be at least 1 send and 1 recv event each, corresponding to the send/recv functions executed.
self.assertEqual(send_event.count, 1)
self.assertEqual(recv_event.count, 1)
# The CPU total for the backward event should be greater than that of send and recv, since
# applying those functions in the backward pass is a subset of the entire backward pass.
self.assertGreater(backward_event.cpu_time_total, send_event.cpu_time_total)
self.assertGreater(backward_event.cpu_time_total, recv_event.cpu_time_total)
@dist_init
def test_error_in_context(self):
with dist_autograd.context() as context_id:
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(6, 6, requires_grad=True)
with self.assertRaises(RuntimeError):
# This should throw an error since matrix sizes don't match.
rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
)
@dist_init
def test_backward_no_grad_on_tensor(self):
self._backward_no_grad_on_tensor(
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
False
)
@dist_init
def test_backward_simple(self):
self._backward_simple(
self._next_rank(),
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False
)
@dist_init
def test_backward_simple_self(self):
self._backward_simple(
self.rank,
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False
)
@dist_init
def test_backward_rref(self):
callee = worker_name(self._next_rank())
rref_owner = callee
self._backward_rref(
callee,
rref_owner,
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False
)
@dist_init
def test_backward_rref_multi(self):
if self.rank > 0:
callee = "worker0"
rref_owner = callee
self._backward_rref(
callee,
rref_owner,
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False
)
@dist_init
def test_backward_rref_nested(self):
callee = worker_name((self.rank + 1) % self.world_size)
rref_owner = worker_name((self.rank + 2) % self.world_size)
self._backward_rref(
callee,
rref_owner,
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False
)
@dist_init
def test_trainer_ps(self):
self._test_trainer_ps(
create_tensor,
_run_trainer,
False
)
@dist_init
def test_trainer_ps_torchscript_functions(self):
# TODO, need more investigation
# there is rref leak when shutting down, suspect it is because
# ref as arg is passed to pybind boundary, and the ref is not garbage
# collected by python when calling shutdown()
import torch.distributed.rpc.api as api
api._ignore_rref_leak = True
self._test_trainer_ps(create_torchscript_tensor, _run_trainer_torchscript, False)
@dist_init
def test_backward_multiple_round_trips(self):
self._backward_multiple_round_trips(
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3)),
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3)),
torch.rand((3, 3), requires_grad=True),
None,
False
)
@dist_init
def test_backward_different_tensor_dims(self):
local_grads = None
t1 = torch.rand((4, 6), requires_grad=True)
t2 = torch.rand((6, 5))
t3 = torch.rand((5, 7), requires_grad=True)
t4 = torch.rand((7, 9))
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
val = self._exec_func(exec_mode, torch.matmul, t1, t2)
val = self._exec_func(exec_mode, torch.linalg.multi_dot, (val, t3, t4))
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t2, t3, t4
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_unused_tensors(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
s = self._exec_func(exec_mode, torch.stack, (t1, t2, t3))
val = self._exec_func(
exec_mode,
torch.matmul,
torch.narrow(s, 0, 0, 1),
torch.narrow(s, 0, 2, 1),
)
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t3
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_multiple_output_tensors(self):
local_grads = None
t = torch.rand((10, 2), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
tensor_list = self._exec_func(exec_mode, torch.split, t, 2)
t1 = tensor_list[0]
t2 = tensor_list[2]
t3 = tensor_list[4]
val = self._exec_func(exec_mode, torch.linalg.multi_dot, (t1, t2, t3))
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t
)
local_grads = ret if ret else local_grads
def _run_test_backward_unused_send_function_in_thread(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
# We don't use the result of the RPC function; as a result, the
# backward pass would hang in "FAST" mode.
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
val = torch.mul(t1, t2)
# Run backward, this would hang forever.
dist_autograd.backward(context_id, [val.sum()])
@dist_init
def test_backward_unused_send_function(self):
# Run the test in a thread which would never finish.
t = threading.Thread(
target=self._run_test_backward_unused_send_function_in_thread
)
t.daemon = True
t.start()
t.join(10) # Wait for 10s.
# Verify thread is still alive (indicating backward hasn't completed yet).
self.assertTrue(t.is_alive())
@dist_init
def test_backward_autograd_engine_error(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
# Perform some ops before error simulation.
tmp = (t1 + t2) * (t1 + t2)
t3 = SimulateBackwardError.apply(tmp)
# Run multiple round trips across different nodes and verify the
# original node receives an error thrown on a node deep in the chain.
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t2, t3)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.mul, args=(val, t2)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(val, t2)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.div, args=(val, t2)
)
with self.assertRaisesRegex(
RuntimeError, "Error on Node [0-9]+: Simulate error on backward pass"
):
# Run backwards, and validate we receive an error.
dist_autograd.backward(context_id, [val.sum()])
@dist_init(clean_shutdown=False)
@sandcastle_skip_if(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_backward_node_failure(self):
rpc._set_rpc_timeout(5) # 5 seconds
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
# Wait for all RPCs to be done.
dist.barrier()
# Kill all odd rank nodes.
if self.rank % 2 == 0:
shutdown_error_regex = self.get_shutdown_error_regex()
# Wait for all the odd rank nodes to die.
for rank in range(self.world_size):
if rank % 2 != 0:
wait_until_node_failure(rank, shutdown_error_regex)
# Shutdown sequence is not very well defined and as a result
# we might see any error given by get_shutdown_error_regex()
with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
# Run backwards, and validate we receive an error since all
# other nodes are dead.
dist_autograd.backward(context_id, [res.sum()])
else:
# Exit all other nodes.
pass
@dist_init
def test_backward_without_context(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
context_id = 100 # dummy context_id
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
dist_autograd.backward(context_id, [res.sum()])
@dist_init
def test_backward_without_rpc(self):
dst_rank = self.rank
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.add(t1, t2)
dist_autograd.backward(context_id, [t3.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(torch.ones(3, 3), grads[t1])
self.assertEqual(torch.ones(3, 3), grads[t2])
@dist_init
def test_backward_invalid_args(self):
with dist_autograd.context() as context_id:
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
dist_autograd.backward(context_id, None)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
dist_autograd.backward(None, None)
with self.assertRaisesRegex(
RuntimeError, "No tensors provided for gradient computation"
):
dist_autograd.backward(context_id, [])
with self.assertRaisesRegex(RuntimeError, "requires_grad not set on"):
t = torch.rand(3, 3)
dist_autograd.backward(context_id, [t])
with self.assertRaisesRegex(
RuntimeError, "is not a scalar, all roots need to be scalar"
):
t = torch.rand(3, 3, requires_grad=True)
dist_autograd.backward(context_id, [t])
with self.assertRaisesRegex(
RuntimeError, "does not have a valid gradient function"
):
t = torch.rand(1, requires_grad=True)
dist_autograd.backward(context_id, [t])
@dist_init
def test_backward_multiple_roots(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
r1 = self._exec_func(exec_mode, torch.add, t1, t2).sum()
r2 = self._exec_func(exec_mode, torch.mul, t1, t2).sum()
r3 = self._exec_func(exec_mode, torch.cos, t1).sum()
r4 = self._exec_func(exec_mode, torch.div, t1, t2).sum()
local_grads = self._verify_backwards(
exec_mode, [r1, r2, r3, r4], context_id, local_grads, t1, t2
)
@dist_init
def test_backward_different_dtypes(self):
self._backward_different_dtypes(
torch.rand((3, 3), requires_grad=True, dtype=torch.float32),
torch.rand((3, 3), requires_grad=True, dtype=torch.float64),
False
)
@dist_init
def test_backward_simple_python_udf(self):
self._backward_simple_python_udf(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False
)
@dist_init
def test_backward_simple_script_call(self):
self._backward_simple_script_call(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False
)
@staticmethod
def _complex_python_udf(t1, t2):
t3 = torch.nn.functional.linear(t1, t2)
t4 = torch.nn.functional.linear(t2, t3)
t5 = torch.nn.functional.linear(t3, t4)
return torch.linalg.multi_dot([t1, t2, t3, t4, t5])
@dist_init
def test_backward_complex_python_udf(self):
# Run the same code locally and with dist autograd and verify gradients
# are the same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(
exec_mode, DistAutogradTest._complex_python_udf, t1, t2
)
loss = ret.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
@staticmethod
def _python_udf_with_backward_error(t1, t2):
t3 = t1 + t2
t4 = SimulateBackwardError.apply(t3)
return torch.linalg.multi_dot([t1, t2, t3, t4])
@staticmethod
def _nested_rpc_call_backward_error(t1, t2, dst):
t1 = t1 * t2
t2 = t1 + t2
res = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._python_udf_with_backward_error,
args=(t1, t2),
)
return torch.linalg.multi_dot([t1, t2, res])
@dist_init
def test_backward_python_udf_error(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._nested_rpc_call_backward_error,
args=(t1, t2, self._next_rank()),
)
with self.assertRaisesRegex(
RuntimeError, "Simulate error on backward pass"
):
dist_autograd.backward(context_id, [loss.sum()])
_backward_done = False
@dist_init(clean_shutdown=False)
@sandcastle_skip_if(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_backward_node_failure_python_udf(self):
# Set a short timeout to quickly time out failed RPCs.
rpc._set_rpc_timeout(5) # 5 seconds
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
dst = self._next_rank()
res = rpc.rpc_sync(
worker_name(dst),
my_py_nested_call,
args=(t1, t2, dst, self.world_size, 1),
)
dist.barrier()
# Kill rank 2 (last hop of nested rpc) and verify rank 0 receives an error.
if self.rank == 2:
return
store = dist.distributed_c10d._get_default_store()
if self.rank == 0:
# Wait for rank 2 to die.
shutdown_error_regex = self.get_shutdown_error_regex()
wait_until_node_failure(2, shutdown_error_regex)
# Shutdown sequence is not very well defined and as a result
# we might see any error given by get_shutdown_error_regex().
with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
# Run backwards, and validate we receive an error since rank 2 is dead.
dist_autograd.backward(context_id, [res.sum()])
# Mark rank 0 as done in the store, since the RPC framework on
# some nodes might be broken at this point.
store.set('test_backward_node_failure_python_udf_rank0_done', "True")
else:
# Wait for backward to finish on rank 0.
store.wait(['test_backward_node_failure_python_udf_rank0_done'], timedelta(seconds=10))
@staticmethod
def _nested_python_udf(t1, t2, dst):
t3 = t1 * t2
t4 = t1 + t2
res = rpc.rpc_sync(worker_name(dst), my_py_add, args=(t3, t4))
return t1 * t2 * t3 * t4 * res
@dist_init
def test_backwards_nested_python_udf(self):
# Run equivalent of _nested_python_udf locally.
self._backwards_nested_python_udf(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False
)
_test_clean_context_backward_context_id = None
class MyBackwardFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
assert DistAutogradTest._test_clean_context_backward_context_id is not None
# Release the context to simulate error (use barrier before releasing
# context to ensure all nodes execute the backward function).
dist.barrier()
dist_autograd._release_context(
DistAutogradTest._test_clean_context_backward_context_id
)
# Verify all contexts are cleaned up.
assert _all_contexts_cleaned_up()
return input
@dist_init
def test_clean_context_during_backward(self):
"""
This test simulates the situation where the 'backward' call might throw
an exception locally which would lead to the autograd context being
cleaned up if we're using the context manager. As a result, the autograd
context might be cleaned up while some threads are still using the
autograd context.
It is fine for the 'backward' call to throw an exception in this test,
but the process should not crash.
"""
initialize_pg(self.file_init_method, self.rank, self.world_size)
context = dist_autograd._new_context()
context_id = context._context_id()
DistAutogradTest._test_clean_context_backward_context_id = context_id
# Send the context id to all nodes.
for i in range(0, self.world_size):
if i != self.rank:
rank_distance = (i - self.rank + self.world_size) % self.world_size
rpc.rpc_sync(
worker_name(i),
_set_rpc_done,
args=(context_id, rank_distance),
)
dist.barrier()
# Verify all context ids have been received.
self.assertEqual(self.world_size - 1, len(known_context_ids))
t1 = torch.rand((3, 3), requires_grad=True)
for i in range(0, 100):
dst = self._next_rank()
t1 = rpc.rpc_sync(worker_name(dst), torch.add, args=(t1, t1))
# Call MyBackwardFunc as the first op of the backward pass to
# ensure we release the context early in the backward pass.
t1 = DistAutogradTest.MyBackwardFunc.apply(t1)
self.assertEqual(100, len(context._send_functions()))
context_id = 100 # dummy context_id
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
dist_autograd.backward(context_id, [t1.sum()])
# HACK: Killing workers since otherwise the autograd engine gets stuck on
# other nodes. The proper fix would be addressing:
# https://github.com/pytorch/pytorch/issues/27643, which would inform
# other nodes about the failure.
# The autograd engine gets stuck on other nodes since they're waiting to
# receive gradients from the node that received an error (and as a
# result it didn't execute the rest of the graph).
dist.barrier()
rpc.shutdown(graceful=False)
sys.exit(0)
@classmethod
def _call_remote_embedding(cls, embedding_rref, input, offsets, per_sample_weights):
embedding = embedding_rref.local_value()
return embedding(input, offsets, per_sample_weights)
@classmethod
def _get_grad(cls, embedding_rref, context_id):
embedding = embedding_rref.local_value()
grad_map = dist_autograd.get_gradients(context_id)
return grad_map[embedding.weight]
@dist_init
def test_embedding_bag_with_no_grad_tensors(self):
dst = self._next_rank()
remote_embedding = rpc.remote(
worker_name(dst),
torch.nn.EmbeddingBag,
args=(16, 16),
kwargs={"mode": "sum", "sparse": True},
)
local_embedding = torch.nn.EmbeddingBag(16, 16, mode="sum", sparse=True)
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
# requires_grad = True to record send/recv functions
per_sample_weights = torch.rand((8), requires_grad=True)
offsets = torch.LongTensor([0, 4])
local_res = local_embedding(input, offsets, per_sample_weights)
# Run backward twice.
torch.autograd.backward([local_res.sum()], retain_graph=True)
torch.autograd.backward([local_res.sum()])
local_grad = local_embedding.weight.grad
with dist_autograd.context() as context_id:
res = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._call_remote_embedding,
args=(remote_embedding, input, offsets, per_sample_weights),
)
# Run backward twice to test accumulation of sparse gradients.
dist_autograd.backward(context_id, [res.sum()], retain_graph=True)
dist_autograd.backward(context_id, [res.sum()])
remote_grad = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._get_grad,
args=(remote_embedding, context_id),
)
self.assertEqual(local_grad, remote_grad)
@classmethod
def _mixed_requires_grad_operaton(cls, t1, t2):
if t2.requires_grad:
return t1 - t2
else:
return t1 * t2
@dist_init
def test_mixed_requires_grad(self):
self._mixed_requires_grad(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=False),
False
)
class TestDebugInfoFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
debug_info = dist_autograd._get_debug_info()
assert debug_info is not None
backward_passes = int(debug_info["num_current_backward_passes"])
# Hard to validate exact numbers because of the distributed nature.
# We can't use a barrier() here since that would block the single
# CPU thread available for autograd and can cause deadlocks.
assert backward_passes >= 1 and backward_passes <= 4
return input
@dist_init
def test_debug_info(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
i = 0
res = {}
res[i] = t1
for rank in range(self.world_size):
if rank != self.rank:
res[i + 1] = rpc.rpc_sync(
worker_name(rank), torch.add, args=(res[i], t2)
)
i += 1
# Call custom function in middle of backward pass to ensure all
# nodes are still waiting on a backward().
res[i + 1] = DistAutogradTest.TestDebugInfoFunc.apply(res[i])
i += 1
for rank in range(self.world_size):
if rank != self.rank:
res[i + 1] = rpc.rpc_sync(
worker_name(rank), torch.add, args=(res[i], t2)
)
i += 1
dist_autograd.backward(context_id, [res[i].sum()])
debug_info = dist_autograd._get_debug_info()
num_autograd_context = int(debug_info["num_autograd_contexts"])
# Need at least one context and not more than 4.
self.assertTrue(num_autograd_context >= 1 and num_autograd_context <= 4)
for rd in range(self.world_size - 1):
rpc.rpc_sync(
worker_name((self.rank + rd + 1) % self.world_size),
_set_rpc_done,
args=(context_id, rd + 1),
)
dist.barrier()
# Validate information
debug_info = dist_autograd._get_debug_info()
assert debug_info is not None
self.assertEqual(0, int(debug_info["num_current_backward_passes"]))
# only have `num_current_backward_passes` and `num_autograd_contexts`
self.assertTrue(len(debug_info) == 2)
self.assertTrue(_all_contexts_cleaned_up())
# All contexts should be cleaned up.
debug_info = dist_autograd._get_debug_info()
self.assertEqual(0, int(debug_info["num_autograd_contexts"]))
@staticmethod
def _workload_thread():
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
t3 = rpc.rpc_sync("worker0", torch.add, args=(t1, t2))
t4 = rpc.rpc_sync("worker0", torch.mul, args=(t2, t3))
t5 = rpc.rpc_sync("worker0", torch.matmul, args=(t3, t4))
t6 = rpc.rpc_sync("worker0", torch.add, args=(t4, t5))
dist_autograd.backward(context_id, [t6.sum()])
@dist_init
def test_async_dist_autograd(self):
"""
This test ensures async processing for distributed autograd works
appropriately. This is achieved by spawning multiple threads and
hammering a single node with a lot of backward() calls.
"""
initialize_pg(self.file_init_method, self.rank, self.world_size)
if self.rank != 0:
# All other ranks schedule work on rank 0.
threads = []
for i in range(20):
t = threading.Thread(target=DistAutogradTest._workload_thread)
t.start()
threads.append(t)
for thread in threads:
thread.join()
dist.barrier()
@dist_init
def test_backward_accumulate_grads(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
t3 = torch.matmul(t1, t2)
# Run backward twice.
torch.autograd.backward([t3.sum()], retain_graph=True)
torch.autograd.backward([t3.sum()])
t3 = rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
)
# Run backward twice.
dist_autograd.backward(context_id, [t3.sum()], retain_graph=True)
dist_autograd.backward(context_id, [t3.sum()])
# Verify the gradients are same for local and remote execution.
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
@staticmethod
def _test_nested_backward_accumulate_grads(t1, t2, dst_rank):
return rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2))
@dist_init
def test_nested_backward_accumulate_grads(self):
self._nested_backward_accumulate_grads(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False
)
@dist_init
def test_multiple_backward(self):
self._multiple_backward(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False
)
@dist_init(clean_shutdown=False)
def test_multiple_backward_with_errors(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
'worker{}'.format(self._next_rank()),
DistAutogradTest._python_udf_with_backward_error,
args=(t1, t2)).sum()
try:
# Run backward in a loop multiple times.
for i in range(100):
if i < 50:
with self.assertRaisesRegex(RuntimeError, "Simulate error on backward pass"):
dist_autograd.backward(context_id, [loss], retain_graph=True)
elif i > 50:
# Recovered from error.
dist_autograd.backward(context_id, [loss], retain_graph=True)
else:
dist.barrier()
SimulateBackwardError._simulate_error = False
dist.barrier()
finally:
# Sync before resetting flag.
dist.barrier()
# Reset the flag.
SimulateBackwardError._simulate_error = True
@dist_init
def test_backward_verify_hooks(self):
t1 = torch.ones((3, 3), requires_grad=True)
# Double the gradient.
t1.register_hook(lambda grad: grad * 2)
t2 = torch.ones((3, 3), requires_grad=True)
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(exec_mode, torch.matmul, t1, t2)
loss = ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@dist_init
def test_no_grad_copy(self):
'''
Similar to test in test_autograd.py.
'''
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad.data_ptr()
return grad, grad
class MyFuncSingleGrad(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFuncSingleGrad.static_grad_ptr = grad.data_ptr()
return grad
class NonContGradFunc(Function):
@staticmethod
def forward(ctx, inp1):
ctx.size = inp1.size()
return torch.tensor([1.])
@staticmethod
def backward(ctx, grad):
return torch.ones(1).expand(ctx.size)
a = torch.randn(5, 6, requires_grad=True)
b = torch.randn(5, 6, requires_grad=True)
# non-contiguous grad should be copied
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [NonContGradFunc.apply(MyFunc.apply(a, b))])
grads = dist_autograd.get_gradients(context_id)
self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
# test case that should trigger no copy for a
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [MyFuncSingleGrad.apply(a)[1][0]])
grads = dist_autograd.get_gradients(context_id)
p_g = MyFuncSingleGrad.static_grad_ptr
p_a = grads[a].data_ptr()
# Verify there was no clone.
self.assertTrue(p_a == p_g)
# Test case that should trigger a copy for both a and b. This is
# different in the distributed autograd case since we hold
# a reference to all grads in a vector until all accumulation is done.
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [MyFunc.apply(a, b)[1][0]])
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a].data_ptr()
p_b = grads[b].data_ptr()
# check that a and b use different grad buffers
self.assertFalse(p_a == p_b)
# both should be copied.
self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
@dist_init
def test_no_grad_copy_sparse(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
return grad
class NonContGradFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
                # Create a sparse tensor with non-contiguous indices and values
# and return as grad.
v = torch.rand(1, 3)
i = torch.ones(1, 1, dtype=torch.long)
nv = v.expand(8, 3)
ni = i.expand(1, 8)
ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
return ngrad, ngrad
a = torch.randn(10, 3, requires_grad=True)
b = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
# test case that should trigger no copy for a.
with dist_autograd.context() as context_id:
emb_matrix = MyFunc.apply(a)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
# check a uses the same buffer
self.assertTrue(p_a == p_g)
# Run backwards multiple times.
for i in range(10):
dist_autograd.backward(context_id, [loss], retain_graph=True)
# non-contiguous indices and value, we should trigger a copy.
with dist_autograd.context() as context_id:
emb_matrix = NonContGradFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = NonContGradFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
p_b = grads[b]._values().data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# Verify we cloned both grads.
self.assertFalse(p_a == p_g)
self.assertFalse(p_b == p_g)
# Run backwards multiple times to verify accumulation.
for i in range(10):
dist_autograd.backward(context_id, [loss], retain_graph=True)
@dist_init
def test_grad_copy_sparse_indices_extra_ref(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
static_grad_indices_ref = None
static_grad_values_ref = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
# indices() and values() return views, so holding onto
# references of them would not increment refcount of indices
# and values inside the sparse tensor.
MyFunc.static_grad_indices_ref = grad._indices()
MyFunc.static_grad_values_ref = grad._values()
return grad
a = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
with dist_autograd.context() as context_id:
emb_matrix = MyFunc.apply(a)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
self.assertIsNotNone(MyFunc.static_grad_indices_ref)
self.assertIsNotNone(MyFunc.static_grad_values_ref)
# grad would be stolen, since static_grad_indices_ref and
# static_grad_values_ref are holding onto views and don't bump the
# refcount.
self.assertTrue(p_g == p_a)
@dist_init
def test_post_hooks(self):
self.hook_called_times = 0
def post_hook_add_one(output_grads, input_grads):
self.hook_called_times += 1
return output_grads
def post_hook_add_two(output_grads, input_grads):
self.hook_called_times += 2
return output_grads
t = torch.rand(10, 10, requires_grad=True)
a = t + t
# Register post hooks
accumulate_grad_0 = a.grad_fn.next_functions[0][0]
accumulate_grad_0.register_hook(post_hook_add_one)
accumulate_grad_0.register_hook(post_hook_add_two)
accumulate_grad_1 = a.grad_fn.next_functions[1][0]
accumulate_grad_1.register_hook(post_hook_add_two)
with dist_autograd.context() as context_id:
loss = a.sum()
dist_autograd.backward(context_id, [loss])
self.assertEqual(5, self.hook_called_times)
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(1, len(grads))
self.assertTrue(t in grads)
@staticmethod
def _slow_add(t1, t2):
time.sleep(1)
t3 = t1 + t2
t3.requires_grad = True
return t3
@dist_init
def test_thread_local_context_id(self):
t1 = torch.rand((3, 3))
t2 = torch.rand((3, 3))
t3 = t1 + t2
t3.requires_grad = True
t3.sum().backward()
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, DistAutogradTest._slow_add, args=(t1, t2))
with dist_autograd.context() as context_id:
loss = rref.to_here().sum()
# due to slow add, the continuation of this backward pass will be
# invoked by the previous rpc.remote thread which does not have a
# valid context_id. So, this can test whether we propagate
# thread_local states properly when jumping across threads on the
# server side.
dist_autograd.backward(context_id, [loss])
self.assertTrue(
rpc.rpc_sync(
dst,
_compare_owner_value,
args=(context_id, rref, t3.grad)
)
)
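# Illustrative sketch (not part of the original test suite): the "no grad copy"
# behaviour that test_no_grad_copy above contrasts with distributed autograd,
# shown with plain local autograd. With a single gradient contribution, local
# autograd can typically hand the incoming gradient straight to the leaf's
# .grad without cloning, so the data pointers usually match; dist_autograd
# keeps references to all grads until accumulation finishes, which is why the
# distributed test expects copies in the multi-input case.
def _local_no_grad_copy_sketch():
    import torch
    from torch.autograd import Function
    class MyFuncSingle(Function):
        static_grad_ptr = None
        @staticmethod
        def forward(ctx, inp):
            return inp
        @staticmethod
        def backward(ctx, grad):
            # Remember the pointer of the gradient seen during backward.
            MyFuncSingle.static_grad_ptr = grad.data_ptr()
            return grad
    a = torch.randn(5, 6, requires_grad=True)
    MyFuncSingle.apply(a)[1][0].backward()
    # True when the accumulated gradient reuses the backward buffer.
    return a.grad.data_ptr() == MyFuncSingle.static_grad_ptr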
class CudaDistAutogradTest(CommonDistAutogradTest):
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_simple(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
(t1 + t2).sum().backward()
with dist_autograd.context() as context_id:
t3 = t1 + t2
dist_autograd.backward(context_id, [t3.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_to_cpu_continuation(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True)
# Run a few iterations.
for i in range(3):
t1.grad = None
t2.grad = None
# Root is CPU
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
t3 = self._exec_func(exec_mode, torch.add, t2, t2)
t4 = t3.cuda(0) + t1
t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2)
t6 = t5.cuda(0) + t4
t7 = self._exec_func(exec_mode, torch.add, t6.cpu(), t5)
# Autograd graph consists of CPU -> GPU -> CPU execution.
ret = self._verify_backwards(
exec_mode, [t7.sum()], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_to_cpu_continuation_gpu_root(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True)
# Run a few iterations.
for i in range(3):
t1.grad = None
t2.grad = None
            # Root is GPU
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
t3 = self._exec_func(exec_mode, torch.add, t2, t2)
t4 = t3.cuda(0) + t1
t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2)
t6 = t5.cuda(0) + t4
# Autograd graph consists of CPU -> GPU -> CPU execution.
ret = self._verify_backwards(
exec_mode, [t6.sum()], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
class FaultyAgentDistAutogradTest(RpcAgentTestFixture):
# Reusing a simplified helper function from DistAutogradTest to ensure
# autograd context is successfully cleaned up even when RPCs are failing.
def context_cleanup_test_helper(self, rpc_args, func):
initialize_pg(self.file_init_method, self.rank, self.world_size)
        # Test that in dist autograd, when the tensors communicated over RPC do
        # NOT require grad, we still clean up the dist autograd contexts created
        # on other nodes. This is because the autograd context is still
        # communicated over RPC even if the tensor arguments do not require grad,
        # as it is possible that the response could require grad.
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# the thread's context id should be cleaned up
with self.assertRaises(RuntimeError):
dist_autograd._retrieve_context(context_id)
# Ensure all peers have finished mutating the
# `known_context_ids` set.
dist.barrier()
# check that all contexts have been cleaned up.
success = _all_contexts_cleaned_up()
self.assertTrue(success)
# no faulty_messages defined so this fails all retryable messages - see
# faulty_rpc_agent_test_fixture.py for the list of retryable messages.
@dist_init
def test_context_cleanup_tensor_with_grad(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_verify_backend_options(self):
self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE)
self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
class WrapperModule(nn.Module):
def __init__(self, model, device):
super().__init__()
self.model = model.to(device)
def forward(self, *args):
return self.model(*args)
def gradients(self, ctx_id):
grads = dist_autograd.get_gradients(ctx_id)
return [grads[p] for p in self.model.parameters()]
class TensorPipeCudaDistAutogradTest(RpcAgentTestFixture):
@skip_if_lt_x_gpu(4)
def test_device_maps_backward_pass(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
# The reverse of this device mapping should be used for the backward pass.
options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
t1 = torch.rand(10, device=self.rank, requires_grad=True)
t2 = torch.rand(10, device=self.rank, requires_grad=True)
with dist_autograd.context() as context_id:
res = rpc.rpc_sync(dst, torch.add, args=(t1, t2))
dist_autograd.backward(context_id, [res.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(torch.ones(10), grads[t1])
self.assertEqual(torch.ones(10), grads[t2])
self.assertEqual(t1.device, grads[t1].device)
self.assertEqual(t2.device, grads[t2].device)
rpc.shutdown()
class MyRemoteCompute(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
input = input * 2.0
return input
class MyLocalCompute(torch.nn.Module):
def __init__(self, next_stage):
super().__init__()
self.next_stage = next_stage
def forward(self, input):
return self.next_stage.rpc_sync().forward(input)
@skip_if_lt_x_gpu(4)
def test_dist_autograd_sync_streams(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
# The reverse of this device mapping should be used for the backward pass.
options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
remote_compute = rpc.remote(dst, TensorPipeCudaDistAutogradTest.MyRemoteCompute)
local_compute = TensorPipeCudaDistAutogradTest.MyLocalCompute(remote_compute)
for _ in range(10):
input = torch.rand([1000, 10000], device=self.rank, requires_grad=True)
# Run local autograd
result = input * 2.0
r = random.random()
loss = result.sum() * r
loss.backward()
# Run distributed autograd
with dist_autograd.context() as context_id:
result = local_compute(input)
loss = result.sum() * r
dist_autograd.backward(context_id, [loss])
# Compare grads.
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(input.grad, grads[input])
rpc.shutdown()
@skip_if_lt_x_gpu(4)
def test_gradients_synchronizations(self):
options = self.rpc_backend_options
for peer_rank in range(self.world_size):
options.set_device_map(worker_name(peer_rank), {self.rank: peer_rank})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 0:
# this is master
layers = [nn.Linear(2000, 2000) for _ in range(self.world_size - 1)]
local_layers = [l.to(0) for l in layers]
remote_layers = []
for rank in range(1, self.world_size):
remote_layers.append(rpc.remote(
worker_name(rank),
WrapperModule,
args=(layers[rank - 1], rank)
))
x = torch.randn(5000, 2000).to(0)
# local iteration
local_model = nn.Sequential(*local_layers)
local_model(x).sum().backward()
# remote iteration
with dist_autograd.context() as context_id:
for remote_layer in remote_layers:
x = remote_layer.rpc_sync().forward(x)
dist_autograd.backward(context_id, [x.sum()])
futs = []
for remote_layer in remote_layers:
futs.append(remote_layer.rpc_async().gradients(context_id))
for i in range(len(futs)):
local_gradients = [p.grad for p in local_layers[i].parameters()]
for g1, g2 in zip(futs[i].wait(), local_gradients):
self.assertEqual(g1, g2)
rpc.shutdown()
|
interface.py
|
#!/usr/bin/python3 -OO
# Copyright 2007-2020 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.interface - webinterface
"""
import os
import time
from datetime import datetime
import cherrypy
import logging
import urllib.request, urllib.parse, urllib.error
import re
import hashlib
import socket
import ssl
import functools
from threading import Thread
from random import randint
from xml.sax.saxutils import escape
import sabnzbd
import sabnzbd.rss
import sabnzbd.scheduler as scheduler
from Cheetah.Template import Template
from sabnzbd.misc import (
to_units,
from_units,
time_format,
calc_age,
int_conv,
get_base_url,
probablyipv4,
probablyipv6,
opts_to_pp,
)
from sabnzbd.filesystem import real_path, long_path, globber, globber_full, remove_all, clip_path, same_file
from sabnzbd.newswrapper import GetServerParms
from sabnzbd.bpsmeter import BPSMeter
from sabnzbd.encoding import xml_name, utob
import sabnzbd.config as config
import sabnzbd.cfg as cfg
import sabnzbd.notifier as notifier
import sabnzbd.newsunpack
from sabnzbd.downloader import Downloader
from sabnzbd.nzbqueue import NzbQueue
from sabnzbd.utils.servertests import test_nntp_server_dict
from sabnzbd.decoder import SABYENC_ENABLED
from sabnzbd.utils.diskspeed import diskspeedmeasure
from sabnzbd.utils.getperformance import getpystone
from sabnzbd.utils.internetspeed import internetspeed
from sabnzbd.constants import MEBI, DEF_SKIN_COLORS, DEF_STDCONFIG, DEF_MAIN_TMPL, DEFAULT_PRIORITY, CHEETAH_DIRECTIVES
from sabnzbd.lang import list_languages
from sabnzbd.api import (
list_scripts,
list_cats,
del_from_section,
api_handler,
build_queue,
build_status,
retry_job,
build_header,
build_history,
del_hist_job,
Ttemplate,
build_queue_header,
)
##############################################################################
# Global constants
##############################################################################
##############################################################################
# Security functions
##############################################################################
def secured_expose(wrap_func=None, check_configlock=False, check_api_key=False):
""" Wrapper for both cherrypy.expose and login/access check """
if not wrap_func:
return functools.partial(secured_expose, check_configlock=check_configlock, check_api_key=check_api_key)
# Expose to cherrypy
wrap_func.exposed = True
@functools.wraps(wrap_func)
def internal_wrap(*args, **kwargs):
        # Add the X-Frame-Options header to page requests
if cfg.x_frame_options():
cherrypy.response.headers["X-Frame-Options"] = "SameOrigin"
# Check if config is locked
if check_configlock and cfg.configlock():
cherrypy.response.status = 403
return "Access denied - Configuration locked"
# Check if external access
if not check_access():
cherrypy.response.status = 403
return "Access denied"
# Verify login status, only for non-key pages
if not check_login() and not check_api_key:
raise Raiser("/login/")
# Verify host used for the visit
if not check_hostname():
cherrypy.response.status = 403
return "Access denied - Hostname verification failed: https://sabnzbd.org/hostname-check"
# Some pages need correct API key
if check_api_key:
msg = check_apikey(kwargs)
if msg:
return msg
# All good, cool!
return wrap_func(*args, **kwargs)
return internal_wrap
def check_access(access_type=4):
"""Check if external address is allowed given access_type:
1=nzb
2=api
3=full_api
4=webui
5=webui with login for external
"""
referrer = cherrypy.request.remote.ip
    # CherryPy will report an IPv4 client as ::ffff:192.168.0.10 in a dual-stack situation,
    # so the address will then always carry that ::ffff: prefix
range_ok = not cfg.local_ranges() or bool(
[1 for r in cfg.local_ranges() if (referrer.startswith(r) or referrer.replace("::ffff:", "").startswith(r))]
)
allowed = referrer in ("127.0.0.1", "::ffff:127.0.0.1", "::1") or range_ok or access_type <= cfg.inet_exposure()
if not allowed:
logging.debug("Refused connection from %s", referrer)
return allowed
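# Illustrative sketch (not part of the original module): how the local-range
# matching in check_access() treats dual-stack addresses. CherryPy may report
# an IPv4 client as "::ffff:192.168.0.10"; the prefix is stripped before the
# startswith() comparison, so an IPv4 range such as "192.168." still matches.
def _local_range_match_sketch(referrer, ranges):
    return bool(
        [1 for r in ranges if (referrer.startswith(r) or referrer.replace("::ffff:", "").startswith(r))]
    )
# Example: _local_range_match_sketch("::ffff:192.168.0.10", ["192.168."]) -> True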
def check_hostname():
"""Check if hostname is allowed, to mitigate DNS-rebinding attack.
Similar to CVE-2019-5702, we need to add protection even
if only allowed to be accessed via localhost.
"""
    # If login is enabled, no API-key can be deduced
if cfg.username() and cfg.password():
return True
# Don't allow requests without Host
host = cherrypy.request.headers.get("Host")
if not host:
return False
# Remove the port-part (like ':8080'), if it is there, always on the right hand side.
# Not to be confused with IPv6 colons (within square brackets)
host = re.sub(":[0123456789]+$", "", host).lower()
# Fine if localhost or IP
if host == "localhost" or probablyipv4(host) or probablyipv6(host):
return True
# Check on the whitelist
if host in cfg.host_whitelist():
return True
    # Fine if it ends with ".local" or ".local.", aka an mDNS name
# See rfc6762 Multicast DNS
if host.endswith((".local", ".local.")):
return True
# Ohoh, bad
log_warning_and_ip(T('Refused connection with hostname "%s" from:') % host)
return False
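# Illustrative sketch (not part of the original module): the hostname
# normalization used in check_hostname(). Only a trailing port is removed,
# so a bracketed IPv6 literal keeps its internal colons, and mDNS names
# pass the ".local" check after lowercasing. The hostnames below are
# hypothetical examples.
def _normalize_host_sketch(host):
    return re.sub(":[0123456789]+$", "", host).lower()
# Examples:
#   _normalize_host_sketch("MyNAS.local:8080") -> "mynas.local"
#   _normalize_host_sketch("[::1]:8080")       -> "[::1]"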
# Create a more unique ID for each instance
COOKIE_SECRET = str(randint(1000, 100000) * os.getpid())
def set_login_cookie(remove=False, remember_me=False):
"""We try to set a cookie as unique as possible
to the current user. Based on it's IP and the
current process ID of the SAB instance and a random
number, so cookies cannot be re-used
"""
salt = randint(1, 1000)
cookie_str = utob(str(salt) + cherrypy.request.remote.ip + COOKIE_SECRET)
cherrypy.response.cookie["login_cookie"] = hashlib.sha1(cookie_str).hexdigest()
cherrypy.response.cookie["login_cookie"]["path"] = "/"
cherrypy.response.cookie["login_cookie"]["httponly"] = 1
cherrypy.response.cookie["login_salt"] = salt
cherrypy.response.cookie["login_salt"]["path"] = "/"
cherrypy.response.cookie["login_salt"]["httponly"] = 1
# If we want to be remembered
if remember_me:
cherrypy.response.cookie["login_cookie"]["max-age"] = 3600 * 24 * 14
cherrypy.response.cookie["login_salt"]["max-age"] = 3600 * 24 * 14
# To remove
if remove:
cherrypy.response.cookie["login_cookie"]["expires"] = 0
cherrypy.response.cookie["login_salt"]["expires"] = 0
else:
# Notify about new login
notifier.send_notification(T("User logged in"), T("User logged in to the web interface"), "new_login")
def check_login_cookie():
# Do we have everything?
if "login_cookie" not in cherrypy.request.cookie or "login_salt" not in cherrypy.request.cookie:
return False
cookie_str = utob(str(cherrypy.request.cookie["login_salt"].value) + cherrypy.request.remote.ip + COOKIE_SECRET)
return cherrypy.request.cookie["login_cookie"].value == hashlib.sha1(cookie_str).hexdigest()
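# Illustrative sketch (not part of the original module): the value both
# set_login_cookie() and check_login_cookie() derive. It hashes the per-login
# salt, the client IP and the per-process COOKIE_SECRET, so a stolen cookie
# is useless from another address or after SABnzbd restarts.
def _login_cookie_value_sketch(salt, remote_ip, secret=COOKIE_SECRET):
    return hashlib.sha1(utob(str(salt) + remote_ip + secret)).hexdigest()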
def check_login():
# Not when no authentication required or basic-auth is on
if not cfg.html_login() or not cfg.username() or not cfg.password():
return True
    # If login is only required for external access (inet_exposure=5),
    # access_type=6 checks whether the request comes from a local IP
if cfg.inet_exposure() == 5 and check_access(access_type=6):
return True
# Check the cookie
return check_login_cookie()
def check_basic_auth(_, username, password):
""" CherryPy basic authentication validation """
return username == cfg.username() and password == cfg.password()
def set_auth(conf):
""" Set the authentication for CherryPy """
if cfg.username() and cfg.password() and not cfg.html_login():
conf.update(
{
"tools.auth_basic.on": True,
"tools.auth_basic.realm": "SABnzbd",
"tools.auth_basic.checkpassword": check_basic_auth,
}
)
conf.update(
{
"/api": {"tools.auth_basic.on": False},
"%s/api" % cfg.url_base(): {"tools.auth_basic.on": False},
}
)
else:
conf.update({"tools.auth_basic.on": False})
def check_apikey(kwargs):
"""Check API-key or NZB-key
Return None when OK, otherwise an error message
"""
mode = kwargs.get("mode", "")
name = kwargs.get("name", "")
# Lookup required access level, returns 4 for config-things
req_access = sabnzbd.api.api_level(mode, name)
if req_access == 1 and check_access(1):
# NZB-only actions
pass
elif not check_access(req_access):
return "Access denied"
# First check API-key, if OK that's sufficient
if not cfg.disable_key():
key = kwargs.get("apikey")
if not key:
if cfg.api_warnings():
log_warning_and_ip(
T("API Key missing, please enter the api key from Config->General into your 3rd party program:")
)
return "API Key Required"
elif req_access == 1 and key == cfg.nzb_key():
return None
elif key == cfg.api_key():
return None
else:
log_warning_and_ip(T("API Key incorrect, Use the api key from Config->General in your 3rd party program:"))
return "API Key Incorrect"
# No active API-key, check web credentials instead
if cfg.username() and cfg.password():
if check_login() or (
kwargs.get("ma_username") == cfg.username() and kwargs.get("ma_password") == cfg.password()
):
pass
else:
if cfg.api_warnings():
log_warning_and_ip(
T(
"Authentication missing, please enter username/password from Config->General into your 3rd party program:"
)
)
return "Missing authentication"
return None
def log_warning_and_ip(txt):
""" Include the IP and the Proxy-IP for warnings """
# Was it proxy forwarded?
xff = cherrypy.request.headers.get("X-Forwarded-For")
if xff:
txt = "%s %s (X-Forwarded-For: %s)>%s" % (
txt,
cherrypy.request.remote.ip,
xff,
cherrypy.request.headers.get("User-Agent", "??"),
)
else:
txt = "%s %s>%s" % (txt, cherrypy.request.remote.ip, cherrypy.request.headers.get("User-Agent", "??"))
logging.warning(txt)
##############################################################################
# Helper raiser functions
##############################################################################
def Raiser(root="", **kwargs):
args = {}
for key in kwargs:
val = kwargs.get(key)
if val:
args[key] = val
# Add extras
if args:
root = "%s?%s" % (root, urllib.parse.urlencode(args))
# Optionally add the leading /sabnzbd/ (or what the user set)
if not root.startswith(cfg.url_base()):
root = cherrypy.request.script_name + root
# Send the redirect
return cherrypy.HTTPRedirect(root)
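# Illustrative note (not part of the original module): for a hypothetical
# script_name of "/sabnzbd", Raiser("/queue/", start=50, limit=50) redirects
# to "/sabnzbd/queue/?start=50&limit=50"; falsy values are dropped, so
# Raiser("/queue/", search=None) redirects to plain "/sabnzbd/queue/".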
def queueRaiser(root, kwargs):
return Raiser(root, start=kwargs.get("start"), limit=kwargs.get("limit"), search=kwargs.get("search"))
def rssRaiser(root, kwargs):
return Raiser(root, feed=kwargs.get("feed"))
##############################################################################
# Page definitions
##############################################################################
class MainPage:
def __init__(self):
self.__root = "/"
# Add all sub-pages
self.login = LoginPage()
self.queue = QueuePage("/queue/")
self.history = HistoryPage("/history/")
self.status = Status("/status/")
self.config = ConfigPage("/config/")
self.nzb = NzoPage("/nzb/")
self.wizard = Wizard("/wizard/")
@secured_expose
def index(self, **kwargs):
# Redirect to wizard if no servers are set
if kwargs.get("skip_wizard") or config.get_servers():
info = build_header()
info["scripts"] = list_scripts(default=True)
info["script"] = "Default"
info["cat"] = "Default"
info["categories"] = list_cats(True)
info["have_rss_defined"] = bool(config.get_rss())
info["have_watched_dir"] = bool(cfg.dirscan_dir())
            # Offer logout only with HTML login and, for inet_exposure=5, only when the visit is external
info["have_logout"] = (
cfg.username()
and cfg.password()
and (
cfg.html_login()
and (cfg.inet_exposure() < 5 or (cfg.inet_exposure() == 5 and not check_access(access_type=6)))
)
)
bytespersec_list = BPSMeter.do.get_bps_list()
info["bytespersec_list"] = ",".join([str(bps) for bps in bytespersec_list])
template = Template(
file=os.path.join(sabnzbd.WEB_DIR, "main.tmpl"), searchList=[info], compilerSettings=CHEETAH_DIRECTIVES
)
return template.respond()
else:
# Redirect to the setup wizard
raise cherrypy.HTTPRedirect("%s/wizard/" % cfg.url_base())
@secured_expose(check_api_key=True)
def shutdown(self, **kwargs):
# Check for PID
pid_in = kwargs.get("pid")
if pid_in and int(pid_in) != os.getpid():
return "Incorrect PID for this instance, remove PID from URL to initiate shutdown."
sabnzbd.shutdown_program()
return T("SABnzbd shutdown finished")
@secured_expose(check_api_key=True)
def pause(self, **kwargs):
scheduler.plan_resume(0)
Downloader.do.pause()
raise Raiser(self.__root)
@secured_expose(check_api_key=True)
def resume(self, **kwargs):
scheduler.plan_resume(0)
sabnzbd.unpause_all()
raise Raiser(self.__root)
@cherrypy.expose
def api(self, **kwargs):
""" Redirect to API-handler """
return api_handler(kwargs)
@secured_expose
def scriptlog(self, **kwargs):
""" Needed for all skins, URL is fixed due to postproc """
# No session key check, due to fixed URLs
name = kwargs.get("name")
if name:
history_db = sabnzbd.get_db_connection()
return ShowString(history_db.get_name(name), history_db.get_script_log(name))
else:
raise Raiser(self.__root)
@secured_expose(check_api_key=True)
def retry(self, **kwargs):
""" Duplicate of retry of History, needed for some skins """
job = kwargs.get("job", "")
url = kwargs.get("url", "").strip()
pp = kwargs.get("pp")
cat = kwargs.get("cat")
script = kwargs.get("script")
if url:
sabnzbd.add_url(url, pp, script, cat, nzbname=kwargs.get("nzbname"))
del_hist_job(job, del_files=True)
raise Raiser(self.__root)
@secured_expose(check_api_key=True)
def retry_pp(self, **kwargs):
# Duplicate of History/retry_pp to please the SMPL skin :(
retry_job(kwargs.get("job"), kwargs.get("nzbfile"), kwargs.get("password"))
raise Raiser(self.__root)
@secured_expose
def robots_txt(self, **kwargs):
""" Keep web crawlers out """
cherrypy.response.headers["Content-Type"] = "text/plain"
return "User-agent: *\nDisallow: /\n"
##############################################################################
class Wizard:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
""" Show the language selection page """
if sabnzbd.WIN32:
from sabnzbd.utils.apireg import get_install_lng
cfg.language.set(get_install_lng())
logging.debug('Installer language code "%s"', cfg.language())
info = build_header(sabnzbd.WIZARD_DIR)
info["languages"] = list_languages()
template = Template(
file=os.path.join(sabnzbd.WIZARD_DIR, "index.html"), searchList=[info], compilerSettings=CHEETAH_DIRECTIVES
)
return template.respond()
@secured_expose(check_configlock=True)
def one(self, **kwargs):
""" Accept language and show server page """
if kwargs.get("lang"):
cfg.language.set(kwargs.get("lang"))
# Always setup Glitter
change_web_dir("Glitter - Default")
info = build_header(sabnzbd.WIZARD_DIR)
info["certificate_validation"] = sabnzbd.CERTIFICATE_VALIDATION
# Just in case, add server
servers = config.get_servers()
if not servers:
info["host"] = ""
info["port"] = ""
info["username"] = ""
info["password"] = ""
info["connections"] = ""
info["ssl"] = 0
info["ssl_verify"] = 2
else:
# Sort servers to get the first enabled one
server_names = sorted(
servers.keys(),
key=lambda svr: "%d%02d%s"
% (int(not servers[svr].enable()), servers[svr].priority(), servers[svr].displayname().lower()),
)
for server in server_names:
# If there are multiple servers, just use the first enabled one
s = servers[server]
info["host"] = s.host()
info["port"] = s.port()
info["username"] = s.username()
info["password"] = s.password.get_stars()
info["connections"] = s.connections()
info["ssl"] = s.ssl()
info["ssl_verify"] = s.ssl_verify()
if s.enable():
break
template = Template(
file=os.path.join(sabnzbd.WIZARD_DIR, "one.html"), searchList=[info], compilerSettings=CHEETAH_DIRECTIVES
)
return template.respond()
@secured_expose(check_configlock=True)
def two(self, **kwargs):
""" Accept server and show the final page for restart """
# Save server details
if kwargs:
kwargs["enable"] = 1
handle_server(kwargs)
config.save_config()
# Show Restart screen
info = build_header(sabnzbd.WIZARD_DIR)
info["access_url"], info["urls"] = get_access_info()
info["download_dir"] = cfg.download_dir.get_clipped_path()
info["complete_dir"] = cfg.complete_dir.get_clipped_path()
template = Template(
file=os.path.join(sabnzbd.WIZARD_DIR, "two.html"), searchList=[info], compilerSettings=CHEETAH_DIRECTIVES
)
return template.respond()
@secured_expose
def exit(self, **kwargs):
""" Stop SABnzbd """
sabnzbd.shutdown_program()
return T("SABnzbd shutdown finished")
def get_access_info():
""" Build up a list of url's that sabnzbd can be accessed from """
# Access_url is used to provide the user a link to sabnzbd depending on the host
access_uri = "localhost"
cherryhost = cfg.cherryhost()
if cherryhost == "0.0.0.0":
host = socket.gethostname()
socks = [host]
# Grab a list of all ips for the hostname
try:
addresses = socket.getaddrinfo(host, None)
except:
addresses = []
for addr in addresses:
address = addr[4][0]
# Filter out ipv6 addresses (should not be allowed)
if ":" not in address and address not in socks:
socks.append(address)
if "host" in cherrypy.request.headers:
host = cherrypy.request.headers["host"]
host = host.rsplit(":")[0]
access_uri = host
socks.insert(0, host)
else:
socks.insert(0, "localhost")
elif cherryhost == "::":
host = socket.gethostname()
socks = [host]
# Grab a list of all ips for the hostname
addresses = socket.getaddrinfo(host, None)
for addr in addresses:
address = addr[4][0]
# Only ipv6 addresses will work
if ":" in address:
address = "[%s]" % address
if address not in socks:
socks.append(address)
if "host" in cherrypy.request.headers:
host = cherrypy.request.headers["host"]
host = host.rsplit(":")[0]
access_uri = host
socks.insert(0, host)
else:
socks.insert(0, "localhost")
elif not cherryhost:
socks = [socket.gethostname()]
access_uri = socket.gethostname()
else:
socks = [cherryhost]
access_uri = cherryhost
urls = []
for sock in socks:
if sock:
if cfg.enable_https() and cfg.https_port():
url = "https://%s:%s%s" % (sock, cfg.https_port(), cfg.url_base())
elif cfg.enable_https():
url = "https://%s:%s%s" % (sock, cfg.cherryport(), cfg.url_base())
else:
url = "http://%s:%s%s" % (sock, cfg.cherryport(), cfg.url_base())
urls.append(url)
if cfg.enable_https() and cfg.https_port():
access_url = "https://%s:%s%s" % (sock, cfg.https_port(), cfg.url_base())
elif cfg.enable_https():
access_url = "https://%s:%s%s" % (access_uri, cfg.cherryport(), cfg.url_base())
else:
access_url = "http://%s:%s%s" % (access_uri, cfg.cherryport(), cfg.url_base())
return access_url, urls
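# Illustrative note (not part of the original module): the URL shapes produced
# above, assuming a url_base of "/sabnzbd":
#   HTTPS with a dedicated port:  https://<host>:<https_port>/sabnzbd
#   HTTPS on the main port:       https://<host>:<cherryport>/sabnzbd
#   Plain HTTP:                   http://<host>:<cherryport>/sabnzbd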
##############################################################################
class LoginPage:
@cherrypy.expose
def index(self, **kwargs):
# Base output var
info = build_header(sabnzbd.WEB_DIR_CONFIG)
info["error"] = ""
# Logout?
if kwargs.get("logout"):
set_login_cookie(remove=True)
raise Raiser()
# Check if there's even a username/password set
if check_login():
raise Raiser(cherrypy.request.script_name + "/")
# Was it proxy forwarded?
xff = cherrypy.request.headers.get("X-Forwarded-For")
# Check login info
if kwargs.get("username") == cfg.username() and kwargs.get("password") == cfg.password():
# Save login cookie
set_login_cookie(remember_me=kwargs.get("remember_me", False))
            # Log the success
if xff:
logging.info("Successful login from %s (X-Forwarded-For: %s)", cherrypy.request.remote.ip, xff)
else:
logging.info("Successful login from %s", cherrypy.request.remote.ip)
# Redirect
raise Raiser(cherrypy.request.script_name + "/")
elif kwargs.get("username") or kwargs.get("password"):
info["error"] = T("Authentication failed, check username/password.")
# Warn about the potential security problem
fail_msg = T("Unsuccessful login attempt from %s") % cherrypy.request.remote.ip
if xff:
fail_msg = "%s (X-Forwarded-For: %s)" % (fail_msg, xff)
logging.warning(fail_msg)
# Show login
template = Template(
file=os.path.join(sabnzbd.WEB_DIR_CONFIG, "login", "main.tmpl"),
searchList=[info],
compilerSettings=CHEETAH_DIRECTIVES,
)
return template.respond()
##############################################################################
class NzoPage:
def __init__(self, root):
self.__root = root
self.__cached_selection = {} # None
@secured_expose
def default(self, *args, **kwargs):
# Allowed URL's
# /nzb/SABnzbd_nzo_xxxxx/
# /nzb/SABnzbd_nzo_xxxxx/details
# /nzb/SABnzbd_nzo_xxxxx/files
# /nzb/SABnzbd_nzo_xxxxx/bulk_operation
# /nzb/SABnzbd_nzo_xxxxx/save
nzo_id = None
for a in args:
if a.startswith("SABnzbd_nzo"):
nzo_id = a
break
nzo = NzbQueue.do.get_nzo(nzo_id)
if nzo_id and nzo:
info, pnfo_list, bytespersec, q_size, bytes_left_previous_page = build_queue_header()
# /SABnzbd_nzo_xxxxx/bulk_operation
if "bulk_operation" in args:
return self.bulk_operation(nzo_id, kwargs)
# /SABnzbd_nzo_xxxxx/details
elif "details" in args:
info = self.nzo_details(info, pnfo_list, nzo_id)
# /SABnzbd_nzo_xxxxx/files
elif "files" in args:
info = self.nzo_files(info, nzo_id)
# /SABnzbd_nzo_xxxxx/save
elif "save" in args:
self.save_details(nzo_id, args, kwargs)
return # never reached
# /SABnzbd_nzo_xxxxx/
else:
info = self.nzo_details(info, pnfo_list, nzo_id)
info = self.nzo_files(info, nzo_id)
template = Template(
file=os.path.join(sabnzbd.WEB_DIR, "nzo.tmpl"), searchList=[info], compilerSettings=CHEETAH_DIRECTIVES
)
return template.respond()
else:
# Job no longer exists, go to main page
raise Raiser(urllib.parse.urljoin(self.__root, "../queue/"))
def nzo_details(self, info, pnfo_list, nzo_id):
slot = {}
n = 0
for pnfo in pnfo_list:
if pnfo.nzo_id == nzo_id:
nzo = NzbQueue.do.get_nzo(nzo_id)
repair = pnfo.repair
unpack = pnfo.unpack
delete = pnfo.delete
unpackopts = opts_to_pp(repair, unpack, delete)
script = pnfo.script
if script is None:
script = "None"
cat = pnfo.category
if not cat:
cat = "None"
slot["nzo_id"] = str(nzo_id)
slot["cat"] = cat
slot["filename"] = nzo.final_name
slot["filename_clean"] = nzo.final_name
slot["password"] = nzo.password or ""
slot["script"] = script
slot["priority"] = str(pnfo.priority)
slot["unpackopts"] = str(unpackopts)
info["index"] = n
break
n += 1
info["slot"] = slot
info["scripts"] = list_scripts()
info["categories"] = list_cats()
info["noofslots"] = len(pnfo_list)
return info
def nzo_files(self, info, nzo_id):
active = []
nzo = NzbQueue.do.get_nzo(nzo_id)
if nzo:
pnfo = nzo.gather_info(full=True)
info["nzo_id"] = pnfo.nzo_id
info["filename"] = pnfo.filename
for nzf in pnfo.active_files:
checked = False
if nzf.nzf_id in self.__cached_selection and self.__cached_selection[nzf.nzf_id] == "on":
checked = True
active.append(
{
"filename": nzf.filename if nzf.filename else nzf.subject,
"mbleft": "%.2f" % (nzf.bytes_left / MEBI),
"mb": "%.2f" % (nzf.bytes / MEBI),
"size": to_units(nzf.bytes, "B"),
"sizeleft": to_units(nzf.bytes_left, "B"),
"nzf_id": nzf.nzf_id,
"age": calc_age(nzf.date),
"checked": checked,
}
)
info["active_files"] = active
return info
def save_details(self, nzo_id, args, kwargs):
index = kwargs.get("index", None)
name = kwargs.get("name", None)
password = kwargs.get("password", None)
if password == "":
password = None
pp = kwargs.get("pp", None)
script = kwargs.get("script", None)
cat = kwargs.get("cat", None)
priority = kwargs.get("priority", None)
nzo = NzbQueue.do.get_nzo(nzo_id)
if index is not None:
NzbQueue.do.switch(nzo_id, index)
if name is not None:
NzbQueue.do.change_name(nzo_id, name, password)
if cat is not None and nzo.cat is not cat and not (nzo.cat == "*" and cat == "Default"):
NzbQueue.do.change_cat(nzo_id, cat, priority)
# Category changed, so make sure "Default" attributes aren't set again
if script == "Default":
script = None
if priority == "Default":
priority = None
if pp == "Default":
pp = None
if script is not None and nzo.script != script:
NzbQueue.do.change_script(nzo_id, script)
if pp is not None and nzo.pp != pp:
NzbQueue.do.change_opts(nzo_id, pp)
if priority is not None and nzo.priority != int(priority):
NzbQueue.do.set_priority(nzo_id, priority)
raise Raiser(urllib.parse.urljoin(self.__root, "../queue/"))
def bulk_operation(self, nzo_id, kwargs):
self.__cached_selection = kwargs
if kwargs["action_key"] == "Delete":
for key in kwargs:
if kwargs[key] == "on":
NzbQueue.do.remove_nzf(nzo_id, key, force_delete=True)
elif kwargs["action_key"] in ("Top", "Up", "Down", "Bottom"):
nzf_ids = []
for key in kwargs:
if kwargs[key] == "on":
nzf_ids.append(key)
size = int_conv(kwargs.get("action_size", 1))
if kwargs["action_key"] == "Top":
NzbQueue.do.move_top_bulk(nzo_id, nzf_ids)
elif kwargs["action_key"] == "Up":
NzbQueue.do.move_up_bulk(nzo_id, nzf_ids, size)
elif kwargs["action_key"] == "Down":
NzbQueue.do.move_down_bulk(nzo_id, nzf_ids, size)
elif kwargs["action_key"] == "Bottom":
NzbQueue.do.move_bottom_bulk(nzo_id, nzf_ids)
if NzbQueue.do.get_nzo(nzo_id):
url = urllib.parse.urljoin(self.__root, nzo_id)
else:
url = urllib.parse.urljoin(self.__root, "../queue")
if url and not url.endswith("/"):
url += "/"
raise Raiser(url)
##############################################################################
class QueuePage:
def __init__(self, root):
self.__root = root
@secured_expose
def index(self, **kwargs):
start = int_conv(kwargs.get("start"))
limit = int_conv(kwargs.get("limit"))
search = kwargs.get("search")
info, _pnfo_list, _bytespersec = build_queue(start=start, limit=limit, trans=True, search=search)
template = Template(
file=os.path.join(sabnzbd.WEB_DIR, "queue.tmpl"), searchList=[info], compilerSettings=CHEETAH_DIRECTIVES
)
return template.respond()
@secured_expose(check_api_key=True)
def delete(self, **kwargs):
uid = kwargs.get("uid")
del_files = int_conv(kwargs.get("del_files"))
if uid:
NzbQueue.do.remove(uid, add_to_history=False, delete_all_data=del_files)
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True)
def purge(self, **kwargs):
NzbQueue.do.remove_all(kwargs.get("search"))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True)
def change_queue_complete_action(self, **kwargs):
"""Action or script to be performed once the queue has been completed
Scripts are prefixed with 'script_'
"""
action = kwargs.get("action")
sabnzbd.change_queue_complete_action(action)
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True)
def switch(self, **kwargs):
uid1 = kwargs.get("uid1")
uid2 = kwargs.get("uid2")
if uid1 and uid2:
NzbQueue.do.switch(uid1, uid2)
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True)
def change_opts(self, **kwargs):
nzo_id = kwargs.get("nzo_id")
pp = kwargs.get("pp", "")
if nzo_id and pp and pp.isdigit():
NzbQueue.do.change_opts(nzo_id, int(pp))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True)
def change_script(self, **kwargs):
nzo_id = kwargs.get("nzo_id")
script = kwargs.get("script", "")
if nzo_id and script:
if script == "None":
script = None
NzbQueue.do.change_script(nzo_id, script)
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True)
def change_cat(self, **kwargs):
nzo_id = kwargs.get("nzo_id")
cat = kwargs.get("cat", "")
if nzo_id and cat:
if cat == "None":
cat = None
NzbQueue.do.change_cat(nzo_id, cat)
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True)
def shutdown(self, **kwargs):
sabnzbd.shutdown_program()
return T("SABnzbd shutdown finished")
@secured_expose(check_api_key=True)
def pause(self, **kwargs):
scheduler.plan_resume(0)
Downloader.do.pause()
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True)
def resume(self, **kwargs):
scheduler.plan_resume(0)
sabnzbd.unpause_all()
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True)
def pause_nzo(self, **kwargs):
uid = kwargs.get("uid", "")
NzbQueue.do.pause_multiple_nzo(uid.split(","))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True)
def resume_nzo(self, **kwargs):
uid = kwargs.get("uid", "")
NzbQueue.do.resume_multiple_nzo(uid.split(","))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True)
def set_priority(self, **kwargs):
NzbQueue.do.set_priority(kwargs.get("nzo_id"), kwargs.get("priority"))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True)
def sort_by_avg_age(self, **kwargs):
NzbQueue.do.sort_queue("avg_age", kwargs.get("dir"))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True)
def sort_by_name(self, **kwargs):
NzbQueue.do.sort_queue("name", kwargs.get("dir"))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True)
def sort_by_size(self, **kwargs):
NzbQueue.do.sort_queue("size", kwargs.get("dir"))
raise queueRaiser(self.__root, kwargs)
##############################################################################
class HistoryPage:
def __init__(self, root):
self.__root = root
self.__failed_only = False
@secured_expose
def index(self, **kwargs):
start = int_conv(kwargs.get("start"))
limit = int_conv(kwargs.get("limit"))
search = kwargs.get("search")
failed_only = kwargs.get("failed_only")
if failed_only is None:
failed_only = self.__failed_only
history = build_header()
history["failed_only"] = failed_only
history["rating_enable"] = bool(cfg.rating_enable())
postfix = T("B") # : Abbreviation for bytes, as in GB
grand, month, week, day = BPSMeter.do.get_sums()
history["total_size"], history["month_size"], history["week_size"], history["day_size"] = (
to_units(grand, postfix=postfix),
to_units(month, postfix=postfix),
to_units(week, postfix=postfix),
to_units(day, postfix=postfix),
)
history["lines"], history["fetched"], history["noofslots"] = build_history(
start=start, limit=limit, search=search, failed_only=failed_only
)
if search:
history["search"] = escape(search)
else:
history["search"] = ""
history["start"] = int_conv(start)
history["limit"] = int_conv(limit)
history["finish"] = history["start"] + history["limit"]
if history["finish"] > history["noofslots"]:
history["finish"] = history["noofslots"]
if not history["finish"]:
history["finish"] = history["fetched"]
history["time_format"] = time_format
template = Template(
file=os.path.join(sabnzbd.WEB_DIR, "history.tmpl"),
searchList=[history],
compilerSettings=CHEETAH_DIRECTIVES,
)
return template.respond()
@secured_expose(check_api_key=True)
def purge(self, **kwargs):
history_db = sabnzbd.get_db_connection()
history_db.remove_history()
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True)
def delete(self, **kwargs):
job = kwargs.get("job")
del_files = int_conv(kwargs.get("del_files"))
if job:
jobs = job.split(",")
for job in jobs:
del_hist_job(job, del_files=del_files)
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True)
def retry_pp(self, **kwargs):
retry_job(kwargs.get("job"), kwargs.get("nzbfile"), kwargs.get("password"))
raise queueRaiser(self.__root, kwargs)
##############################################################################
class ConfigPage:
def __init__(self, root):
self.__root = root
self.folders = ConfigFolders("/config/folders/")
self.notify = ConfigNotify("/config/notify/")
self.general = ConfigGeneral("/config/general/")
self.rss = ConfigRss("/config/rss/")
self.scheduling = ConfigScheduling("/config/scheduling/")
self.server = ConfigServer("/config/server/")
self.switches = ConfigSwitches("/config/switches/")
self.categories = ConfigCats("/config/categories/")
self.sorting = ConfigSorting("/config/sorting/")
self.special = ConfigSpecial("/config/special/")
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf["configfn"] = clip_path(config.get_filename())
conf["cmdline"] = sabnzbd.CMDLINE
conf["build"] = sabnzbd.__baseline__[:7]
conf["have_unzip"] = bool(sabnzbd.newsunpack.ZIP_COMMAND)
conf["have_7zip"] = bool(sabnzbd.newsunpack.SEVEN_COMMAND)
conf["have_sabyenc"] = SABYENC_ENABLED
conf["have_mt_par2"] = sabnzbd.newsunpack.PAR2_MT
conf["certificate_validation"] = sabnzbd.CERTIFICATE_VALIDATION
conf["ssl_version"] = ssl.OPENSSL_VERSION
new = {}
for svr in config.get_servers():
new[svr] = {}
conf["servers"] = new
conf["folders"] = NzbQueue.do.scan_jobs(all_jobs=False, action=False)
template = Template(
file=os.path.join(sabnzbd.WEB_DIR_CONFIG, "config.tmpl"),
searchList=[conf],
compilerSettings=CHEETAH_DIRECTIVES,
)
return template.respond()
@secured_expose(check_api_key=True)
def restart(self, **kwargs):
logging.info("Restart requested by interface")
# Do the shutdown async to still send goodbye to browser
Thread(target=sabnzbd.trigger_restart, kwargs={"timeout": 1}).start()
return T(
' <br />SABnzbd shutdown finished.<br />Wait for about 5 second and then click the button below.<br /><br /><strong><a href="..">Refresh</a></strong><br />'
)
@secured_expose(check_api_key=True)
def repair(self, **kwargs):
logging.info("Queue repair requested by interface")
sabnzbd.request_repair()
# Do the shutdown async to still send goodbye to browser
Thread(target=sabnzbd.trigger_restart, kwargs={"timeout": 1}).start()
return T(
' <br />SABnzbd shutdown finished.<br />Wait for about 5 second and then click the button below.<br /><br /><strong><a href="..">Refresh</a></strong><br />'
)
##############################################################################
LIST_DIRPAGE = (
"download_dir",
"download_free",
"complete_dir",
"admin_dir",
"nzb_backup_dir",
"dirscan_dir",
"dirscan_speed",
"script_dir",
"email_dir",
"permissions",
"log_dir",
"password_file",
)
class ConfigFolders:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
for kw in LIST_DIRPAGE:
conf[kw] = config.get_config("misc", kw)()
template = Template(
file=os.path.join(sabnzbd.WEB_DIR_CONFIG, "config_folders.tmpl"),
searchList=[conf],
compilerSettings=CHEETAH_DIRECTIVES,
)
return template.respond()
@secured_expose(check_api_key=True, check_configlock=True)
def saveDirectories(self, **kwargs):
for kw in LIST_DIRPAGE:
value = kwargs.get(kw)
if value is not None:
if kw in ("complete_dir", "dirscan_dir"):
msg = config.get_config("misc", kw).set(value, create=True)
else:
msg = config.get_config("misc", kw).set(value)
if msg:
# return sabnzbd.api.report('json', error=msg)
return badParameterResponse(msg, kwargs.get("ajax"))
sabnzbd.check_incomplete_vs_complete()
config.save_config()
if kwargs.get("ajax"):
return sabnzbd.api.report("json")
else:
raise Raiser(self.__root)
##############################################################################
SWITCH_LIST = (
"par_option",
"top_only",
"direct_unpack",
"enable_meta",
"win_process_prio",
"auto_sort",
"propagation_delay",
"auto_disconnect",
"flat_unpack",
"safe_postproc",
"no_dupes",
"replace_spaces",
"replace_dots",
"ignore_samples",
"pause_on_post_processing",
"nice",
"ionice",
"pre_script",
"pause_on_pwrar",
"sfv_check",
"deobfuscate_final_filenames",
"folder_rename",
"load_balancing",
"quota_size",
"quota_day",
"quota_resume",
"quota_period",
"history_retention",
"pre_check",
"max_art_tries",
"fail_hopeless_jobs",
"enable_all_par",
"enable_recursive",
"no_series_dupes",
"series_propercheck",
"script_can_fail",
"new_nzb_on_failure",
"unwanted_extensions",
"action_on_unwanted_extensions",
"sanitize_safe",
"rating_enable",
"rating_api_key",
"rating_filter_enable",
"rating_filter_abort_audio",
"rating_filter_abort_video",
"rating_filter_abort_encrypted",
"rating_filter_abort_encrypted_confirm",
"rating_filter_abort_spam",
"rating_filter_abort_spam_confirm",
"rating_filter_abort_downvoted",
"rating_filter_abort_keywords",
"rating_filter_pause_audio",
"rating_filter_pause_video",
"rating_filter_pause_encrypted",
"rating_filter_pause_encrypted_confirm",
"rating_filter_pause_spam",
"rating_filter_pause_spam_confirm",
"rating_filter_pause_downvoted",
"rating_filter_pause_keywords",
)
class ConfigSwitches:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf["certificate_validation"] = sabnzbd.CERTIFICATE_VALIDATION
conf["have_nice"] = bool(sabnzbd.newsunpack.NICE_COMMAND)
conf["have_ionice"] = bool(sabnzbd.newsunpack.IONICE_COMMAND)
conf["cleanup_list"] = cfg.cleanup_list.get_string()
for kw in SWITCH_LIST:
conf[kw] = config.get_config("misc", kw)()
conf["unwanted_extensions"] = cfg.unwanted_extensions.get_string()
conf["scripts"] = list_scripts() or ["None"]
template = Template(
file=os.path.join(sabnzbd.WEB_DIR_CONFIG, "config_switches.tmpl"),
searchList=[conf],
compilerSettings=CHEETAH_DIRECTIVES,
)
return template.respond()
@secured_expose(check_api_key=True, check_configlock=True)
def saveSwitches(self, **kwargs):
for kw in SWITCH_LIST:
item = config.get_config("misc", kw)
value = kwargs.get(kw)
if kw == "unwanted_extensions" and value:
value = value.lower().replace(".", "")
msg = item.set(value)
if msg:
return badParameterResponse(msg, kwargs.get("ajax"))
cleanup_list = kwargs.get("cleanup_list")
if cleanup_list and sabnzbd.WIN32:
cleanup_list = cleanup_list.lower()
cfg.cleanup_list.set(cleanup_list)
config.save_config()
if kwargs.get("ajax"):
return sabnzbd.api.report("json")
else:
raise Raiser(self.__root)
##############################################################################
SPECIAL_BOOL_LIST = (
"start_paused",
"no_penalties",
"fast_fail",
"overwrite_files",
"enable_par_cleanup",
"queue_complete_pers",
"api_warnings",
"helpfull_warnings",
"ampm",
"enable_unrar",
"enable_unzip",
"enable_7zip",
"enable_filejoin",
"enable_tsjoin",
"ignore_unrar_dates",
"osx_menu",
"osx_speed",
"win_menu",
"allow_incomplete_nzb",
"rss_filenames",
"ipv6_hosting",
"keep_awake",
"empty_postproc",
"html_login",
"wait_for_dfolder",
"max_art_opt",
"enable_bonjour",
"warn_dupl_jobs",
"replace_illegal",
"backup_for_duplicates",
"disable_api_key",
"api_logging",
"x_frame_options",
"require_modern_tls",
)
SPECIAL_VALUE_LIST = (
"size_limit",
"movie_rename_limit",
"nomedia_marker",
"max_url_retries",
"req_completion_rate",
"wait_ext_drive",
"max_foldername_length",
"show_sysload",
"url_base",
"direct_unpack_threads",
"ipv6_servers",
"selftest_host",
"rating_host",
)
SPECIAL_LIST_LIST = ("rss_odd_titles", "quick_check_ext_ignore", "host_whitelist")
class ConfigSpecial:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf["switches"] = [
(kw, config.get_config("misc", kw)(), config.get_config("misc", kw).default()) for kw in SPECIAL_BOOL_LIST
]
conf["entries"] = [
(kw, config.get_config("misc", kw)(), config.get_config("misc", kw).default()) for kw in SPECIAL_VALUE_LIST
]
conf["entries"].extend(
[
(kw, config.get_config("misc", kw).get_string(), config.get_config("misc", kw).default_string())
for kw in SPECIAL_LIST_LIST
]
)
template = Template(
file=os.path.join(sabnzbd.WEB_DIR_CONFIG, "config_special.tmpl"),
searchList=[conf],
compilerSettings=CHEETAH_DIRECTIVES,
)
return template.respond()
@secured_expose(check_api_key=True, check_configlock=True)
def saveSpecial(self, **kwargs):
for kw in SPECIAL_BOOL_LIST + SPECIAL_VALUE_LIST + SPECIAL_LIST_LIST:
item = config.get_config("misc", kw)
value = kwargs.get(kw)
msg = item.set(value)
if msg:
return badParameterResponse(msg)
config.save_config()
raise Raiser(self.__root)
##############################################################################
GENERAL_LIST = (
"host",
"port",
"username",
"refresh_rate",
"language",
"cache_limit",
"local_ranges",
"inet_exposure",
"enable_https",
"https_port",
"https_cert",
"https_key",
"https_chain",
"enable_https_verification",
"auto_browser",
"check_new_rel",
)
class ConfigGeneral:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
def ListColors(web_dir):
lst = []
web_dir = os.path.join(sabnzbd.DIR_INTERFACES, web_dir)
dd = os.path.abspath(web_dir + "/templates/static/stylesheets/colorschemes")
if (not dd) or (not os.access(dd, os.R_OK)):
return lst
for color in globber(dd):
col = color.replace(".css", "")
lst.append(col)
return lst
def add_color(skin_dir, color):
if skin_dir:
if not color:
try:
color = DEF_SKIN_COLORS[skin_dir.lower()]
except KeyError:
return skin_dir
return "%s - %s" % (skin_dir, color)
else:
return ""
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf["configfn"] = config.get_filename()
conf["certificate_validation"] = sabnzbd.CERTIFICATE_VALIDATION
wlist = []
interfaces = globber_full(sabnzbd.DIR_INTERFACES)
for k in interfaces:
if k.endswith(DEF_STDCONFIG):
interfaces.remove(k)
continue
for web in interfaces:
rweb = os.path.basename(web)
if os.access(os.path.join(web, DEF_MAIN_TMPL), os.R_OK):
cols = ListColors(rweb)
if cols:
for col in cols:
wlist.append(add_color(rweb, col))
else:
wlist.append(rweb)
conf["web_list"] = wlist
conf["web_dir"] = add_color(cfg.web_dir(), cfg.web_color())
conf["password"] = cfg.password.get_stars()
conf["language"] = cfg.language()
lang_list = list_languages()
if len(lang_list) < 2:
lang_list = []
conf["lang_list"] = lang_list
for kw in GENERAL_LIST:
conf[kw] = config.get_config("misc", kw)()
conf["bandwidth_max"] = cfg.bandwidth_max()
conf["bandwidth_perc"] = cfg.bandwidth_perc()
conf["nzb_key"] = cfg.nzb_key()
conf["local_ranges"] = cfg.local_ranges.get_string()
conf["my_lcldata"] = cfg.admin_dir.get_clipped_path()
conf["caller_url"] = cherrypy.request.base + cfg.url_base()
template = Template(
file=os.path.join(sabnzbd.WEB_DIR_CONFIG, "config_general.tmpl"),
searchList=[conf],
compilerSettings=CHEETAH_DIRECTIVES,
)
return template.respond()
@secured_expose(check_api_key=True, check_configlock=True)
def saveGeneral(self, **kwargs):
# Handle general options
for kw in GENERAL_LIST:
item = config.get_config("misc", kw)
value = kwargs.get(kw)
msg = item.set(value)
if msg:
return badParameterResponse(msg)
# Handle special options
cfg.password.set(kwargs.get("password"))
web_dir = kwargs.get("web_dir")
change_web_dir(web_dir)
bandwidth_max = kwargs.get("bandwidth_max")
if bandwidth_max is not None:
cfg.bandwidth_max.set(bandwidth_max)
bandwidth_perc = kwargs.get("bandwidth_perc")
if bandwidth_perc is not None:
cfg.bandwidth_perc.set(bandwidth_perc)
bandwidth_perc = cfg.bandwidth_perc()
if bandwidth_perc and not bandwidth_max:
logging.warning_helpful(T("You must set a maximum bandwidth before you can set a bandwidth limit"))
config.save_config()
# Update CherryPy authentication
set_auth(cherrypy.config)
if kwargs.get("ajax"):
return sabnzbd.api.report("json", data={"success": True, "restart_req": sabnzbd.RESTART_REQ})
else:
raise Raiser(self.__root)
def change_web_dir(web_dir):
try:
web_dir, web_color = web_dir.split(" - ")
except:
try:
web_color = DEF_SKIN_COLORS[web_dir.lower()]
except:
web_color = ""
web_dir_path = real_path(sabnzbd.DIR_INTERFACES, web_dir)
if not os.path.exists(web_dir_path):
return badParameterResponse("Cannot find web template: %s" % web_dir_path)
else:
cfg.web_dir.set(web_dir)
cfg.web_color.set(web_color)
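# Illustrative note (not part of the original module): change_web_dir() accepts
# either "<skin> - <color>" or a bare skin name. "Glitter - Default" splits into
# web_dir "Glitter" and web_color "Default"; a bare "Glitter" falls back to the
# entry for that skin in DEF_SKIN_COLORS, if one exists.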
##############################################################################
class ConfigServer:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
new = []
servers = config.get_servers()
server_names = sorted(
list(servers.keys()),
key=lambda svr: "%d%02d%s"
% (int(not servers[svr].enable()), servers[svr].priority(), servers[svr].displayname().lower()),
)
for svr in server_names:
new.append(servers[svr].get_dict(safe=True))
t, m, w, d, timeline = BPSMeter.do.amounts(svr)
if t:
new[-1]["amounts"] = to_units(t), to_units(m), to_units(w), to_units(d), timeline
conf["servers"] = new
conf["cats"] = list_cats(default=True)
conf["certificate_validation"] = sabnzbd.CERTIFICATE_VALIDATION
template = Template(
file=os.path.join(sabnzbd.WEB_DIR_CONFIG, "config_server.tmpl"),
searchList=[conf],
compilerSettings=CHEETAH_DIRECTIVES,
)
return template.respond()
@secured_expose(check_api_key=True, check_configlock=True)
def addServer(self, **kwargs):
return handle_server(kwargs, self.__root, True)
@secured_expose(check_api_key=True, check_configlock=True)
def saveServer(self, **kwargs):
return handle_server(kwargs, self.__root)
@secured_expose(check_api_key=True, check_configlock=True)
def testServer(self, **kwargs):
return handle_server_test(kwargs, self.__root)
@secured_expose(check_api_key=True, check_configlock=True)
def delServer(self, **kwargs):
kwargs["section"] = "servers"
kwargs["keyword"] = kwargs.get("server")
del_from_section(kwargs)
raise Raiser(self.__root)
@secured_expose(check_api_key=True, check_configlock=True)
def clrServer(self, **kwargs):
server = kwargs.get("server")
if server:
BPSMeter.do.clear_server(server)
raise Raiser(self.__root)
@secured_expose(check_api_key=True, check_configlock=True)
def toggleServer(self, **kwargs):
server = kwargs.get("server")
if server:
svr = config.get_config("servers", server)
if svr:
svr.enable.set(not svr.enable())
config.save_config()
Downloader.do.update_server(server, server)
raise Raiser(self.__root)
def unique_svr_name(server):
""" Return a unique variant on given server name """
num = 0
svr = 1
new_name = server
while svr:
if num:
new_name = "%s@%d" % (server, num)
else:
new_name = "%s" % server
svr = config.get_config("servers", new_name)
num += 1
return new_name
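# Example: with "news.example.com" and "news.example.com@1" already configured,
# unique_svr_name("news.example.com") returns "news.example.com@2" (hostname is illustrative).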
def check_server(host, port, ajax):
""" Check if server address resolves properly """
if host.lower() == "localhost" and sabnzbd.AMBI_LOCALHOST:
return badParameterResponse(T("Warning: LOCALHOST is ambiguous, use numerical IP-address."), ajax)
if GetServerParms(host, int_conv(port)):
return ""
else:
return badParameterResponse(T('Server address "%s:%s" is not valid.') % (host, port), ajax)
def handle_server(kwargs, root=None, new_svr=False):
""" Internal server handler """
ajax = kwargs.get("ajax")
host = kwargs.get("host", "").strip()
if not host:
return badParameterResponse(T("Server address required"), ajax)
port = kwargs.get("port", "").strip()
if not port:
if not kwargs.get("ssl", "").strip():
port = "119"
else:
port = "563"
kwargs["port"] = port
if kwargs.get("connections", "").strip() == "":
kwargs["connections"] = "1"
if kwargs.get("enable") == "1":
msg = check_server(host, port, ajax)
if msg:
return msg
# Default server name is just the host name
server = host
svr = None
old_server = kwargs.get("server")
if old_server:
svr = config.get_config("servers", old_server)
if svr:
server = old_server
else:
svr = config.get_config("servers", server)
if new_svr:
server = unique_svr_name(server)
for kw in ("ssl", "send_group", "enable", "optional"):
if kw not in kwargs.keys():
kwargs[kw] = None
if svr and not new_svr:
svr.set_dict(kwargs)
else:
old_server = None
config.ConfigServer(server, kwargs)
config.save_config()
Downloader.do.update_server(old_server, server)
if root:
if ajax:
return sabnzbd.api.report("json")
else:
raise Raiser(root)
def handle_server_test(kwargs, root):
_result, msg = test_nntp_server_dict(kwargs)
return msg
##############################################################################
class ConfigRss:
def __init__(self, root):
self.__root = root
self.__refresh_readout = None # Set to URL when new readout is needed
self.__refresh_download = False # True when feed needs to be read
self.__refresh_force = False # True if forced download of all matches is required
self.__refresh_ignore = False # True if first batch of new feed must be ignored
self.__evaluate = False # True if feed needs to be re-filtered
self.__show_eval_button = False # True if the "Apply filters" button should be shown
self.__last_msg = "" # Last error message from RSS reader
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf["scripts"] = list_scripts(default=True)
pick_script = conf["scripts"] != []
conf["categories"] = list_cats(default=True)
pick_cat = conf["categories"] != []
conf["rss_rate"] = cfg.rss_rate()
rss = {}
feeds = config.get_rss()
for feed in feeds:
rss[feed] = feeds[feed].get_dict()
filters = feeds[feed].filters()
rss[feed]["filters"] = filters
rss[feed]["filter_states"] = [bool(sabnzbd.rss.convert_filter(f[4])) for f in filters]
rss[feed]["filtercount"] = len(filters)
rss[feed]["pick_cat"] = pick_cat
rss[feed]["pick_script"] = pick_script
rss[feed]["link"] = urllib.parse.quote_plus(feed.encode("utf-8"))
rss[feed]["baselink"] = [get_base_url(uri) for uri in rss[feed]["uri"]]
rss[feed]["uris"] = feeds[feed].uri.get_string()
active_feed = kwargs.get("feed", "")
conf["active_feed"] = active_feed
conf["rss"] = rss
conf["rss_next"] = time.strftime(time_format("%H:%M"), time.localtime(sabnzbd.rss.next_run()))
if active_feed:
readout = bool(self.__refresh_readout)
logging.debug("RSS READOUT = %s", readout)
if not readout:
self.__refresh_download = False
self.__refresh_force = False
self.__refresh_ignore = False
if self.__evaluate:
msg = sabnzbd.rss.run_feed(
active_feed,
download=self.__refresh_download,
force=self.__refresh_force,
ignoreFirst=self.__refresh_ignore,
readout=readout,
)
else:
msg = ""
self.__evaluate = False
if readout:
sabnzbd.rss.save()
self.__last_msg = msg
else:
msg = self.__last_msg
self.__refresh_readout = None
conf["evalButton"] = self.__show_eval_button
conf["error"] = msg
conf["downloaded"], conf["matched"], conf["unmatched"] = GetRssLog(active_feed)
else:
self.__last_msg = ""
# Find a unique new Feed name
unum = 1
txt = T("Feed") # : Used as default Feed name in Config->RSS
while txt + str(unum) in feeds:
unum += 1
conf["feed"] = txt + str(unum)
template = Template(
file=os.path.join(sabnzbd.WEB_DIR_CONFIG, "config_rss.tmpl"),
searchList=[conf],
compilerSettings=CHEETAH_DIRECTIVES,
)
return template.respond()
@secured_expose(check_api_key=True, check_configlock=True)
def save_rss_rate(self, **kwargs):
""" Save changed RSS automatic readout rate """
cfg.rss_rate.set(kwargs.get("rss_rate"))
config.save_config()
scheduler.restart()
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True, check_configlock=True)
def upd_rss_feed(self, **kwargs):
"""Update Feed level attributes,
legacy version: ignores 'enable' parameter
"""
if kwargs.get("enable") is not None:
del kwargs["enable"]
try:
cf = config.get_rss()[kwargs.get("feed")]
except KeyError:
cf = None
uri = Strip(kwargs.get("uri"))
if cf and uri:
kwargs["uri"] = uri
cf.set_dict(kwargs)
config.save_config()
self.__evaluate = False
self.__show_eval_button = True
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True, check_configlock=True)
def save_rss_feed(self, **kwargs):
""" Update Feed level attributes """
try:
cf = config.get_rss()[kwargs.get("feed")]
except KeyError:
cf = None
if "enable" not in kwargs:
kwargs["enable"] = 0
uri = Strip(kwargs.get("uri"))
if cf and uri:
kwargs["uri"] = uri
cf.set_dict(kwargs)
config.save_config()
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True, check_configlock=True)
def toggle_rss_feed(self, **kwargs):
""" Toggle automatic read-out flag of Feed """
try:
item = config.get_rss()[kwargs.get("feed")]
except KeyError:
item = None
if item:
item.enable.set(not item.enable())
config.save_config()
if kwargs.get("table"):
raise Raiser(self.__root)
else:
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True, check_configlock=True)
def add_rss_feed(self, **kwargs):
""" Add one new RSS feed definition """
feed = Strip(kwargs.get("feed")).strip("[]")
uri = Strip(kwargs.get("uri"))
if feed and uri:
try:
cfg = config.get_rss()[feed]
except KeyError:
cfg = None
if (not cfg) and uri:
kwargs["feed"] = feed
kwargs["uri"] = uri
config.ConfigRSS(feed, kwargs)
# Clear out any existing reference to this feed name
# Otherwise first-run detection can fail
sabnzbd.rss.clear_feed(feed)
config.save_config()
self.__refresh_readout = feed
self.__refresh_download = False
self.__refresh_force = False
self.__refresh_ignore = True
self.__evaluate = True
raise rssRaiser(self.__root, kwargs)
else:
raise Raiser(self.__root)
else:
raise Raiser(self.__root)
@secured_expose(check_api_key=True, check_configlock=True)
def upd_rss_filter(self, **kwargs):
""" Wrapper, so we can call from api.py """
self.internal_upd_rss_filter(**kwargs)
def internal_upd_rss_filter(self, **kwargs):
""" Save updated filter definition """
try:
feed_cfg = config.get_rss()[kwargs.get("feed")]
except KeyError:
raise rssRaiser(self.__root, kwargs)
pp = kwargs.get("pp")
if IsNone(pp):
pp = ""
script = ConvertSpecials(kwargs.get("script"))
cat = ConvertSpecials(kwargs.get("cat"))
prio = ConvertSpecials(kwargs.get("priority"))
filt = kwargs.get("filter_text")
enabled = kwargs.get("enabled", "0")
if filt:
feed_cfg.filters.update(
int(kwargs.get("index", 0)), (cat, pp, script, kwargs.get("filter_type"), filt, prio, enabled)
)
# Move filter if requested
index = int_conv(kwargs.get("index", ""))
new_index = kwargs.get("new_index", "")
if new_index and int_conv(new_index) != index:
feed_cfg.filters.move(int(index), int_conv(new_index))
config.save_config()
self.__evaluate = False
self.__show_eval_button = True
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True, check_configlock=True)
def del_rss_feed(self, *args, **kwargs):
""" Remove complete RSS feed """
kwargs["section"] = "rss"
kwargs["keyword"] = kwargs.get("feed")
del_from_section(kwargs)
sabnzbd.rss.clear_feed(kwargs.get("feed"))
raise Raiser(self.__root)
@secured_expose(check_api_key=True, check_configlock=True)
def del_rss_filter(self, **kwargs):
""" Wrapper, so we can call from api.py """
self.internal_del_rss_filter(**kwargs)
def internal_del_rss_filter(self, **kwargs):
""" Remove one RSS filter """
try:
feed_cfg = config.get_rss()[kwargs.get("feed")]
except KeyError:
raise rssRaiser(self.__root, kwargs)
feed_cfg.filters.delete(int(kwargs.get("index", 0)))
config.save_config()
self.__evaluate = False
self.__show_eval_button = True
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True, check_configlock=True)
def download_rss_feed(self, *args, **kwargs):
""" Force download of all matching jobs in a feed """
if "feed" in kwargs:
feed = kwargs["feed"]
self.__refresh_readout = feed
self.__refresh_download = True
self.__refresh_force = True
self.__refresh_ignore = False
self.__evaluate = True
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True, check_configlock=True)
def clean_rss_jobs(self, *args, **kwargs):
""" Remove processed RSS jobs from UI """
sabnzbd.rss.clear_downloaded(kwargs["feed"])
self.__evaluate = True
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True, check_configlock=True)
def test_rss_feed(self, *args, **kwargs):
""" Read the feed content again and show results """
if "feed" in kwargs:
feed = kwargs["feed"]
self.__refresh_readout = feed
self.__refresh_download = False
self.__refresh_force = False
self.__refresh_ignore = True
self.__evaluate = True
self.__show_eval_button = False
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True, check_configlock=True)
def eval_rss_feed(self, *args, **kwargs):
""" Re-apply the filters to the feed """
if "feed" in kwargs:
self.__refresh_download = False
self.__refresh_force = False
self.__refresh_ignore = False
self.__show_eval_button = False
self.__evaluate = True
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True, check_configlock=True)
def download(self, **kwargs):
""" Download NZB from provider (Download button) """
feed = kwargs.get("feed")
url = kwargs.get("url")
nzbname = kwargs.get("nzbname")
att = sabnzbd.rss.lookup_url(feed, url)
if att:
pp = att.get("pp")
cat = att.get("cat")
script = att.get("script")
prio = att.get("prio")
if url:
sabnzbd.add_url(url, pp, script, cat, prio, nzbname)
# Need to pass the title instead
sabnzbd.rss.flag_downloaded(feed, url)
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_api_key=True, check_configlock=True)
def rss_now(self, *args, **kwargs):
""" Run an automatic RSS run now """
scheduler.force_rss()
raise rssRaiser(self.__root, kwargs)
def ConvertSpecials(p):
""" Convert None to 'None' and 'Default' to '' """
if p is None:
p = "None"
elif p.lower() == T("Default").lower():
p = ""
return p
def IsNone(value):
""" Return True if either None, 'None' or '' """
return value is None or value == "" or value.lower() == "none"
def Strip(txt):
""" Return stripped string, can handle None """
try:
return txt.strip()
except:
return None
##############################################################################
_SCHED_ACTIONS = (
"resume",
"pause",
"pause_all",
"shutdown",
"restart",
"speedlimit",
"pause_post",
"resume_post",
"scan_folder",
"rss_scan",
"remove_failed",
"remove_completed",
"pause_all_low",
"pause_all_normal",
"pause_all_high",
"resume_all_low",
"resume_all_normal",
"resume_all_high",
"enable_quota",
"disable_quota",
)
class ConfigScheduling:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
def get_days():
days = {
"*": T("Daily"),
"1": T("Monday"),
"2": T("Tuesday"),
"3": T("Wednesday"),
"4": T("Thursday"),
"5": T("Friday"),
"6": T("Saturday"),
"7": T("Sunday"),
}
return days
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
actions = []
actions.extend(_SCHED_ACTIONS)
day_names = get_days()
categories = list_cats(False)
snum = 1
conf["schedlines"] = []
conf["taskinfo"] = []
for ev in scheduler.sort_schedules(all_events=False):
line = ev[3]
conf["schedlines"].append(line)
try:
enabled, m, h, day_numbers, action = line.split(" ", 4)
except:
continue
action = action.strip()
try:
action, value = action.split(" ", 1)
except:
value = ""
value = value.strip()
if value and not value.lower().strip("0123456789kmgtp%."):
if "%" not in value and from_units(value) < 1.0:
value = T("off") # : "Off" value for speedlimit in scheduler
else:
if "%" not in value and 1 < int_conv(value) < 101:
value += "%"
value = value.upper()
if action in actions:
action = Ttemplate("sch-" + action)
else:
if action in ("enable_server", "disable_server"):
try:
value = '"%s"' % config.get_servers()[value].displayname()
except KeyError:
value = '"%s" <<< %s' % (value, T("Undefined server!"))
action = Ttemplate("sch-" + action)
if action in ("pause_cat", "resume_cat"):
action = Ttemplate("sch-" + action)
if value not in categories:
# Category name change
value = '"%s" <<< %s' % (value, T("Incorrect parameter"))
else:
value = '"%s"' % value
if day_numbers == "1234567":
days_of_week = "Daily"
elif day_numbers == "12345":
days_of_week = "Weekdays"
elif day_numbers == "67":
days_of_week = "Weekends"
else:
days_of_week = ", ".join([day_names.get(i, "**") for i in day_numbers])
item = (snum, "%02d" % int(h), "%02d" % int(m), days_of_week, "%s %s" % (action, value), enabled)
conf["taskinfo"].append(item)
snum += 1
actions_lng = {}
for action in actions:
actions_lng[action] = Ttemplate("sch-" + action)
actions_servers = {}
servers = config.get_servers()
for srv in servers:
actions_servers[srv] = servers[srv].displayname()
conf["actions_servers"] = actions_servers
conf["actions"] = actions
conf["actions_lng"] = actions_lng
conf["categories"] = categories
template = Template(
file=os.path.join(sabnzbd.WEB_DIR_CONFIG, "config_scheduling.tmpl"),
searchList=[conf],
compilerSettings=CHEETAH_DIRECTIVES,
)
return template.respond()
@secured_expose(check_api_key=True, check_configlock=True)
def addSchedule(self, **kwargs):
servers = config.get_servers()
minute = kwargs.get("minute")
hour = kwargs.get("hour")
days_of_week = "".join([str(x) for x in kwargs.get("daysofweek", "")])
if not days_of_week:
days_of_week = "1234567"
action = kwargs.get("action")
arguments = kwargs.get("arguments")
arguments = arguments.strip().lower()
if arguments in ("on", "enable"):
arguments = "1"
elif arguments in ("off", "disable"):
arguments = "0"
if minute and hour and days_of_week and action:
if action == "speedlimit":
if not arguments or arguments.strip("0123456789kmgtp%."):
arguments = 0
elif action in _SCHED_ACTIONS:
arguments = ""
elif action in servers:
if arguments == "1":
arguments = action
action = "enable_server"
else:
arguments = action
action = "disable_server"
elif action in ("pause_cat", "resume_cat"):
# Need original category name, not lowercased
arguments = arguments.strip()
else:
# Something else, leave empty
action = None
if action:
sched = cfg.schedules()
sched.append("%s %s %s %s %s %s" % (1, minute, hour, days_of_week, action, arguments))
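# Stored schedule lines have the form "<enabled> <minute> <hour> <days> <action> <argument>",
# e.g. "1 30 7 1234567 speedlimit 50%" (illustrative values only).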
cfg.schedules.set(sched)
config.save_config()
scheduler.restart(force=True)
raise Raiser(self.__root)
@secured_expose(check_api_key=True, check_configlock=True)
def delSchedule(self, **kwargs):
schedules = cfg.schedules()
line = kwargs.get("line")
if line and line in schedules:
schedules.remove(line)
cfg.schedules.set(schedules)
config.save_config()
scheduler.restart(force=True)
raise Raiser(self.__root)
@secured_expose(check_api_key=True, check_configlock=True)
def toggleSchedule(self, **kwargs):
schedules = cfg.schedules()
line = kwargs.get("line")
if line:
for i, schedule in enumerate(schedules):
if schedule == line:
# Toggle the schedule
schedule_split = schedule.split()
schedule_split[0] = "%d" % (schedule_split[0] == "0")
schedules[i] = " ".join(schedule_split)
break
cfg.schedules.set(schedules)
config.save_config()
scheduler.restart(force=True)
raise Raiser(self.__root)
##############################################################################
class ConfigCats:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf["scripts"] = list_scripts(default=True)
conf["defdir"] = cfg.complete_dir.get_clipped_path()
categories = config.get_ordered_categories()
conf["have_cats"] = len(categories) > 1
slotinfo = []
for cat in categories:
cat["newzbin"] = cat["newzbin"].replace('"', """)
slotinfo.append(cat)
# Add empty line
empty = {
"name": "",
"order": "0",
"pp": "-1",
"script": "",
"dir": "",
"newzbin": "",
"priority": DEFAULT_PRIORITY,
}
slotinfo.insert(1, empty)
conf["slotinfo"] = slotinfo
template = Template(
file=os.path.join(sabnzbd.WEB_DIR_CONFIG, "config_cat.tmpl"),
searchList=[conf],
compilerSettings=CHEETAH_DIRECTIVES,
)
return template.respond()
@secured_expose(check_api_key=True, check_configlock=True)
def delete(self, **kwargs):
kwargs["section"] = "categories"
kwargs["keyword"] = kwargs.get("name")
del_from_section(kwargs)
raise Raiser(self.__root)
@secured_expose(check_api_key=True, check_configlock=True)
def save(self, **kwargs):
name = kwargs.get("name", "*")
if name == "*":
newname = name
else:
newname = re.sub('"', "", kwargs.get("newname", ""))
if newname:
# Check if this cat-dir is not sub-folder of incomplete
if same_file(cfg.download_dir.get_path(), real_path(cfg.complete_dir.get_path(), kwargs["dir"])):
return T("Category folder cannot be a subfolder of the Temporary Download Folder.")
# Delete current one and replace with new one
if name:
config.delete("categories", name)
config.ConfigCat(newname.lower(), kwargs)
config.save_config()
raise Raiser(self.__root)
##############################################################################
SORT_LIST = (
"enable_tv_sorting",
"tv_sort_string",
"tv_categories",
"enable_movie_sorting",
"movie_sort_string",
"movie_sort_extra",
"movie_extra_folder",
"enable_date_sorting",
"date_sort_string",
"movie_categories",
"date_categories",
)
class ConfigSorting:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf["complete_dir"] = cfg.complete_dir.get_clipped_path()
for kw in SORT_LIST:
conf[kw] = config.get_config("misc", kw)()
conf["categories"] = list_cats(False)
template = Template(
file=os.path.join(sabnzbd.WEB_DIR_CONFIG, "config_sorting.tmpl"),
searchList=[conf],
compilerSettings=CHEETAH_DIRECTIVES,
)
return template.respond()
@secured_expose(check_api_key=True, check_configlock=True)
def saveSorting(self, **kwargs):
try:
kwargs["movie_categories"] = kwargs["movie_cat"]
except:
pass
try:
kwargs["date_categories"] = kwargs["date_cat"]
except:
pass
try:
kwargs["tv_categories"] = kwargs["tv_cat"]
except:
pass
for kw in SORT_LIST:
item = config.get_config("misc", kw)
value = kwargs.get(kw)
msg = item.set(value)
if msg:
return badParameterResponse(msg)
config.save_config()
raise Raiser(self.__root)
##############################################################################
LOG_API_RE = re.compile(rb"(apikey|api)(=|:)[\w]+", re.I)
LOG_API_JSON_RE = re.compile(rb"'(apikey|api)': '[\w]+'", re.I)
LOG_USER_RE = re.compile(rb"(user|username)\s?=\s?[\S]+", re.I)
LOG_PASS_RE = re.compile(rb"(password)\s?=\s?[\S]+", re.I)
LOG_INI_HIDE_RE = re.compile(
rb"(email_pwd|email_account|email_to|rating_api_key|pushover_token|pushover_userkey|pushbullet_apikey|prowl_apikey|growl_password|growl_server|IPv[4|6] address)\s?=\s?[\S]+",
re.I,
)
LOG_HASH_RE = re.compile(rb"([a-fA-F\d]{25})", re.I)
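# Illustrative effect of the substitutions applied in showlog() below (the example
# line is not from a real log): b"apikey=0123456789abcdef username = admin password = secret"
# becomes b"apikey=<APIKEY> username=<USER> password=<PASSWORD>".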
class Status:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
header = build_status(skip_dashboard=kwargs.get("skip_dashboard"))
template = Template(
file=os.path.join(sabnzbd.WEB_DIR, "status.tmpl"), searchList=[header], compilerSettings=CHEETAH_DIRECTIVES
)
return template.respond()
@secured_expose(check_api_key=True)
def reset_quota(self, **kwargs):
BPSMeter.do.reset_quota(force=True)
raise Raiser(self.__root)
@secured_expose(check_api_key=True)
def disconnect(self, **kwargs):
Downloader.do.disconnect()
raise Raiser(self.__root)
@secured_expose(check_api_key=True)
def refresh_conn(self, **kwargs):
# No real action, just reload the page
raise Raiser(self.__root)
@secured_expose(check_api_key=True)
def showlog(self, **kwargs):
try:
sabnzbd.LOGHANDLER.flush()
except:
pass
# Fetch the INI and the log-data and add a message at the top
log_data = b"--------------------------------\n\n"
log_data += b"The log includes a copy of your sabnzbd.ini with\nall usernames, passwords and API-keys removed."
log_data += b"\n\n--------------------------------\n"
with open(sabnzbd.LOGFILE, "rb") as f:
log_data += f.read()
with open(config.get_filename(), "rb") as f:
log_data += f.read()
# We need to remove all passwords/usernames/api-keys
log_data = LOG_API_RE.sub(b"apikey=<APIKEY>", log_data)
log_data = LOG_API_JSON_RE.sub(b"'apikey': '<APIKEY>'", log_data)
log_data = LOG_USER_RE.sub(b"\\g<1>=<USER>", log_data)
log_data = LOG_PASS_RE.sub(b"password=<PASSWORD>", log_data)
log_data = LOG_INI_HIDE_RE.sub(b"\\1 = <REMOVED>", log_data)
log_data = LOG_HASH_RE.sub(b"<HASH>", log_data)
# Try to replace the username
try:
import getpass
cur_user = getpass.getuser()
if cur_user:
log_data = log_data.replace(utob(cur_user), b"<USERNAME>")
except:
pass
# Set headers
cherrypy.response.headers["Content-Type"] = "application/x-download;charset=utf-8"
cherrypy.response.headers["Content-Disposition"] = 'attachment;filename="sabnzbd.log"'
return log_data
@secured_expose(check_api_key=True)
def clearwarnings(self, **kwargs):
sabnzbd.GUIHANDLER.clear()
raise Raiser(self.__root)
@secured_expose(check_api_key=True)
def change_loglevel(self, **kwargs):
cfg.log_level.set(kwargs.get("loglevel"))
config.save_config()
raise Raiser(self.__root)
@secured_expose(check_api_key=True)
def unblock_server(self, **kwargs):
Downloader.do.unblock(kwargs.get("server"))
# Short sleep so that UI shows new server status
time.sleep(1.0)
raise Raiser(self.__root)
@secured_expose(check_api_key=True)
def delete(self, **kwargs):
orphan_delete(kwargs)
raise Raiser(self.__root)
@secured_expose(check_api_key=True)
def delete_all(self, **kwargs):
orphan_delete_all()
raise Raiser(self.__root)
@secured_expose(check_api_key=True)
def add(self, **kwargs):
orphan_add(kwargs)
raise Raiser(self.__root)
@secured_expose(check_api_key=True)
def add_all(self, **kwargs):
orphan_add_all()
raise Raiser(self.__root)
@secured_expose(check_api_key=True)
def dashrefresh(self, **kwargs):
# This function is run when Refresh button on Dashboard is clicked
# Put the time consuming dashboard functions here; they only get executed when the user clicks the Refresh button
# PyStone
sabnzbd.PYSTONE_SCORE = getpystone()
# Diskspeed of download (aka incomplete) directory:
dir_speed = diskspeedmeasure(sabnzbd.cfg.download_dir.get_path())
if dir_speed:
sabnzbd.DOWNLOAD_DIR_SPEED = round(dir_speed, 1)
else:
sabnzbd.DOWNLOAD_DIR_SPEED = 0
time.sleep(1.0)
# Diskspeed of complete directory:
dir_speed = diskspeedmeasure(sabnzbd.cfg.complete_dir.get_path())
if dir_speed:
sabnzbd.COMPLETE_DIR_SPEED = round(dir_speed, 1)
else:
sabnzbd.COMPLETE_DIR_SPEED = 0
# Internet bandwidth
sabnzbd.INTERNET_BANDWIDTH = round(internetspeed(), 1)
raise Raiser(self.__root) # Refresh screen
def orphan_delete(kwargs):
path = kwargs.get("name")
if path:
path = os.path.join(long_path(cfg.download_dir.get_path()), path)
logging.info("Removing orphaned job %s", path)
remove_all(path, recursive=True)
def orphan_delete_all():
paths = NzbQueue.do.scan_jobs(all_jobs=False, action=False)
for path in paths:
kwargs = {"name": path}
orphan_delete(kwargs)
def orphan_add(kwargs):
path = kwargs.get("name")
if path:
path = os.path.join(long_path(cfg.download_dir.get_path()), path)
logging.info("Re-adding orphaned job %s", path)
NzbQueue.do.repair_job(path, None, None)
def orphan_add_all():
paths = NzbQueue.do.scan_jobs(all_jobs=False, action=False)
for path in paths:
kwargs = {"name": path}
orphan_add(kwargs)
def badParameterResponse(msg, ajax=None):
""" Return a html page with error message and a 'back' button """
if ajax:
return sabnzbd.api.report("json", error=msg)
else:
return """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN">
<html>
<head>
<title>SABnzbd %s - %s</title>
</head>
<body>
<h3>%s</h3>
%s
<br><br>
<FORM><INPUT TYPE="BUTTON" VALUE="%s" ONCLICK="history.go(-1)"></FORM>
</body>
</html>
""" % (
sabnzbd.__version__,
T("ERROR:"),
T("Incorrect parameter"),
msg,
T("Back"),
)
def ShowString(name, msg):
""" Return a html page listing a file and a 'back' button """
return """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN">
<html>
<head>
<title>%s</title>
</head>
<body>
<FORM><INPUT TYPE="BUTTON" VALUE="%s" ONCLICK="history.go(-1)"></FORM>
<h3>%s</h3>
<code><pre>%s</pre></code>
</body>
</html>
""" % (
xml_name(name),
T("Back"),
xml_name(name),
escape(msg),
)
def GetRssLog(feed):
def make_item(job):
# Make a copy
job = job.copy()
# Now we apply some formatting
job["title"] = job["title"]
job["skip"] = "*" * int(job.get("status", "").endswith("*"))
# These fields could be empty
job["cat"] = job.get("cat", "")
job["size"] = job.get("size", "")
job["infourl"] = job.get("infourl", "")
# Auto-fetched jobs didn't have these fields set
if job.get("url"):
job["baselink"] = get_base_url(job.get("url"))
if sabnzbd.rss.special_rss_site(job.get("url")):
job["nzbname"] = ""
else:
job["nzbname"] = job["title"]
else:
job["baselink"] = ""
job["nzbname"] = job["title"]
if job.get("size", 0):
job["size_units"] = to_units(job["size"])
else:
job["size_units"] = "-"
# And we add extra fields for sorting
if job.get("age", 0):
job["age_ms"] = (job["age"] - datetime.utcfromtimestamp(0)).total_seconds()
job["age"] = calc_age(job["age"], True)
else:
job["age_ms"] = ""
job["age"] = ""
if job.get("time_downloaded"):
job["time_downloaded_ms"] = time.mktime(job["time_downloaded"])
job["time_downloaded"] = time.strftime(time_format("%H:%M %a %d %b"), job["time_downloaded"])
else:
job["time_downloaded_ms"] = ""
job["time_downloaded"] = ""
return job
jobs = list(sabnzbd.rss.show_result(feed).values())
good, bad, done = ([], [], [])
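# Status prefixes set by the RSS reader: 'G' = matched ("good"), 'B' = not matched ("bad"),
# 'D' = downloaded ("done"); ConfigRss.index() presents them as matched/unmatched/downloaded.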
for job in jobs:
if job["status"][0] == "G":
good.append(make_item(job))
elif job["status"][0] == "B":
bad.append(make_item(job))
elif job["status"] == "D":
done.append(make_item(job))
try:
# Sort based on actual age, in try-catch just to be sure
good.sort(key=lambda job: job["age_ms"], reverse=True)
bad.sort(key=lambda job: job["age_ms"], reverse=True)
done.sort(key=lambda job: job["time_downloaded_ms"], reverse=True)
except:
# Let the javascript do it then..
pass
return done, good, bad
##############################################################################
LIST_EMAIL = (
"email_endjob",
"email_cats",
"email_full",
"email_server",
"email_to",
"email_from",
"email_account",
"email_pwd",
"email_rss",
)
LIST_NCENTER = (
"ncenter_enable",
"ncenter_cats",
"ncenter_prio_startup",
"ncenter_prio_download",
"ncenter_prio_pause_resume",
"ncenter_prio_pp",
"ncenter_prio_pp",
"ncenter_prio_complete",
"ncenter_prio_failed",
"ncenter_prio_disk_full",
"ncenter_prio_warning",
"ncenter_prio_error",
"ncenter_prio_queue_done",
"ncenter_prio_other",
"ncenter_prio_new_login",
)
LIST_ACENTER = (
"acenter_enable",
"acenter_cats",
"acenter_prio_startup",
"acenter_prio_download",
"acenter_prio_pause_resume",
"acenter_prio_pp",
"acenter_prio_complete",
"acenter_prio_failed",
"acenter_prio_disk_full",
"acenter_prio_warning",
"acenter_prio_error",
"acenter_prio_queue_done",
"acenter_prio_other",
"acenter_prio_new_login",
)
LIST_NTFOSD = (
"ntfosd_enable",
"ntfosd_cats",
"ntfosd_prio_startup",
"ntfosd_prio_download",
"ntfosd_prio_pause_resume",
"ntfosd_prio_pp",
"ntfosd_prio_complete",
"ntfosd_prio_failed",
"ntfosd_prio_disk_full",
"ntfosd_prio_warning",
"ntfosd_prio_error",
"ntfosd_prio_queue_done",
"ntfosd_prio_other",
"ntfosd_prio_new_login",
)
LIST_PROWL = (
"prowl_enable",
"prowl_cats",
"prowl_apikey",
"prowl_prio_startup",
"prowl_prio_download",
"prowl_prio_pause_resume",
"prowl_prio_pp",
"prowl_prio_complete",
"prowl_prio_failed",
"prowl_prio_disk_full",
"prowl_prio_warning",
"prowl_prio_error",
"prowl_prio_queue_done",
"prowl_prio_other",
"prowl_prio_new_login",
)
LIST_PUSHOVER = (
"pushover_enable",
"pushover_cats",
"pushover_token",
"pushover_userkey",
"pushover_device",
"pushover_prio_startup",
"pushover_prio_download",
"pushover_prio_pause_resume",
"pushover_prio_pp",
"pushover_prio_complete",
"pushover_prio_failed",
"pushover_prio_disk_full",
"pushover_prio_warning",
"pushover_prio_error",
"pushover_prio_queue_done",
"pushover_prio_other",
"pushover_prio_new_login",
"pushover_emergency_retry",
"pushover_emergency_expire",
)
LIST_PUSHBULLET = (
"pushbullet_enable",
"pushbullet_cats",
"pushbullet_apikey",
"pushbullet_device",
"pushbullet_prio_startup",
"pushbullet_prio_download",
"pushbullet_prio_pause_resume",
"pushbullet_prio_pp",
"pushbullet_prio_complete",
"pushbullet_prio_failed",
"pushbullet_prio_disk_full",
"pushbullet_prio_warning",
"pushbullet_prio_error",
"pushbullet_prio_queue_done",
"pushbullet_prio_other",
"pushbullet_prio_new_login",
)
LIST_NSCRIPT = (
"nscript_enable",
"nscript_cats",
"nscript_script",
"nscript_parameters",
"nscript_prio_startup",
"nscript_prio_download",
"nscript_prio_pause_resume",
"nscript_prio_pp",
"nscript_prio_complete",
"nscript_prio_failed",
"nscript_prio_disk_full",
"nscript_prio_warning",
"nscript_prio_error",
"nscript_prio_queue_done",
"nscript_prio_other",
"nscript_prio_new_login",
)
class ConfigNotify:
def __init__(self, root):
self.__root = root
self.__lastmail = None
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf["categories"] = list_cats(False)
conf["lastmail"] = self.__lastmail
conf["have_ntfosd"] = sabnzbd.notifier.have_ntfosd()
conf["have_ncenter"] = sabnzbd.DARWIN and sabnzbd.FOUNDATION
conf["scripts"] = list_scripts(default=False, none=True)
for kw in LIST_EMAIL:
conf[kw] = config.get_config("misc", kw).get_string()
for kw in LIST_PROWL:
conf[kw] = config.get_config("prowl", kw)()
for kw in LIST_PUSHOVER:
conf[kw] = config.get_config("pushover", kw)()
for kw in LIST_PUSHBULLET:
conf[kw] = config.get_config("pushbullet", kw)()
for kw in LIST_NCENTER:
conf[kw] = config.get_config("ncenter", kw)()
for kw in LIST_ACENTER:
conf[kw] = config.get_config("acenter", kw)()
for kw in LIST_NTFOSD:
conf[kw] = config.get_config("ntfosd", kw)()
for kw in LIST_NSCRIPT:
conf[kw] = config.get_config("nscript", kw)()
conf["notify_types"] = sabnzbd.notifier.NOTIFICATION
template = Template(
file=os.path.join(sabnzbd.WEB_DIR_CONFIG, "config_notify.tmpl"),
searchList=[conf],
compilerSettings=CHEETAH_DIRECTIVES,
)
return template.respond()
@secured_expose(check_api_key=True, check_configlock=True)
def saveEmail(self, **kwargs):
ajax = kwargs.get("ajax")
for kw in LIST_EMAIL:
msg = config.get_config("misc", kw).set(kwargs.get(kw))
if msg:
return badParameterResponse(T("Incorrect value for %s: %s") % (kw, msg), ajax)
for kw in LIST_NCENTER:
msg = config.get_config("ncenter", kw).set(kwargs.get(kw))
if msg:
return badParameterResponse(T("Incorrect value for %s: %s") % (kw, msg), ajax)
for kw in LIST_ACENTER:
msg = config.get_config("acenter", kw).set(kwargs.get(kw))
if msg:
return badParameterResponse(T("Incorrect value for %s: %s") % (kw, msg), ajax)
for kw in LIST_NTFOSD:
msg = config.get_config("ntfosd", kw).set(kwargs.get(kw))
if msg:
return badParameterResponse(T("Incorrect value for %s: %s") % (kw, msg), ajax)
for kw in LIST_PROWL:
msg = config.get_config("prowl", kw).set(kwargs.get(kw))
if msg:
return badParameterResponse(T("Incorrect value for %s: %s") % (kw, msg), ajax)
for kw in LIST_PUSHOVER:
msg = config.get_config("pushover", kw).set(kwargs.get(kw))
if msg:
return badParameterResponse(T("Incorrect value for %s: %s") % (kw, msg), ajax)
for kw in LIST_PUSHBULLET:
msg = config.get_config("pushbullet", kw).set(kwargs.get(kw, 0))
if msg:
return badParameterResponse(T("Incorrect value for %s: %s") % (kw, msg), ajax)
for kw in LIST_NSCRIPT:
msg = config.get_config("nscript", kw).set(kwargs.get(kw, 0))
if msg:
return badParameterResponse(T("Incorrect value for %s: %s") % (kw, msg), ajax)
config.save_config()
self.__lastmail = None
if ajax:
return sabnzbd.api.report("json")
else:
raise Raiser(self.__root)
|
encoder.py
|
#!/usr/bin/env python
#
# Copyright (c) 2014-2016 Hayaki Saito
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from . import _sixel
from libsixel import *
class Encoder(object):
def __init__(self):
self._encoder = sixel_encoder_new()
def __del__(self):
sixel_encoder_unref(self._encoder)
def setopt(self, flag, arg=None):
sixel_encoder_setopt(self._encoder, flag, arg)
def encode(self, filename="-"):
sixel_encoder_encode(self._encoder, filename)
def encode_bytes(self, buf, width, height, pixelformat, palette):
sixel_encoder_encode_bytes(self._encoder, buf, width, height, pixelformat, palette)
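# Illustrative use of encode_bytes() for raw pixel data (assumes the RGB888
# pixelformat constant exported by libsixel; not part of the original file):
#   enc = Encoder()
#   enc.setopt(SIXEL_OPTFLAG_COLORS, 256)
#   enc.encode_bytes(rgb_bytes, width, height, SIXEL_PIXELFORMAT_RGB888, None)
# where rgb_bytes holds width * height * 3 bytes of packed RGB data.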
def test(self, filename):
import threading
self.setopt(SIXEL_OPTFLAG_COLORS, 16)
self.setopt(SIXEL_OPTFLAG_DIFFUSION, "atkinson")
self.setopt(SIXEL_OPTFLAG_WIDTH, 200)
t = threading.Thread(target=self.encode, args=[filename])
t.daemon = True
t.start()
try:
while t.is_alive():
t.join(1)
except KeyboardInterrupt:
print("\033\\\033[Jcanceled.")
if __name__ == '__main__':
import sys
arg1 = "-" if len(sys.argv) < 2 else sys.argv[1]
Encoder().test(arg1)
|
analysis_subprocess.py
|
#####################################################################
# #
# /analysis_subprocess.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program lyse, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
from labscript_utils import PY2
if PY2:
str = unicode
import labscript_utils.excepthook
import zprocess
to_parent, from_parent, kill_lock = zprocess.setup_connection_with_parent(lock = True)
import sys
import os
import threading
import traceback
import time
from qtutils.qt import QtCore, QtGui, QtWidgets, QT_ENV, PYQT5
from qtutils.qt.QtCore import pyqtSignal as Signal
from qtutils.qt.QtCore import pyqtSlot as Slot
import matplotlib
if QT_ENV == PYQT5:
matplotlib.use("QT5Agg")
else:
matplotlib.use("QT4Agg")
import lyse
lyse.spinning_top = True
import lyse.figure_manager
lyse.figure_manager.install()
if QT_ENV == PYQT5:
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
else:
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
import pylab
import zprocess.locking, labscript_utils.h5_lock, h5py
import zprocess
from qtutils import inmain, inmain_later, inmain_decorator, UiLoader, inthread, DisconnectContextManager
import qtutils.icons
from labscript_utils.modulewatcher import ModuleWatcher
class _DeprecationDict(dict):
"""Dictionary that spouts deprecation warnings when you try to access some
keys."""
def __init__(self, *args, **kwargs):
self.deprecation_messages = {} # To be added to after the deprecated items are added to the dict.
dict.__init__(self, *args, **kwargs)
def __getitem__(self, key):
if key in self.deprecation_messages:
import warnings
import linecache
# DeprecationWarnings are ignored by default. Clear the filter so
# they are not:
previous_warning_filters = warnings.filters[:]
try:
warnings.resetwarnings()
# Hacky stuff to get it to work from within execfile() with
# correct line data:
linecache.clearcache()
caller = sys._getframe(1)
globals = caller.f_globals
lineno = caller.f_lineno
module = globals['__name__']
filename = globals.get('__file__')
fnl = filename.lower()
if fnl.endswith((".pyc", ".pyo")):
filename = filename[:-1]
message = self.deprecation_messages[key]
warnings.warn_explicit(message, DeprecationWarning, filename, lineno, module)
finally:
# Restore the warnings filter:
warnings.filters[:] = previous_warning_filters
return dict.__getitem__(self, key)
def __setitem__(self, key, value):
if key in self.deprecation_messages:
# No longer deprecated if the user puts something in place of the
# originally deprecated item:
del self.deprecation_messages[key]
return dict.__setitem__(self, key, value)
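# Illustrative usage, mirroring how do_analysis() uses this class below:
#   sandbox = _DeprecationDict(path='/data/shot.h5')
#   sandbox.deprecation_messages['path'] = "use lyse.path instead"
#   sandbox['path']            # emits a DeprecationWarning attributed to the caller
#   sandbox['path'] = 'other'  # assigning a new value clears the deprecation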
def set_win_appusermodel(window_id):
from labscript_utils.winshell import set_appusermodel, appids, app_descriptions
icon_path = os.path.abspath('lyse.ico')
executable = sys.executable.lower()
if not executable.endswith('w.exe'):
executable = executable.replace('.exe', 'w.exe')
relaunch_command = executable + ' ' + os.path.abspath(__file__.replace('.pyc', '.py'))
relaunch_display_name = app_descriptions['lyse']
set_appusermodel(window_id, appids['lyse'], icon_path, relaunch_command, relaunch_display_name)
class PlotWindow(QtWidgets.QWidget):
# A signal for when the window manager has created a new window for this widget:
newWindow = Signal(int)
close_signal = Signal()
def event(self, event):
result = QtWidgets.QWidget.event(self, event)
if event.type() == QtCore.QEvent.WinIdChange:
self.newWindow.emit(self.effectiveWinId())
return result
def closeEvent(self, event):
self.hide()
event.ignore()
class Plot(object):
def __init__(self, figure, identifier, filepath):
loader = UiLoader()
self.ui = loader.load('plot_window.ui', PlotWindow())
# Tell Windows how to handle our windows in the taskbar, making pinning work properly and stuff:
if os.name == 'nt':
self.ui.newWindow.connect(set_win_appusermodel)
self.set_window_title(identifier, filepath)
# figure.tight_layout()
self.figure = figure
self.canvas = figure.canvas
self.navigation_toolbar = NavigationToolbar(self.canvas, self.ui)
self.lock_action = self.navigation_toolbar.addAction(
QtGui.QIcon(':qtutils/fugue/lock-unlock'),
'Lock axes', self.on_lock_axes_triggered)
self.lock_action.setCheckable(True)
self.lock_action.setToolTip('Lock axes')
self.copy_to_clipboard_action = self.navigation_toolbar.addAction(
QtGui.QIcon(':qtutils/fugue/clipboard--arrow'),
'Copy to clipboard', self.on_copy_to_clipboard_triggered)
self.copy_to_clipboard_action.setToolTip('Copy to clipboard')
self.copy_to_clipboard_action.setShortcut(QtGui.QKeySequence.Copy)
self.ui.verticalLayout_canvas.addWidget(self.canvas)
self.ui.verticalLayout_navigation_toolbar.addWidget(self.navigation_toolbar)
self.lock_axes = False
self.axis_limits = None
self.update_window_size()
self.ui.show()
def on_lock_axes_triggered(self):
if self.lock_action.isChecked():
self.lock_axes = True
self.lock_action.setIcon(QtGui.QIcon(':qtutils/fugue/lock'))
else:
self.lock_axes = False
self.lock_action.setIcon(QtGui.QIcon(':qtutils/fugue/lock-unlock'))
def on_copy_to_clipboard_triggered(self):
lyse.figure_to_clipboard(self.figure)
@inmain_decorator()
def save_axis_limits(self):
axis_limits = {}
for i, ax in enumerate(self.figure.axes):
# Save the limits of the axes to restore them afterward:
axis_limits[i] = ax.get_xlim(), ax.get_ylim()
self.axis_limits = axis_limits
@inmain_decorator()
def clear(self):
self.figure.clear()
@inmain_decorator()
def restore_axis_limits(self):
for i, ax in enumerate(self.figure.axes):
try:
xlim, ylim = self.axis_limits[i]
ax.set_xlim(xlim)
ax.set_ylim(ylim)
except KeyError:
continue
@inmain_decorator()
def set_window_title(self, identifier, filepath):
self.ui.setWindowTitle(str(identifier) + ' - ' + os.path.basename(filepath))
@inmain_decorator()
def update_window_size(self):
l, w = self.figure.get_size_inches()
dpi = self.figure.get_dpi()
self.canvas.resize(int(l*dpi),int(w*dpi))
self.ui.adjustSize()
@inmain_decorator()
def draw(self):
self.canvas.draw()
def show(self):
self.ui.show()
@property
def is_shown(self):
return self.ui.isVisible()
class AnalysisWorker(object):
def __init__(self, filepath, to_parent, from_parent):
self.to_parent = to_parent
self.from_parent = from_parent
self.filepath = filepath
# Filepath as a unicode string on py3 and a bytestring on py2,
# so that the right string type can be passed to functions that
# require the 'native' string type for that python version. On
# Python 2, encode it with the filesystem encoding.
if PY2:
self.filepath_native_string = self.filepath.encode(sys.getfilesystemencoding())
else:
self.filepath_native_string = self.filepath
# Add user script directory to the pythonpath:
sys.path.insert(0, os.path.dirname(self.filepath_native_string))
# Plot objects, keyed by matplotlib Figure object:
self.plots = {}
# An object with a method to unload user modules if any have
# changed on disk:
self.modulewatcher = ModuleWatcher()
# Start the thread that listens for instructions from the
# parent process:
self.mainloop_thread = threading.Thread(target=self.mainloop)
self.mainloop_thread.daemon = True
self.mainloop_thread.start()
def mainloop(self):
# HDF5 prints lots of errors by default, for things that aren't
# actually errors. These are silenced on a per thread basis,
# and automatically silenced in the main thread when h5py is
# imported. So we'll silence them in this thread too:
h5py._errors.silence_errors()
while True:
task, data = self.from_parent.get()
with kill_lock:
if task == 'quit':
inmain(qapplication.quit)
elif task == 'analyse':
path = data
success = self.do_analysis(path)
if success:
self.to_parent.put(['done', lyse._updated_data])
else:
self.to_parent.put(['error', lyse._updated_data])
else:
self.to_parent.put(['error','invalid task %s'%str(task)])
@inmain_decorator()
def do_analysis(self, path):
now = time.strftime('[%x %X]')
if path is not None:
print('%s %s %s ' %(now, os.path.basename(self.filepath), os.path.basename(path)))
else:
print('%s %s' %(now, os.path.basename(self.filepath)))
self.pre_analysis_plot_actions()
# The namespace the routine will run in:
sandbox = _DeprecationDict(path=path,
__name__='__main__',
__file__= os.path.basename(self.filepath_native_string))
# path global variable is deprecated:
deprecation_message = ("use of 'path' global variable is deprecated and will be removed " +
"in a future version of lyse. Please use lyse.path, which defaults " +
"to sys.argv[1] when scripts are run stand-alone.")
sandbox.deprecation_messages['path'] = deprecation_message
# Use lyse.path instead:
lyse.path = path
lyse._updated_data = {}
# Save the current working directory before changing it to the
# location of the user's script:
cwd = os.getcwd()
os.chdir(os.path.dirname(self.filepath))
# Do not let the modulewatcher unload any modules whilst we're working:
try:
with self.modulewatcher.lock:
# Actually run the user's analysis!
with open(self.filepath) as f:
code = compile(f.read(), os.path.basename(self.filepath_native_string),
'exec', dont_inherit=True)
exec(code, sandbox)
except:
traceback_lines = traceback.format_exception(*sys.exc_info())
del traceback_lines[1]
# Avoiding a list comprehension here so as to avoid this
# python bug in earlier versions of 2.7 (fixed in 2.7.9):
# https://bugs.python.org/issue21591
message = ''
for line in traceback_lines:
if PY2:
# errors='replace' is for Windows filenames present in the
# traceback that are not UTF8. They will not display
# correctly, but that's the best we can do - the traceback
# may contain code from the file in a different encoding,
# so we could have a mixed encoding string. This is only
# a problem for Python 2.
line = line.decode('utf8', errors='replace')
message += line
sys.stderr.write(message)
return False
else:
return True
finally:
os.chdir(cwd)
print('')
self.post_analysis_plot_actions()
def pre_analysis_plot_actions(self):
for plot in self.plots.values():
plot.save_axis_limits()
plot.clear()
def post_analysis_plot_actions(self):
# reset the current figure to figure 1:
lyse.figure_manager.figuremanager.set_first_figure_current()
# Introspect the figures that were produced:
for identifier, fig in lyse.figure_manager.figuremanager.figs.items():
if not fig.axes:
continue
try:
plot = self.plots[fig]
except KeyError:
# If we don't already have this figure, make a window
# to put it in:
self.new_figure(fig, identifier)
else:
if not plot.is_shown:
plot.show()
plot.update_window_size()
plot.set_window_title(identifier, self.filepath)
if plot.lock_axes:
plot.restore_axis_limits()
plot.draw()
def new_figure(self, fig, identifier):
self.plots[fig] = Plot(fig, identifier, self.filepath)
def reset_figs(self):
pass
if __name__ == '__main__':
filepath = from_parent.get()
# Set a meaningful client id for zprocess.locking:
zprocess.locking.set_client_process_name('lyse-'+os.path.basename(filepath))
qapplication = QtWidgets.QApplication(sys.argv)
worker = AnalysisWorker(filepath, to_parent, from_parent)
qapplication.exec_()
|
test_gateway.py
|
import functools
import time
from threading import Thread
import numpy as np
import pytest
from jina import Document
from jina.enums import CompressAlgo
from jina.flow import Flow
from tests import random_docs
@pytest.mark.parametrize('compress_algo', list(CompressAlgo))
def test_compression(compress_algo, mocker):
response_mock = mocker.Mock()
f = (
Flow(compress=str(compress_algo))
.add()
.add(name='DummyEncoder', parallel=2)
.add()
)
with f:
f.index(random_docs(10), on_done=response_mock)
response_mock.assert_called()
@pytest.mark.parametrize('rest_api', [True, False])
def test_grpc_gateway_concurrency(rest_api):
def _validate(req, start, status_codes, durations, index):
end = time.time()
durations[index] = end - start
status_codes[index] = req.status.code
def _request(f, status_codes, durations, index):
start = time.time()
f.index(
inputs=(Document() for _ in range(256)),
on_done=functools.partial(
_validate,
start=start,
status_codes=status_codes,
durations=durations,
index=index,
),
batch_size=16,
)
f = Flow(restful=rest_api).add(parallel=2)
concurrency = 100
with f:
threads = []
status_codes = [None] * concurrency
durations = [None] * concurrency
for i in range(concurrency):
t = Thread(target=_request, args=(f, status_codes, durations, i))
threads.append(t)
t.start()
for t in threads:
t.join()
print(f'terminate {t}')
success = status_codes.count(0)
failed = len(status_codes) - success
print(
f'clients: {len(durations)}\n'
f'min roundtrip time: {np.min(durations)}\n'
f'max roundtrip time: {np.max(durations)}\n'
f'mean roundtrip time: {np.mean(durations)}\n'
)
assert success >= 1
# In some slow environments, a certain degree of failed
# requests will occur. Here we limit the degree of failed
# requests.
rate = failed / success
assert rate < 0.1
|
sub.py
|
from time import time
import message_filters
from std_msgs.msg import String
from geometry_msgs.msg import TransformStamped
import rospy
def callback(s1,s2):
print('b')
print(s1.header.frame_id)
print(s2.header.frame_id)
t1 = TransformStamped()
t2 = TransformStamped()
def callback1(s1):
    global t1  # rebind the module-level transform, not a function-local name
    t1 = s1
    print('1')
def callback2(s2):
    global t2  # rebind the module-level transform, not a function-local name
    t2 = s2
    print('2')
import time
def kin():
while 1:
if (t1.header.stamp == t2.header.stamp):
print('3')
time.sleep(0.1)
if __name__=='__main__':
rospy.init_node('sub')
# sub1 = message_filters.Subscriber('/stringone',TransformStamped)
# sub2 = message_filters.Subscriber('/stringtwo',TransformStamped)
sub1 = rospy.Subscriber('stringone',TransformStamped,callback=callback1)
sub2 = rospy.Subscriber('stringtwo',TransformStamped,callback=callback2)
# A separate process cannot see the t1/t2 updates made by the ROS callbacks in this
# process, so run the comparison loop in a daemon thread instead.
import threading
watcher = threading.Thread(target=kin)
watcher.daemon = True
watcher.start()
# ts = message_filters.TimeSynchronizer([sub1,sub2],queue_size=10)
print('a')
rospy.spin()
|
test_run_modules.py
|
# # NEON AI (TM) SOFTWARE, Software Development Kit & Application Development System
# # All trademark and other rights reserved by their respective owners
# # Copyright 2008-2021 Neongecko.com Inc.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import unittest
from multiprocessing import Process
from time import time, sleep
from mycroft_bus_client import MessageBusClient, Message
from neon_speech.__main__ import main as neon_speech_main
from neon_audio.__main__ import main as neon_audio_main
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from neon_core.messagebus.service.__main__ import main as messagebus_service
AUDIO_FILE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "audio_files")
# TODO: Deprecate this test; covered in test_run_neon
class TestModules(unittest.TestCase):
bus_thread = None
speech_thread = None
audio_thread = None
@classmethod
def setUpClass(cls) -> None:
cls.bus_thread = Process(target=messagebus_service, daemon=False)
cls.speech_thread = Process(target=neon_speech_main, daemon=False)
cls.audio_thread = Process(target=neon_audio_main, daemon=False)
cls.bus_thread.start()
cls.speech_thread.start()
cls.audio_thread.start()
sleep(60) # TODO: This shouldn't be necessary? DM
cls.bus = MessageBusClient()
cls.bus.run_in_thread()
@classmethod
def tearDownClass(cls) -> None:
cls.bus.close()
cls.bus_thread.terminate()
cls.speech_thread.terminate()
cls.audio_thread.terminate()
def setUp(self):
self.bus.connected_event.wait(30)
while not self.bus.started_running:
sleep(1)
def test_get_stt_valid_file(self):
self.assertTrue(self.speech_thread.is_alive())
context = {"client": "tester",
"ident": "12345",
"user": "TestRunner"}
stt_resp = self.bus.wait_for_response(Message("neon.get_stt", {"audio_file": os.path.join(AUDIO_FILE_PATH,
"stop.wav")},
context), context["ident"])
self.assertEqual(stt_resp.context, context)
self.assertIsInstance(stt_resp.data.get("parser_data"), dict)
self.assertIsInstance(stt_resp.data.get("transcripts"), list)
self.assertIn("stop", stt_resp.data.get("transcripts"))
def test_get_tts_valid_default(self):
self.assertTrue(self.audio_thread.is_alive())
text = "This is a test"
context = {"client": "tester",
"ident": str(time()),
"user": "TestRunner"}
tts_resp = self.bus.wait_for_response(Message("neon.get_tts", {"text": text}, context),
context["ident"], timeout=60)
self.assertEqual(tts_resp.context, context)
responses = tts_resp.data
self.assertIsInstance(responses, dict, responses)
print(responses)
self.assertEqual(len(responses), 1)
resp = list(responses.values())[0]
self.assertIsInstance(resp, dict)
self.assertEqual(resp.get("sentence"), text)
if __name__ == '__main__':
unittest.main()
|
image_server.py
|
import socket
import threading
#---------<CONFIG>-----------#
HEADER = 64 # number of bytes in header - will be used to convey how many bytes will be sent later
PORT = 5050
SERVER = "192.168.5.11"#"localhost"#192.168.5.11" # replace with LAN IPv4 address
ADDR = (SERVER , PORT)
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = "!DISCONNECT"
#---------</CONFIG>-----------#
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(ADDR)
#--------------------------------------------------------------------#
import numpy as np
import cv2
import matplotlib.pyplot as plt
from bounding_box import bounding_box as bb
import time
import base64
import traceback
#--------------------------------------------------------------------#
LABELS = open("./classes.names.txt").read().strip().split("\n")
# derive the paths to the YOLO weights and model configuration
weightsPath = "yolov4_custom_train_new1.weights"
configPath = "yolov4_custom_test.cfg"
# Loading the neural network framework Darknet (YOLO was created based on this framework)
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
def predict(image):
# initialize a list of colors to represent each possible class label
np.random.seed(15)
COLORS = ["blue", "yellow", "red", "green"]
(H, W) = image.shape[:2]
# determine only the "ouput" layers name which we need from YOLO
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# construct a blob from the input image and then perform a forward pass of the YOLO object detector,
# giving us our bounding boxes and associated probabilities
blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=False, crop=False)
net.setInput(blob)
layerOutputs = net.forward(ln)
boxes = []
confidences = []
classIDs = []
threshold = 0.3
# loop over each of the layer outputs
for output in layerOutputs:
# loop over each of the detections
for detection in output:
# extract the class ID and confidence (i.e., probability) of
# the current object detection
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
# filter out weak predictions by ensuring the detected
# probability is greater than the minimum probability
# confidence type=float, default=0.5
if confidence > threshold:
# scale the bounding box coordinates back relative to the
# size of the image, keeping in mind that YOLO actually
# returns the center (x, y)-coordinates of the bounding
# box followed by the boxes' width and height
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
# use the center (x, y)-coordinates to derive the top and
# and left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
# update our list of bounding box coordinates, confidences,
# and class IDs
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
classIDs.append(classID)
# apply non-maxima suppression to suppress weak, overlapping bounding boxes
idxs = cv2.dnn.NMSBoxes(boxes, confidences, threshold, 0.3)
print (idxs)
# ensure at least one detection exists
if len(idxs) > 0:
# loop over the indexes we are keeping
for i in idxs.flatten():
# extract the bounding box coordinates
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
# draw a bounding box rectangle and label on the image
color = str(np.random.choice(COLORS, 1)[0])
text = "{}: {:.2f}".format(LABELS[classIDs[i]], confidences[i])
bb.add(image,x,y,x+w,y+h,text,color)
#cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
#cv2.putText(image, text, (x +15, y - 10), cv2.FONT_HERSHEY_SIMPLEX,1, color, 2)
return image, LABELS[classIDs[i]], confidences[i]
return image, None, None
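# Example (illustrative, not part of the original server): running the detector on a
# single frame read from disk; the file name below is a placeholder.
#   frame = cv2.imread("test_frame.jpg")
#   annotated, label, conf = predict(frame)
#   if label is not None:
#       print(label, conf)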
#--------------------------------------------------------------------#
def receive_image(conn, addr):
print (f"[NEW CONNECTION] {addr} connected.")
connected = True
while connected:
try:
msg_length = conn.recv(HEADER)#.decode(FORMAT) # Blocking code (wait until run)
if msg_length:
print (msg_length)
print (len(msg_length))
# msg_length = base64.b64decode(msg_length)
msg_length = int.from_bytes(msg_length, 'big', signed=True)  # signed so the -1 disconnect sentinel below is detectable
# msg_length = int(msg_length)
print(msg_length)
# time.sleep(20)
if msg_length == -1:
connected = False
break
msg = conn.recv(msg_length)# Blocking code (wait until run)
image_bytes = base64.b64decode(msg)
image_bytes = np.frombuffer(image_bytes, np.uint8)  # frombuffer replaces the deprecated fromstring
image = np.resize(image_bytes, (120, 180, 3))
# cv2.imwrite("./saved_image.jpg", image)
image_processed , label, conf = predict(image)
cv2.imwrite("./image_processed.jpg", image_processed)
print ("IMAGE PROCESSED-------")
print (label, conf)
print ("[IMAGE SUCCESS]")
if label and conf:
conn.send("SUCCESS".encode(FORMAT))
else:
continue
except Exception as e:
print ("[ERROR] Image reception failed")
print (e)
traceback.print_exc()
continue
print ("[CLOSED] Connection Closed")
conn.close()
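# Note (illustrative addition, not in the original protocol): socket.recv() can return
# fewer bytes than requested, so the fixed-size header and payload reads above may come
# up short on slow or congested links. A minimal helper along these lines could be used
# to loop until exactly n bytes arrive; the name recv_exact is hypothetical.
def recv_exact(conn, n):
    """Read exactly n bytes from conn, or return None if the peer closes early."""
    chunks = []
    remaining = n
    while remaining > 0:
        chunk = conn.recv(remaining)
        if not chunk:  # peer closed the connection before sending n bytes
            return None
        chunks.append(chunk)
        remaining -= len(chunk)
    return b"".join(chunks)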
def start():
server.listen()
print (f"[LISTENING] Server is Listening on {SERVER}")
while True:
conn, addr = server.accept() # Blocking code (wait until run)
thread = threading.Thread(target=receive_image, args = (conn, addr))
thread.start()
print(f"[ACTIVE CONNECTIONS] {threading.activeCount()-1}")
print ("[STARTING] Server started... ")
start()
|
datasets.py
|
import pyrealsense2 as rs
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.utils import xyxy2xywh, xywh2xyxy
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
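# Example (illustrative): a 4032x3024 landscape-stored photo tagged with EXIF
# orientation 6 (rotate 270) is reported by exif_size() as (3024, 4032).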
class LoadImages: # for inference
def __init__(self, path, img_size=416):
path = str(Path(path)) # os-agnostic
files = []
if os.path.isdir(path):
files = sorted(glob.glob(os.path.join(path, '*.*')))
elif os.path.isfile(path):
files = [path]
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
nI, nV = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nF = nI + nV # number of files
self.video_flag = [False] * nI + [True] * nV
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nF > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(path, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nF:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nF: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nF, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nF # number of files
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=416):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=416):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(0 if s == '0' else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0):
try:
path = str(Path(path)) # os-agnostic
parent = str(Path(path).parent) + os.sep
if os.path.isfile(path): # file
with open(path, 'r') as f:
f = f.read().splitlines()
f = [x.replace('./', parent) if x.startswith('./') else x for x in f] # local to global path
elif os.path.isdir(path): # folder
f = glob.iglob(path + os.sep + '*.*')
else:
raise Exception('%s does not exist' % path)
self.img_files = [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats]
except:
raise Exception('Error loading data from %s. See %s' % (path, help_url))
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index (np.int is removed in newer NumPy)
nb = bi[-1] + 1 # number of batches
self.n = n # number of images
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
# Define labels
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt')
for x in self.img_files]
# Read image shapes (wh)
sp = path.replace('.txt', '') + '.shapes' # shapefile path
try:
with open(sp, 'r') as f: # read existing shapefile
s = [x.split() for x in f.read().splitlines()]
assert len(s) == n, 'Shapefile out of sync'
except:
s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
np.savetxt(sp, s, fmt='%g') # overwrites existing (if any)
self.shapes = np.array(s, dtype=np.float64)
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
# Cache labels
self.imgs = [None] * n
self.labels = [np.zeros((0, 5), dtype=np.float32)] * n
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
np_labels_path = str(Path(self.label_files[0]).parent) + '.npy' # saved labels in *.npy file
if os.path.isfile(np_labels_path):
s = np_labels_path # print string
x = np.load(np_labels_path, allow_pickle=True)
if len(x) == n:
self.labels = x
labels_loaded = True
else:
s = path.replace('images', 'labels')
pbar = tqdm(self.label_files)
for i, file in enumerate(pbar):
if labels_loaded:
l = self.labels[i]
# np.savetxt(file, l, '%g') # save *.txt from *.npy file
else:
try:
with open(file, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
except:
nm += 1 # print('missing labels for image %s' % self.img_files[i]) # file missing
continue
if l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
pbar.desc = 'Caching labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
s, nf, nm, ne, nd, n)
assert nf > 0 or n == 20288, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
if not labels_loaded and n > 1000:
print('Saving labels to %s for faster future loading' % np_labels_path)
np.save(np_labels_path, self.labels) # save for next time
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
if cache_images: # if training
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
# Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
detect_corrupted_images = False
if detect_corrupted_images:
from skimage import io # conda install -c conda-forge scikit-image
for file in tqdm(self.img_files, desc='Detecting corrupted images'):
try:
_ = io.imread(file)
except:
print('Corrupted image detected: %s' % file)
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
if self.mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not self.mosaic:
img, labels = random_affine(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
# Normalize coordinates 0 - 1
labels[:, [2, 4]] /= img.shape[0] # height
labels[:, [1, 3]] /= img.shape[1] # width
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip and random.random() < 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
# random up-down flip
ud_flip = False
if ud_flip and random.random() < 0.5:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
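# Example (illustrative): augment_hsv modifies img in place, so a call site only needs
# the gain hyperparameters; the file name and gain values below are placeholders.
#   img = cv2.imread('train.jpg')
#   augment_hsv(img, hgain=0.015, sgain=0.7, vgain=0.4)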
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
xc, yc = [int(random.uniform(s * 0.5, s * 1.5)) for _ in range(2)] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
# np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
# Augment
# img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
img4, labels4 = random_affine(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
border=-s // 2) # border to remove
return img4, labels4
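# Worked example (illustrative): with s = 416 and mosaic centre (xc, yc) = (500, 450),
# the first (top-left) 416x416 tile is placed at img4[34:450, 84:500] (x1a = 500-416 = 84,
# y1a = 450-416 = 34), so its labels are shifted by padw = 84 and padh = 34 pixels after
# being scaled from normalized xywh to pixel xyxy.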
def letterbox(img, new_shape=(416, 416), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = new_shape
ratio = new_shape[0] / shape[1], new_shape[1] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
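# Worked example (illustrative): letterboxing a 1280x720 frame with img_size 416 and
# auto=True uses r = min(416/720, 416/1280) = 0.325, resizing to 416x234, then pads the
# height by dh = (416 - 234) mod 64 = 54 (27 px top and bottom), returning
# ratio = (0.325, 0.325) and pad = (0.0, 27.0).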
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=0):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
# targets = [cls, xyxy]
height = img.shape[0] + border * 2
width = img.shape[1] + border * 2
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border # x translation (pixels)
T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Combined rotation matrix
M = S @ T @ R # ORDER IS IMPORTANT HERE!!
if (border != 0) or (M != np.eye(3)).any(): # image changed
img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio
i = (w > 4) & (h > 4) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 10)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def cutout(image, labels):
# https://arxiv.org/abs/1708.04552
# https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
# https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
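# Example (illustrative): a cutout mask covering 80% of a box's area gives ioa = 0.8,
# so that label is dropped, while a mask covering 30% (ioa = 0.3) keeps it, since only
# labels with ioa >= 0.60 are removed.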
def reduce_img_size(path='../data/sm4/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except:
print('WARNING: image failure %s' % f)
def convert_images2bmp(): # from utils.datasets import *; convert_images2bmp()
# Save images
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
# for path in ['../coco/images/val2014', '../coco/images/train2014']:
for path in ['../data/sm4/images', '../data/sm4/background']:
create_folder(path + 'bmp')
for ext in formats: # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext):
cv2.imwrite(f.replace(ext.lower(), '.bmp').replace(path, path + 'bmp'), cv2.imread(f))
# Save labels
# for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:
with open(file, 'r') as f:
lines = f.read()
# lines = f.read().replace('2014/', '2014bmp/') # coco
lines = lines.replace('/images', '/imagesbmp')
lines = lines.replace('/background', '/backgroundbmp')
for ext in formats:
lines = lines.replace(ext, '.bmp')
with open(file.replace('.txt', 'bmp.txt'), 'w') as f:
f.write(lines)
def recursive_dataset2bmp(dataset='../data/sm4_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='data/coco_64img.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new_folder'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
class LoadRealSense2: # Stream from Intel RealSense D435
"""
https://github.com/GilbertTjahjono/Multiple_Object_Tracking
"""
def __init__(self, width=640, height=480, fps=30):
# Variables for setup
self.mode = 'RealSense'
self.width = width
self.height = height
self.fps = fps
self.imgs = [None]
self.depths = [None]
self.img_size = 480
self.half = False
# Setup
self.pipe = rs.pipeline()
self.cfg = rs.config()
self.cfg.enable_stream(rs.stream.depth, self.width, self.height, rs.format.z16, self.fps)
self.cfg.enable_stream(rs.stream.color, self.width, self.height, rs.format.bgr8, self.fps)
# Start streaming
self.profile = self.pipe.start(self.cfg)
self.path = rs.pipeline_profile()
print(self.path)
print("streaming at w = " + str(self.width) + " h = " + str(self.height) + " fps = " + str(self.fps))
def update(self):
while True:
#Wait for frames and get the data
self.frames = self.pipe.wait_for_frames()
self.depth_frame = self.frames.get_depth_frame()
self.color_frame = self.frames.get_color_frame()
if not self.depth_frame or not self.color_frame:
continue
img0 = np.asanyarray(self.color_frame.get_data())
#align + color depth -> for display purpose only
depth0 = self.colorizing(self.aligned(self.frames))
# aligned depth -> for depth calculation
distance0, depth_intrin, aligned_depth_frame = self.aligned_depth(self.frames)
#get depth_scale
depth_scale = self.scale(self.profile)
self.imgs = np.expand_dims(img0, axis=0)
self.depths = depth0
self.distance = distance0
break
#print("ini depth awal: " + str(np.shape(self.depths)))
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
#print("ini s: " + str(np.shape(s)))
self.rect = np.unique(s, axis=0).shape[0] == 1
#print("ini rect: " + str(np.shape(self.rect)))
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
time.sleep(0.01) # wait time
return self.rect, depth_scale, depth_intrin, aligned_depth_frame
def scale(self, profile):
depth_scale = profile.get_device().first_depth_sensor().get_depth_scale()
return depth_scale
def aligned_depth(self, frames):
self.align = rs.align(rs.stream.color)
frames = self.align.process(frames)
aligned_depth_frame = frames.get_depth_frame()
depth_real = np.asanyarray(aligned_depth_frame.get_data())
depth_intrin = aligned_depth_frame.profile.as_video_stream_profile().intrinsics
return depth_real, depth_intrin, aligned_depth_frame
def aligned(self, frames):
self.align = rs.align(rs.stream.color)
frames = self.align.process(frames)
aligned_depth_frame = frames.get_depth_frame()
return aligned_depth_frame
def colorizing(self, aligned_depth_frame):
self.colorizer = rs.colorizer()
colorized_depth = np.asanyarray(self.colorizer.colorize(aligned_depth_frame).get_data())
return(colorized_depth)
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
self.rect, depth_scale, depth_intrin, aligned_depth_frame = self.update()
img0 = self.imgs.copy()
depth = self.depths.copy()
distance = self.distance.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
img_path = 'realsense.mp4'
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
#print("ini img letterbox: " + str(np.shape(img)))
# Stack
img = np.stack(img, 0)
#print("ini img-padding: " + str(np.shape(img)))
# Convert Image
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to 3x416x416, uint8 to float32
img = np.ascontiguousarray(img)
# Return depth, depth0, img, img0
dis = {'distance': distance,
'depth_scale': depth_scale,
'depth_intrin': depth_intrin,
'aligned_depth_frame': aligned_depth_frame
}
return str(img_path), img, img0, dis
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
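# Example (illustrative): iterating the loader yields the letterboxed image batch plus a
# dict with the aligned depth data; the loop below is a sketch, not part of the module.
#   dataset = LoadRealSense2(width=640, height=480, fps=30)
#   for img_path, img, img0, depth_info in dataset:
#       distance_map = depth_info['distance']
#       ...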
|
fake_ssh.py
|
#!/usr/bin/env python
"""Fake SSH Server Utilizing Paramiko"""
import argparse
import threading
import socket
import sys
import os
import traceback
import paramiko
import json
from getmac import get_mac_address
import email_alerts
def load_auth_file(filename):
with open(filename, "r") as auth_file:
auth = json.load(auth_file)
return auth
LOG = open("/usr/local/bin/fake-ssh/logs/log.txt", "a")
HOST_KEY = paramiko.RSAKey(filename='/usr/local/bin/fake-ssh/keys/private.key')
SSH_BANNER = "SSH-2.0-OpenSSH_8.2p1 Ubuntu-4ubuntu0.1"
def handle_cmd(cmd, chan):
"""Branching statements to handle and prepare a response for a command"""
response = ""
if cmd.startswith("ls"):
response = "Desktop Documents Pictures Music Shared"
elif cmd.startswith("version"):
response = """GNU bash, version 3.1.27(1)-release (x86_64)
Copyright (C) 2007 Free Software Foundation, Inc."""
elif cmd.startswith("pwd"):
response = "/home/user"
elif cmd.startswith("rm"):
response = "-bash: {} not found"
else:
response = "-bash: "+cmd+" command not found"
LOG.write(response + "\n")
LOG.flush()
chan.send(response + "\r\n")
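# Example (illustrative): additional canned responses are just extra branches above,
# e.g. a fake `whoami` could be handled with:
#   elif cmd.startswith("whoami"):
#       response = "user"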
def send_ascii(filename, chan):
"""Print ascii from a file and send it to the channel"""
with open('ascii/{}'.format(filename)) as text:
chan.send("\r")
for line in text:
LOG.write(line)
chan.send(line + "\r")
LOG.flush()
class FakeSshServer(paramiko.ServerInterface):
"""Settings for paramiko server interface"""
def __init__(self):
self.event = threading.Event()
def check_channel_request(self, kind, chanid):
if kind == 'session':
return paramiko.OPEN_SUCCEEDED
return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
def check_auth_password(self, username, password):
# Accept all passwords as valid by default
return paramiko.AUTH_SUCCESSFUL
def get_allowed_auths(self, username):
return 'password'
def check_channel_shell_request(self, channel):
self.event.set()
return True
def check_channel_pty_request(self, channel, term, width, height, pixelwidth, pixelheight, modes):
return True
def handle_connection(client, addr):
"""Handle a new ssh connection"""
ip = str(addr[0])
mac = get_mac_address(ip=ip) or "unknown"  # get_mac_address can return None for off-subnet hosts
msg = "Connection from ip: " + ip + " mac: " + mac
LOG.write("\n\n"+msg+"\n")#Connection from: " + addr[0] + "\n")
print('Got a connection!')
a=load_auth_file("/usr/local/bin/email.json")
try:
email_alerts.send(auth=a,to=mail,subject="ALERT! SSH Connection attempt to fake-ssh on port 22 from: " + addr[0],message=msg)
print("Sent email")
except:
print("unable to send alert")
try:
transport = paramiko.Transport(client)
transport.add_server_key(HOST_KEY)
# Change banner to appear legit on nmap (or other network) scans
transport.local_version = SSH_BANNER
server = FakeSshServer()
try:
transport.start_server(server=server)
except paramiko.SSHException:
print('*** SSH negotiation failed.')
raise Exception("SSH negotiation failed")
# wait for auth
chan = transport.accept(20)
if chan is None:
print('*** No channel.')
raise Exception("No channel")
server.event.wait(10)
if not server.event.is_set():
print('*** Client never asked for a shell.')
raise Exception("No shell request")
try:
chan.send("""###############################################################\r\n
Welcome to Ubuntu Server Version 20.0.1\r\n
All connections are monitored and recorded\r\n
Disconnect IMMEDIATELY if you are not an authorized user!\r\n
###############################################################\r\n
\r\n""")
run = True
while run:
chan.send("$ ")
command = ""
while not command.endswith("\r"):
recv_data = chan.recv(1024)  # use a separate name so the paramiko Transport is not shadowed
# Echo input to pseudo-simulate a basic terminal
chan.send(recv_data)
command += recv_data.decode("utf-8")
chan.send("\r\n")
command = command.rstrip()
LOG.write("$ " + command + "\n")
print(command)
if command == "exit":
run = False
else:
handle_cmd(command, chan)
except Exception as err:
print('!!! Exception: {}: {}'.format(err.__class__, err))
traceback.print_exc()
try:
transport.close()
except Exception:
pass
chan.close()
except Exception as err:
print('!!! Exception: {}: {}'.format(err.__class__, err))
traceback.print_exc()
try:
transport.close()
except Exception:
pass
def start_server(port, bind):
"""Init and run the ssh server"""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((bind, port))
except Exception as err:
print('*** Bind failed: {}'.format(err))
traceback.print_exc()
sys.exit(1)
threads = []
while True:
try:
sock.listen(100)
print('Listening for connection ...')
client, addr = sock.accept()
except Exception as err:
print('*** Listen/accept failed: {}'.format(err))
traceback.print_exc()
new_thread = threading.Thread(target=handle_connection, args=(client, addr))
new_thread.start()
threads.append(new_thread)
for thread in threads:
thread.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run a fake ssh server')
parser.add_argument("--port", "-p", help="The port to bind the ssh server to (default 22)", default=22, type=int, action="store")
parser.add_argument("--bind", "-b", help="The address to bind the ssh server to", default="", type=str, action="store")
parser.add_argument("--mail", "-m", help="notification email", default="", type=str, action="store")
args = parser.parse_args()
mail = args.mail
start_server(args.port, args.bind)
|
kinesisanalyticsv2_demo.py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to use the AWS SDK for Python (Boto3) with Amazon Kinesis and version 2 of
the Amazon Kinesis Data Analytics API to create an application that reads data from
an input stream, uses SQL code to transform the data, and writes it to an output
stream.
"""
import logging
from pprint import pprint
import sys
import threading
import time
import boto3
from analyticsv2.analytics_application import KinesisAnalyticsApplicationV2
from streams.kinesis_stream import KinesisStream
from streams.dg_anomaly import generate
# Add relative path to include demo_tools in this code example without the need for setup.
sys.path.append('../..')
from demo_tools.custom_waiter import CustomWaiter, WaitState
from demo_tools.retries import exponential_retry
logger = logging.getLogger(__name__)
class ApplicationRunningWaiter(CustomWaiter):
"""
Waits for the application to be in a running state.
"""
def __init__(self, client):
super().__init__(
'ApplicationRunning', 'DescribeApplication',
'ApplicationDetail.ApplicationStatus',
{'RUNNING': WaitState.SUCCESS, 'STOPPING': WaitState.FAILURE},
client)
def wait(self, app_name):
self._wait(ApplicationName=app_name)
# snippet-start:[python.example_code.kinesis-analytics-v2.Scenario_SqlTransform]
def usage_demo():
print('-'*88)
print("Welcome to the demo of version 2 of the Amazon Kinesis Data Analytics API.")
print('-'*88)
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
kinesis_client = boto3.client('kinesis')
analytics_client = boto3.client('kinesisanalyticsv2')
iam_resource = boto3.resource('iam')
application = KinesisAnalyticsApplicationV2(analytics_client)
app_running_waiter = ApplicationRunningWaiter(analytics_client)
input_stream_name = 'doc-example-stream-input'
input_prefix = 'SOURCE_SQL_STREAM'
output_stream_name = 'doc-example-stream-output'
app_name = 'doc-example-app'
role_name = 'doc-example-kinesis-read-write'
print(f"Creating input stream {input_stream_name} and output stream "
f"{output_stream_name}.")
input_stream = KinesisStream(kinesis_client)
input_stream.create(input_stream_name)
output_stream = KinesisStream(kinesis_client)
output_stream.create(output_stream_name)
print("Starting data generator (on a separate thread) to put data into the "
"input stream.")
stream_thread = threading.Thread(
target=generate, args=(input_stream.name, kinesis_client, False), daemon=True)
stream_thread.start()
print(f"Creating role {role_name} to let Kinesis Analytics read from the input "
f"stream and write to the output stream.")
role = application.create_read_write_role(
role_name, input_stream.arn(), output_stream.arn(), iam_resource)
print("Waiting for role to be ready.")
time.sleep(10)
print(f"Creating application {app_name}.")
# Sometimes the role is still not ready and InvalidArgumentException is raised, so
# continue to retry if this happens.
app_data = exponential_retry('InvalidArgumentException')(
application.create)(app_name, role.arn)
pprint(app_data)
print(f"Discovering schema of input stream {input_stream.name}.")
input_schema = application.discover_input_schema(input_stream.arn(), role.arn)
pprint(input_schema)
print("Adding input stream to the application.")
input_details = application.add_input(
input_prefix, input_stream.arn(), input_schema)
print("Input details:")
pprint(input_details)
print("Uploading SQL code to the application to process the input stream.")
with open('analyticsv2/example.sql') as code_file:
code = code_file.read()
application.update_code(code)
print("Adding output stream to the application.")
application.add_output('DESTINATION_SQL_STREAM', output_stream.arn())
print("Starting the application.")
application.start(input_details['InputDescriptions'][0]['InputId'])
print("Waiting for the application to start (this may take a minute or two).")
app_running_waiter.wait(application.name)
print("Application started. Getting records from the output stream.")
for records in output_stream.get_records(50):
if len(records) > 0:
print(*[rec['Data'].decode() for rec in records], sep='\n')
print("Cleaning up...")
application.delete()
input_stream.delete()
output_stream.delete()
print("Deleting read/write role.")
for policy in role.attached_policies.all():
role.detach_policy(PolicyArn=policy.arn)
policy.delete()
role.delete()
print("Thanks for watching!")
print('-'*88)
# snippet-end:[python.example_code.kinesis-analytics-v2.Scenario_SqlTransform]
if __name__ == '__main__':
usage_demo()
|
views.py
|
import os
import json
import logging
from datetime import date
from threading import Thread
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect, HttpResponseForbidden, \
HttpResponseNotFound
from django.contrib.auth import authenticate, login, logout
from django.urls import reverse
from django.core.exceptions import ObjectDoesNotExist
from django.views import View
from django.conf import settings
from .models import AnimeResult, Detail, Episode
from .anime_scrapers.scraper_handler import scraper_handler
from .anime_scrapers.info_handler import info_handler
from .anime_scrapers.download_handler import download_handler
def index(request):
return render(request, "Scraper/index.html")
def search_page(request):
return render(request, "Scraper/search.html")
def search(request, search_txt):
d_results_unformatted = scraper_handler.search(search_txt)
d_results = list()
for a in d_results_unformatted:
for b in a:
d_results.append(b)
if d_results:
for i in d_results:
try:
anime = AnimeResult.objects.get(link=i['link'])
i['aid'] = anime.aid
except ObjectDoesNotExist:
anime = AnimeResult(
name=i['title'].title(),
host=i['host'],
language=i['language'],
link=i['link'],
)
try:
anime.poster = i['poster']
except KeyError:
anime.poster = None
anime.save()
i['aid'] = anime.aid
return render(request, "Scraper/search_results.html", {
"search_text": search_txt,
"d_results": d_results,
})
def login_user(request):
if request.user.is_authenticated:
return HttpResponseRedirect(reverse("index"))
if request.POST:
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return HttpResponseRedirect(reverse("index"))
else:
return render(request, "Scraper/login.html", context={
"error_msg": "The username/password entered is incorrect",
})
return render(request, "Scraper/login.html")
def logout_user(request):
if request.user.is_authenticated:
logout(request)
return HttpResponseRedirect(reverse("index"))
def play(request, anime_id, episode_id):
anime = get_object_or_404(AnimeResult, pk=anime_id)
try:
episode = anime.episode_set.get(episode_num=episode_id)
if not episode.isDownloaded():
return HttpResponseForbidden()
except ObjectDoesNotExist:
return HttpResponseNotFound()
return render(request, "Scraper/play.html", {
"anime_name": anime.name,
"episode_name": episode.getName(),
"anime_poster": anime.detail.poster_name
})
# TODO: Create a Class.
def _download(arg):
if not os.path.isdir(settings.DOWNLOAD_PATH):
os.mkdir(settings.DOWNLOAD_PATH)
successful_downloads = list()
for x in arg:
successful = False
for i in x['sources']:
logging.debug(i['link'])
successful = download_handler.single_download(
i['link'],
os.path.join(settings.DOWNLOAD_PATH, x['downloadName'])
)
logging.debug(str(successful))
if successful:
break
successful_downloads.append({
"epNum": x['epNum'],
"success": successful
})
return successful_downloads
def download(request, anime_id):
if not anime_id:
return HttpResponseForbidden()
if 'd' in request.GET:
_download_list = request.GET['d'].split(",")
download_list = list()
for x in _download_list:
if x.startswith("ep-"):
download_list.append(int(x[3:]))
else:
return HttpResponseForbidden()
anime = get_object_or_404(AnimeResult, pk=anime_id)
episodes = anime.episode_set.all()
d_eps = list()
for e in episodes:
if e.episode_num in download_list:
if not e.isDownloaded():
d_eps.append({
"epNum": e.episode_num,
"downloadName": e.getName(),
"sources": json.loads(e.episode_sources_json),
})
download_thread = Thread(target=_download, args=(d_eps,))
download_thread.start()
msg = "Downloading episodes..." if d_eps else "Already downloaded."
request.session['details_download_msg'] = msg
return HttpResponseRedirect(reverse("details", args=(anime_id,)))
return HttpResponseNotFound()
class DetailView(View):
template_name = "Scraper/view.html"
# Only implementing AniDB for now to get information.
def get(self, request, anime_id):
anime = get_object_or_404(AnimeResult, pk=anime_id)
try:
anime_detail = anime.detail
details = json.loads(anime_detail.information_json)
except ObjectDoesNotExist:
anime_detail = Detail(anime=anime)
search_anime = info_handler.search(anime.name, True)[0]
if len(search_anime) > 0:
details = info_handler.getDetailedInfo(
search_anime[0]['id'])[0]
anime_detail.poster_url = details['image_url']
if 'image_url' in details:
del details['image_url']
else:
details = {'description': 'None'}
try:
anime_detail.poster_url = details['image_url']
except:
anime_detail.poster_url = "None."
anime_detail.information_json = json.dumps(details)
anime_detail.save_poster_from_url()
anime_detail.save()
description = self.fix_description(dict(details))
anime_episodes = anime.episode_set.all()
if anime.episode_set.count() > 0 and \
(date.today() - anime_detail.episode_last_modified)\
.days <= settings.MAX_EPISODE_CACHE_DAYS:
_episodes = [json.loads(ep.episode_sources_json)
for ep in anime_episodes]
ep_nums = [ep.episode_num for ep in anime_episodes]
episodes = [
{'epNum': ep.episode_num,
'sources': json.loads(ep.episode_sources_json)}
for ep in anime_episodes
]
else:
if anime.episode_set.count() > 0:
for ep in anime.episode_set.all():
ep.delete()
_episodes = scraper_handler.resolve(anime.link)['episodes']
ep_nums = [int(x['epNum']) for x in _episodes]
episodes = [x for (y, x) in
sorted(list(zip(ep_nums, _episodes)),
key=lambda pair: pair[0])]
for i in sorted(ep_nums):
anime_episode = Episode(anime=anime)
anime_episode.episode_num = i
anime_episode.episode_sources_json = \
json.dumps(episodes[i-1]['sources'])
anime_episode.save()
anime_detail.episode_last_modified = date.today()
anime_detail.save()
downloaded_eps = list()
for ep in anime.episode_set.all():
if ep.isDownloaded():
downloaded_eps.append(ep.episode_num)
info_msg = ""
if 'details_download_msg' in request.session:
info_msg = request.session.pop('details_download_msg')
return render(request, self.template_name, {
"title": anime.name.title(),
"poster_name": anime_detail.poster_name,
"description": description,
"ep_nums": sorted(ep_nums),
"downloaded_eps": downloaded_eps,
"aid": anime_id,
"info_message": info_msg
})
def fix_description(self, description):
if 'id' in description:
del description['id']
if 'recommendations' in description:
del description['recommendations']
if 'other_names' in description:
del description['other_names']
if 'creators' in description:
del description['creators']
# description['description'] =
# str("<br>") + description['description']
new_description = dict()
if len(description) > 0:
for key in description:
new_description[self.humanize_str(str(key))] = description[key]
return new_description
def humanize_str(self, string):
string = string.replace("_", " ")
string = string.title()
return string
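# Example (illustrative): humanize_str("other_names") returns "Other Names", which is
# how fix_description turns raw info keys into display headings.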
|
fn_api_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A PipelineRunner using the SDK harness.
"""
from __future__ import absolute_import
from __future__ import print_function
import collections
import contextlib
import copy
import itertools
import logging
import os
import queue
import subprocess
import sys
import threading
import time
import uuid
from builtins import object
from concurrent import futures
import grpc
import apache_beam as beam # pylint: disable=ungrouped-imports
from apache_beam import coders
from apache_beam import metrics
from apache_beam.coders.coder_impl import create_InputStream
from apache_beam.coders.coder_impl import create_OutputStream
from apache_beam.metrics import monitoring_infos
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.metrics.metricbase import MetricName
from apache_beam.options import pipeline_options
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.portability.api import beam_provision_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners import pipeline_context
from apache_beam.runners import runner
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import fn_api_runner_transforms
from apache_beam.runners.portability.fn_api_runner_transforms import create_buffer_id
from apache_beam.runners.portability.fn_api_runner_transforms import only_element
from apache_beam.runners.portability.fn_api_runner_transforms import split_buffer_id
from apache_beam.runners.portability.fn_api_runner_transforms import unique_name
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import sdk_worker
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.transforms import trigger
from apache_beam.transforms.window import GlobalWindows
from apache_beam.utils import profiler
from apache_beam.utils import proto_utils
# This module is experimental. No backwards-compatibility guarantees.
ENCODED_IMPULSE_VALUE = beam.coders.WindowedValueCoder(
beam.coders.BytesCoder(),
beam.coders.coders.GlobalWindowCoder()).get_impl().encode_nested(
beam.transforms.window.GlobalWindows.windowed_value(b''))
class BeamFnControlServicer(beam_fn_api_pb2_grpc.BeamFnControlServicer):
UNSTARTED_STATE = 'unstarted'
STARTED_STATE = 'started'
DONE_STATE = 'done'
_DONE_MARKER = object()
def __init__(self):
self._push_queue = queue.Queue()
self._futures_by_id = dict()
self._read_thread = threading.Thread(
name='beam_control_read', target=self._read)
self._uid_counter = 0
self._state = self.UNSTARTED_STATE
self._lock = threading.Lock()
def Control(self, iterator, context):
with self._lock:
if self._state == self.DONE_STATE:
return
else:
self._state = self.STARTED_STATE
self._inputs = iterator
# Note: We only support one client for now.
self._read_thread.start()
while True:
to_push = self._push_queue.get()
if to_push is self._DONE_MARKER:
return
yield to_push
def _read(self):
for data in self._inputs:
self._futures_by_id.pop(data.instruction_id).set(data)
def push(self, item):
if item is self._DONE_MARKER:
future = None
else:
if not item.instruction_id:
self._uid_counter += 1
item.instruction_id = 'control_%s' % self._uid_counter
future = ControlFuture(item.instruction_id)
self._futures_by_id[item.instruction_id] = future
self._push_queue.put(item)
return future
def done(self):
with self._lock:
if self._state == self.STARTED_STATE:
self.push(self._DONE_MARKER)
self._read_thread.join()
self._state = self.DONE_STATE
class _GroupingBuffer(object):
"""Used to accumulate groupded (shuffled) results."""
def __init__(self, pre_grouped_coder, post_grouped_coder, windowing):
self._key_coder = pre_grouped_coder.key_coder()
self._pre_grouped_coder = pre_grouped_coder
self._post_grouped_coder = post_grouped_coder
self._table = collections.defaultdict(list)
self._windowing = windowing
self._grouped_output = None
def append(self, elements_data):
if self._grouped_output:
raise RuntimeError('Grouping table append after read.')
input_stream = create_InputStream(elements_data)
coder_impl = self._pre_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
# TODO(robertwb): We could optimize this even more by using a
# window-dropping coder for the data plane.
is_trivial_windowing = self._windowing.is_default()
while input_stream.size() > 0:
windowed_key_value = coder_impl.decode_from_stream(input_stream, True)
key, value = windowed_key_value.value
self._table[key_coder_impl.encode(key)].append(
value if is_trivial_windowing
else windowed_key_value.with_value(value))
def __iter__(self):
if not self._grouped_output:
output_stream = create_OutputStream()
if self._windowing.is_default():
globally_window = GlobalWindows.windowed_value(None).with_value
windowed_key_values = lambda key, values: [
globally_window((key, values))]
else:
trigger_driver = trigger.create_trigger_driver(self._windowing, True)
windowed_key_values = trigger_driver.process_entire_key
coder_impl = self._post_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
for encoded_key, windowed_values in self._table.items():
key = key_coder_impl.decode(encoded_key)
for wkvs in windowed_key_values(key, windowed_values):
coder_impl.encode_to_stream(wkvs, output_stream, True)
self._grouped_output = [output_stream.get()]
self._table = None
return iter(self._grouped_output)
class _WindowGroupingBuffer(object):
"""Used to partition windowed side inputs."""
def __init__(self, access_pattern, coder):
# Here's where we would use a different type of partitioning
# (e.g. also by key) for a different access pattern.
if access_pattern.urn == common_urns.side_inputs.ITERABLE.urn:
self._kv_extractor = lambda value: ('', value)
self._key_coder = coders.SingletonCoder('')
self._value_coder = coder.wrapped_value_coder
elif access_pattern.urn == common_urns.side_inputs.MULTIMAP.urn:
self._kv_extractor = lambda value: value
self._key_coder = coder.wrapped_value_coder.key_coder()
self._value_coder = (
coder.wrapped_value_coder.value_coder())
else:
raise ValueError(
"Unknown access pattern: '%s'" % access_pattern.urn)
self._windowed_value_coder = coder
self._window_coder = coder.window_coder
self._values_by_window = collections.defaultdict(list)
def append(self, elements_data):
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_value = self._windowed_value_coder.get_impl(
).decode_from_stream(input_stream, True)
key, value = self._kv_extractor(windowed_value.value)
for window in windowed_value.windows:
self._values_by_window[key, window].append(value)
def encoded_items(self):
value_coder_impl = self._value_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
for (key, window), values in self._values_by_window.items():
encoded_window = self._window_coder.encode(window)
encoded_key = key_coder_impl.encode_nested(key)
output_stream = create_OutputStream()
for value in values:
value_coder_impl.encode_to_stream(value, output_stream, True)
yield encoded_key, encoded_window, output_stream.get()
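# How this buffer is used downstream (illustrative sketch mirroring run_stage):
#
#   buf = _WindowGroupingBuffer(access_pattern, windowed_value_coder)
#   buf.append(encoded_elements)  # raw bytes read off the data plane
#   for key, window, data in buf.encoded_items():
#       pass  # write (key, window) -> data into multimap side-input state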
class FnApiRunner(runner.PipelineRunner):
def __init__(
self,
default_environment=None,
bundle_repeat=0,
use_state_iterables=False,
provision_info=None):
"""Creates a new Fn API Runner.
Args:
default_environment: the default environment to use for UserFns.
bundle_repeat: replay every bundle this many extra times, for profiling
and debugging
use_state_iterables: Intentionally split gbk iterables over state API
(for testing)
provision_info: provisioning info to make available to workers, or None
"""
super(FnApiRunner, self).__init__()
self._last_uid = -1
self._default_environment = (
default_environment
or beam_runner_api_pb2.Environment(urn=python_urns.EMBEDDED_PYTHON))
self._bundle_repeat = bundle_repeat
self._progress_frequency = None
self._profiler_factory = None
self._use_state_iterables = use_state_iterables
self._provision_info = provision_info
def _next_uid(self):
self._last_uid += 1
return str(self._last_uid)
def run_pipeline(self, pipeline, options):
MetricsEnvironment.set_metrics_supported(False)
RuntimeValueProvider.set_runtime_options({})
# Setup "beam_fn_api" experiment options if lacked.
experiments = (options.view_as(pipeline_options.DebugOptions).experiments
or [])
if not 'beam_fn_api' in experiments:
experiments.append('beam_fn_api')
options.view_as(pipeline_options.DebugOptions).experiments = experiments
# This is sometimes needed if type checking is disabled
# to enforce that the inputs (and outputs) of GroupByKey operations
# are known to be KVs.
from apache_beam.runners.dataflow.dataflow_runner import DataflowRunner
# TODO: Move group_by_key_input_visitor() to a non-dataflow specific file.
pipeline.visit(DataflowRunner.group_by_key_input_visitor())
self._bundle_repeat = self._bundle_repeat or options.view_as(
pipeline_options.DirectOptions).direct_runner_bundle_repeat
self._profiler_factory = profiler.Profile.factory_from_options(
options.view_as(pipeline_options.ProfilingOptions))
if 'use_sdf_bounded_source' in experiments:
pipeline.replace_all(DataflowRunner._SDF_PTRANSFORM_OVERRIDES)
self._latest_run_result = self.run_via_runner_api(pipeline.to_runner_api(
default_environment=self._default_environment))
return self._latest_run_result
def run_via_runner_api(self, pipeline_proto):
return self.run_stages(*self.create_stages(pipeline_proto))
@contextlib.contextmanager
def maybe_profile(self):
if self._profiler_factory:
try:
profile_id = 'direct-' + subprocess.check_output(
['git', 'rev-parse', '--abbrev-ref', 'HEAD']
).decode(errors='ignore').strip()
except subprocess.CalledProcessError:
profile_id = 'direct-unknown'
profiler = self._profiler_factory(profile_id, time_prefix='')
else:
profiler = None
if profiler:
with profiler:
yield
if not self._bundle_repeat:
logging.warning(
'The --direct_runner_bundle_repeat option is not set; '
'a significant portion of the profile may be one-time overhead.')
path = profiler.profile_output
print('CPU Profile written to %s' % path)
try:
import gprof2dot # pylint: disable=unused-variable
if not subprocess.call([
sys.executable, '-m', 'gprof2dot',
'-f', 'pstats', path, '-o', path + '.dot']):
if not subprocess.call(
['dot', '-Tsvg', '-o', path + '.svg', path + '.dot']):
print('CPU Profile rendering at file://%s.svg'
% os.path.abspath(path))
except ImportError:
# pylint: disable=superfluous-parens
print('Please install gprof2dot and dot for profile renderings.')
else:
# Empty context.
yield
def create_stages(self, pipeline_proto):
return fn_api_runner_transforms.create_and_optimize_stages(
copy.deepcopy(pipeline_proto),
phases=[fn_api_runner_transforms.annotate_downstream_side_inputs,
fn_api_runner_transforms.fix_side_input_pcoll_coders,
fn_api_runner_transforms.lift_combiners,
fn_api_runner_transforms.expand_sdf,
fn_api_runner_transforms.expand_gbk,
fn_api_runner_transforms.sink_flattens,
fn_api_runner_transforms.greedily_fuse,
fn_api_runner_transforms.read_to_impulse,
fn_api_runner_transforms.impulse_to_input,
fn_api_runner_transforms.inject_timer_pcollections,
fn_api_runner_transforms.sort_stages,
fn_api_runner_transforms.window_pcollection_coders],
known_runner_urns=frozenset([
common_urns.primitives.FLATTEN.urn,
common_urns.primitives.GROUP_BY_KEY.urn]),
use_state_iterables=self._use_state_iterables)
def run_stages(self, stage_context, stages):
worker_handler_manager = WorkerHandlerManager(
stage_context.components.environments, self._provision_info)
metrics_by_stage = {}
monitoring_infos_by_stage = {}
try:
with self.maybe_profile():
pcoll_buffers = collections.defaultdict(list)
for stage in stages:
stage_results = self.run_stage(
worker_handler_manager.get_worker_handler,
stage_context.components,
stage,
pcoll_buffers,
stage_context.safe_coders)
metrics_by_stage[stage.name] = stage_results.process_bundle.metrics
monitoring_infos_by_stage[stage.name] = (
stage_results.process_bundle.monitoring_infos)
finally:
worker_handler_manager.close_all()
return RunnerResult(
runner.PipelineState.DONE, monitoring_infos_by_stage, metrics_by_stage)
def run_stage(self,
worker_handler_factory,
pipeline_components,
stage,
pcoll_buffers,
safe_coders):
"""Run an individual stage.
Args:
worker_handler_factory: A ``callable`` that takes in an environment id and
returns a ``WorkerHandler``.
pipeline_components: TODO
stage: TODO
pcoll_buffers: TODO
safe_coders: TODO
"""
def iterable_state_write(values, element_coder_impl):
token = unique_name(None, 'iter').encode('ascii')
out = create_OutputStream()
for element in values:
element_coder_impl.encode_to_stream(element, out, True)
controller.state.blocking_append(
beam_fn_api_pb2.StateKey(
runner=beam_fn_api_pb2.StateKey.Runner(key=token)),
out.get())
return token
controller = worker_handler_factory(stage.environment)
context = pipeline_context.PipelineContext(
pipeline_components, iterable_state_write=iterable_state_write)
data_api_service_descriptor = controller.data_api_service_descriptor()
def extract_endpoints(stage):
# Returns maps of transform names to PCollection identifiers.
# Also mutates IO stages to point to the data ApiServiceDescriptor.
data_input = {}
data_side_input = {}
data_output = {}
for transform in stage.transforms:
if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
bundle_processor.DATA_OUTPUT_URN):
pcoll_id = transform.spec.payload
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
target = transform.unique_name, only_element(transform.outputs)
if pcoll_id == fn_api_runner_transforms.IMPULSE_BUFFER:
data_input[target] = [ENCODED_IMPULSE_VALUE]
else:
data_input[target] = pcoll_buffers[pcoll_id]
coder_id = pipeline_components.pcollections[
only_element(transform.outputs.values())].coder_id
elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN:
target = transform.unique_name, only_element(transform.inputs)
data_output[target] = pcoll_id
coder_id = pipeline_components.pcollections[
only_element(transform.inputs.values())].coder_id
else:
raise NotImplementedError
data_spec = beam_fn_api_pb2.RemoteGrpcPort(coder_id=coder_id)
if data_api_service_descriptor:
data_spec.api_service_descriptor.url = (
data_api_service_descriptor.url)
transform.spec.payload = data_spec.SerializeToString()
elif transform.spec.urn in fn_api_runner_transforms.PAR_DO_URNS:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for tag, si in payload.side_inputs.items():
data_side_input[transform.unique_name, tag] = (
create_buffer_id(transform.inputs[tag]), si.access_pattern)
return data_input, data_side_input, data_output
logging.info('Running %s', stage.name)
logging.debug(' %s', stage)
data_input, data_side_input, data_output = extract_endpoints(stage)
process_bundle_descriptor = beam_fn_api_pb2.ProcessBundleDescriptor(
id=self._next_uid(),
transforms={transform.unique_name: transform
for transform in stage.transforms},
pcollections=dict(pipeline_components.pcollections.items()),
coders=dict(pipeline_components.coders.items()),
windowing_strategies=dict(
pipeline_components.windowing_strategies.items()),
environments=dict(pipeline_components.environments.items()))
if controller.state_api_service_descriptor():
process_bundle_descriptor.state_api_service_descriptor.url = (
controller.state_api_service_descriptor().url)
# Store the required side inputs into state.
for (transform_id, tag), (buffer_id, si) in data_side_input.items():
_, pcoll_id = split_buffer_id(buffer_id)
value_coder = context.coders[safe_coders[
pipeline_components.pcollections[pcoll_id].coder_id]]
elements_by_window = _WindowGroupingBuffer(si, value_coder)
for element_data in pcoll_buffers[buffer_id]:
elements_by_window.append(element_data)
for key, window, elements_data in elements_by_window.encoded_items():
state_key = beam_fn_api_pb2.StateKey(
multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
ptransform_id=transform_id,
side_input_id=tag,
window=window,
key=key))
controller.state.blocking_append(state_key, elements_data)
def get_buffer(buffer_id):
kind, name = split_buffer_id(buffer_id)
if kind in ('materialize', 'timers'):
if buffer_id not in pcoll_buffers:
# Just store the data chunks for replay.
pcoll_buffers[buffer_id] = list()
elif kind == 'group':
# This is a grouping write, create a grouping buffer if needed.
if buffer_id not in pcoll_buffers:
original_gbk_transform = name
transform_proto = pipeline_components.transforms[
original_gbk_transform]
input_pcoll = only_element(list(transform_proto.inputs.values()))
output_pcoll = only_element(list(transform_proto.outputs.values()))
pre_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[input_pcoll].coder_id]]
post_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[output_pcoll].coder_id]]
windowing_strategy = context.windowing_strategies[
pipeline_components
.pcollections[output_pcoll].windowing_strategy_id]
pcoll_buffers[buffer_id] = _GroupingBuffer(
pre_gbk_coder, post_gbk_coder, windowing_strategy)
else:
# These should be the only two identifiers we produce for now,
# but special side input writes may go here.
raise NotImplementedError(buffer_id)
return pcoll_buffers[buffer_id]
def get_input_coder_impl(transform_id):
return context.coders[safe_coders[
beam_fn_api_pb2.RemoteGrpcPort.FromString(
process_bundle_descriptor.transforms[transform_id].spec.payload
).coder_id
]].get_impl()
for k in range(self._bundle_repeat):
try:
controller.state.checkpoint()
BundleManager(
controller, lambda pcoll_id: [], get_input_coder_impl,
process_bundle_descriptor, self._progress_frequency, k
).process_bundle(data_input, data_output)
finally:
controller.state.restore()
result, splits = BundleManager(
controller, get_buffer, get_input_coder_impl, process_bundle_descriptor,
self._progress_frequency).process_bundle(
data_input, data_output)
def input_for(ptransform_id, input_id):
input_pcoll = process_bundle_descriptor.transforms[
ptransform_id].inputs[input_id]
for read_id, proto in process_bundle_descriptor.transforms.items():
if (proto.spec.urn == bundle_processor.DATA_INPUT_URN
and input_pcoll in proto.outputs.values()):
return read_id, 'out'
raise RuntimeError(
'No IO transform feeds %s' % ptransform_id)
last_result = result
last_sent = data_input
while True:
deferred_inputs = collections.defaultdict(list)
for transform_id, timer_writes in stage.timer_pcollections:
# Queue any set timers as new inputs.
windowed_timer_coder_impl = context.coders[
pipeline_components.pcollections[timer_writes].coder_id].get_impl()
written_timers = get_buffer(
create_buffer_id(timer_writes, kind='timers'))
if written_timers:
# Keep only the "last" timer set per key and window.
timers_by_key_and_window = {}
for elements_data in written_timers:
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_key_timer = windowed_timer_coder_impl.decode_from_stream(
input_stream, True)
key, _ = windowed_key_timer.value
# TODO: Explode and merge windows.
assert len(windowed_key_timer.windows) == 1
timers_by_key_and_window[
key, windowed_key_timer.windows[0]] = windowed_key_timer
out = create_OutputStream()
for windowed_key_timer in timers_by_key_and_window.values():
windowed_timer_coder_impl.encode_to_stream(
windowed_key_timer, out, True)
deferred_inputs[transform_id, 'out'] = [out.get()]
written_timers[:] = []
# Queue any process-initiated delayed bundle applications.
for delayed_application in last_result.process_bundle.residual_roots:
deferred_inputs[
input_for(
delayed_application.application.ptransform_id,
delayed_application.application.input_id)
].append(delayed_application.application.element)
# Queue any runner-initiated delayed bundle applications.
prev_stops = {}
for split in splits:
for delayed_application in split.residual_roots:
deferred_inputs[
input_for(
delayed_application.application.ptransform_id,
delayed_application.application.input_id)
].append(delayed_application.application.element)
for channel_split in split.channel_splits:
coder_impl = get_input_coder_impl(channel_split.ptransform_id)
# TODO(SDF): This requires deterministic ordering of buffer iteration.
# TODO(SDF): The return split is in terms of indices. Ideally,
# a runner could map these back to actual positions to effectively
# describe the two "halves" of the now-split range. Even if we have
# to buffer each element we send (or at the very least a bit of
# metadata, like position, about each of them) this should be doable
# if they're already in memory and we are bounding the buffer size
# (e.g. to 10mb plus whatever is eagerly read from the SDK). In the
# case of non-split-points, we can either immediately replay the
# "non-split-position" elements or record them as we do the other
# delayed applications.
# Decode and recode to split the encoded buffer by element index.
all_elements = list(coder_impl.decode_all(b''.join(last_sent[
channel_split.ptransform_id, channel_split.input_id])))
residual_elements = all_elements[
channel_split.first_residual_element : prev_stops.get(
channel_split.ptransform_id, len(all_elements)) + 1]
if residual_elements:
deferred_inputs[
channel_split.ptransform_id, channel_split.input_id].append(
coder_impl.encode_all(residual_elements))
prev_stops[
channel_split.ptransform_id] = channel_split.last_primary_element
if deferred_inputs:
# The worker will be waiting on these inputs as well.
for other_input in data_input:
if other_input not in deferred_inputs:
deferred_inputs[other_input] = []
# TODO(robertwb): merge results
last_result, splits = BundleManager(
controller,
get_buffer,
get_input_coder_impl,
process_bundle_descriptor,
self._progress_frequency,
True).process_bundle(deferred_inputs, data_output)
last_sent = deferred_inputs
result = beam_fn_api_pb2.InstructionResponse(
process_bundle=beam_fn_api_pb2.ProcessBundleResponse(
monitoring_infos=monitoring_infos.consolidate(
itertools.chain(
result.process_bundle.monitoring_infos,
last_result.process_bundle.monitoring_infos))),
error=result.error or last_result.error)
else:
break
return result
# These classes are used to interact with the worker.
class StateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
class CopyOnWriteState(object):
def __init__(self, underlying):
self._underlying = underlying
self._overlay = {}
def __getitem__(self, key):
if key in self._overlay:
return self._overlay[key]
else:
return FnApiRunner.StateServicer.CopyOnWriteList(
self._underlying, self._overlay, key)
def __delitem__(self, key):
self._overlay[key] = []
def commit(self):
self._underlying.update(self._overlay)
return self._underlying
class CopyOnWriteList(object):
def __init__(self, underlying, overlay, key):
self._underlying = underlying
self._overlay = overlay
self._key = key
def __iter__(self):
if self._key in self._overlay:
return iter(self._overlay[self._key])
else:
return iter(self._underlying[self._key])
def append(self, item):
if self._key not in self._overlay:
self._overlay[self._key] = list(self._underlying[self._key])
self._overlay[self._key].append(item)
def __init__(self):
self._lock = threading.Lock()
self._state = collections.defaultdict(list)
self._checkpoint = None
self._use_continuation_tokens = False
self._continuations = {}
def checkpoint(self):
assert self._checkpoint is None
self._checkpoint = self._state
self._state = FnApiRunner.StateServicer.CopyOnWriteState(self._state)
def commit(self):
self._state.commit()
self._state = self._checkpoint.commit()
self._checkpoint = None
def restore(self):
self._state = self._checkpoint
self._checkpoint = None
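# Checkpoint/restore pattern used for bundle replay (illustrative sketch):
#
#   state.checkpoint()   # snapshot; subsequent writes land in an overlay
#   ...process a throwaway bundle...
#   state.restore()      # discard the overlay and return to the snapshot
#
# Calling state.commit() instead folds the overlay into the snapshot.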
@contextlib.contextmanager
def process_instruction_id(self, unused_instruction_id):
yield
def blocking_get(self, state_key, continuation_token=None):
with self._lock:
full_state = self._state[self._to_key(state_key)]
if self._use_continuation_tokens:
# The token is "nonce:index".
if not continuation_token:
token_base = 'token_%x' % len(self._continuations)
self._continuations[token_base] = tuple(full_state)
return b'', '%s:0' % token_base
else:
token_base, index = continuation_token.split(':')
ix = int(index)
full_state = self._continuations[token_base]
if ix == len(full_state):
return b'', None
else:
return full_state[ix], '%s:%d' % (token_base, ix + 1)
else:
assert not continuation_token
return b''.join(full_state), None
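# Paging sketch (illustrative, assuming _use_continuation_tokens is True and
# `state` is an instance of this servicer):
#
#   data, token = state.blocking_get(state_key)   # first call hands back a token
#   chunks = []
#   while token:
#       data, token = state.blocking_get(state_key, token)
#       chunks.append(data)                        # one stored blob per call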
def blocking_append(self, state_key, data):
with self._lock:
self._state[self._to_key(state_key)].append(data)
def blocking_clear(self, state_key):
with self._lock:
del self._state[self._to_key(state_key)]
@staticmethod
def _to_key(state_key):
return state_key.SerializeToString()
class GrpcStateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
def __init__(self, state):
self._state = state
def State(self, request_stream, context=None):
# Note that this eagerly mutates state, assuming any failures are fatal.
# Thus it is safe to ignore instruction_reference.
for request in request_stream:
request_type = request.WhichOneof('request')
if request_type == 'get':
data, continuation_token = self._state.blocking_get(
request.state_key, request.get.continuation_token)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
get=beam_fn_api_pb2.StateGetResponse(
data=data, continuation_token=continuation_token))
elif request_type == 'append':
self._state.blocking_append(request.state_key, request.append.data)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
append=beam_fn_api_pb2.StateAppendResponse())
elif request_type == 'clear':
self._state.blocking_clear(request.state_key)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
clear=beam_fn_api_pb2.StateClearResponse())
else:
raise NotImplementedError('Unknown state request: %s' % request_type)
class SingletonStateHandlerFactory(sdk_worker.StateHandlerFactory):
"""A singleton cache for a StateServicer."""
def __init__(self, state_handler):
self._state_handler = state_handler
def create_state_handler(self, api_service_descriptor):
"""Returns the singleton state handler."""
return self._state_handler
def close(self):
"""Does nothing."""
pass
class WorkerHandler(object):
_registered_environments = {}
def __init__(
self, control_handler, data_plane_handler, state, provision_info):
self.control_handler = control_handler
self.data_plane_handler = data_plane_handler
self.state = state
self.provision_info = provision_info
def close(self):
self.stop_worker()
def start_worker(self):
raise NotImplementedError
def stop_worker(self):
raise NotImplementedError
def data_api_service_descriptor(self):
raise NotImplementedError
def state_api_service_descriptor(self):
raise NotImplementedError
def logging_api_service_descriptor(self):
raise NotImplementedError
@classmethod
def register_environment(cls, urn, payload_type):
def wrapper(constructor):
cls._registered_environments[urn] = constructor, payload_type
return constructor
return wrapper
@classmethod
def create(cls, environment, state, provision_info):
constructor, payload_type = cls._registered_environments[environment.urn]
return constructor(
proto_utils.parse_Bytes(environment.payload, payload_type),
state,
provision_info)
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON, None)
class EmbeddedWorkerHandler(WorkerHandler):
"""An in-memory controller for fn API control, state and data planes."""
def __init__(self, unused_payload, state, provision_info):
super(EmbeddedWorkerHandler, self).__init__(
self, data_plane.InMemoryDataChannel(), state, provision_info)
self.worker = sdk_worker.SdkWorker(
sdk_worker.BundleProcessorCache(
FnApiRunner.SingletonStateHandlerFactory(self.state),
data_plane.InMemoryDataChannelFactory(
self.data_plane_handler.inverse()),
{}))
self._uid_counter = 0
def push(self, request):
if not request.instruction_id:
self._uid_counter += 1
request.instruction_id = 'control_%s' % self._uid_counter
logging.debug('CONTROL REQUEST %s', request)
response = self.worker.do_instruction(request)
logging.debug('CONTROL RESPONSE %s', response)
return ControlFuture(request.instruction_id, response)
def start_worker(self):
pass
def stop_worker(self):
self.worker.stop()
def done(self):
pass
def data_api_service_descriptor(self):
return None
def state_api_service_descriptor(self):
return None
def logging_api_service_descriptor(self):
return None
class BasicLoggingService(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
LOG_LEVEL_MAP = {
beam_fn_api_pb2.LogEntry.Severity.CRITICAL: logging.CRITICAL,
beam_fn_api_pb2.LogEntry.Severity.ERROR: logging.ERROR,
beam_fn_api_pb2.LogEntry.Severity.WARN: logging.WARNING,
beam_fn_api_pb2.LogEntry.Severity.NOTICE: logging.INFO + 1,
beam_fn_api_pb2.LogEntry.Severity.INFO: logging.INFO,
beam_fn_api_pb2.LogEntry.Severity.DEBUG: logging.DEBUG,
beam_fn_api_pb2.LogEntry.Severity.TRACE: logging.DEBUG - 1,
beam_fn_api_pb2.LogEntry.Severity.UNSPECIFIED: logging.NOTSET,
}
def Logging(self, log_messages, context=None):
yield beam_fn_api_pb2.LogControl()
for log_message in log_messages:
for log in log_message.log_entries:
logging.log(self.LOG_LEVEL_MAP[log.severity], str(log))
class BasicProvisionService(
beam_provision_api_pb2_grpc.ProvisionServiceServicer):
def __init__(self, info):
self._info = info
def GetProvisionInfo(self, request, context=None):
return beam_provision_api_pb2.GetProvisionInfoResponse(
info=self._info)
class GrpcWorkerHandler(WorkerHandler):
"""An grpc based controller for fn API control, state and data planes."""
_DEFAULT_SHUTDOWN_TIMEOUT_SECS = 5
def __init__(self, state, provision_info):
self.state = state
self.provision_info = provision_info
self.control_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10))
self.control_port = self.control_server.add_insecure_port('[::]:0')
self.control_address = 'localhost:%s' % self.control_port
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size
# is controlled in a layer above.
no_max_message_sizes = [("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)]
self.data_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10),
options=no_max_message_sizes)
self.data_port = self.data_server.add_insecure_port('[::]:0')
self.state_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10),
options=no_max_message_sizes)
self.state_port = self.state_server.add_insecure_port('[::]:0')
self.control_handler = BeamFnControlServicer()
beam_fn_api_pb2_grpc.add_BeamFnControlServicer_to_server(
self.control_handler, self.control_server)
# If we have provision info, serve these off the control port as well.
if self.provision_info:
if self.provision_info.provision_info:
provision_info = self.provision_info.provision_info
if not provision_info.worker_id:
provision_info = copy.copy(provision_info)
provision_info.worker_id = str(uuid.uuid4())
beam_provision_api_pb2_grpc.add_ProvisionServiceServicer_to_server(
BasicProvisionService(self.provision_info.provision_info),
self.control_server)
if self.provision_info.artifact_staging_dir:
m = beam_artifact_api_pb2_grpc
m.add_ArtifactRetrievalServiceServicer_to_server(
artifact_service.BeamFilesystemArtifactService(
self.provision_info.artifact_staging_dir),
self.control_server)
self.data_plane_handler = data_plane.GrpcServerDataChannel()
beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(
self.data_plane_handler, self.data_server)
beam_fn_api_pb2_grpc.add_BeamFnStateServicer_to_server(
FnApiRunner.GrpcStateServicer(state),
self.state_server)
self.logging_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=2),
options=no_max_message_sizes)
self.logging_port = self.logging_server.add_insecure_port('[::]:0')
beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
BasicLoggingService(),
self.logging_server)
logging.info('starting control server on port %s', self.control_port)
logging.info('starting data server on port %s', self.data_port)
logging.info('starting state server on port %s', self.state_port)
logging.info('starting logging server on port %s', self.logging_port)
self.logging_server.start()
self.state_server.start()
self.data_server.start()
self.control_server.start()
def data_api_service_descriptor(self):
return endpoints_pb2.ApiServiceDescriptor(
url='localhost:%s' % self.data_port)
def state_api_service_descriptor(self):
return endpoints_pb2.ApiServiceDescriptor(
url='localhost:%s' % self.state_port)
def logging_api_service_descriptor(self):
return endpoints_pb2.ApiServiceDescriptor(
url='localhost:%s' % self.logging_port)
def close(self):
self.control_handler.done()
self.data_plane_handler.close()
to_wait = [
self.control_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.data_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.state_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.logging_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS)
]
for w in to_wait:
w.wait()
super(GrpcWorkerHandler, self).close()
@WorkerHandler.register_environment(
common_urns.environments.EXTERNAL.urn, beam_runner_api_pb2.ExternalPayload)
class ExternalWorkerHandler(GrpcWorkerHandler):
def __init__(self, external_payload, state, provision_info):
super(ExternalWorkerHandler, self).__init__(state, provision_info)
self._external_payload = external_payload
def start_worker(self):
stub = beam_fn_api_pb2_grpc.BeamFnExternalWorkerPoolStub(
GRPCChannelFactory.insecure_channel(
self._external_payload.endpoint.url))
response = stub.NotifyRunnerAvailable(
beam_fn_api_pb2.NotifyRunnerAvailableRequest(
worker_id='worker_%s' % uuid.uuid4(),
control_endpoint=endpoints_pb2.ApiServiceDescriptor(
url=self.control_address),
logging_endpoint=self.logging_api_service_descriptor(),
params=self._external_payload.params))
if response.error:
raise RuntimeError("Error starting worker: %s" % response.error)
def stop_worker(self):
pass
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON_GRPC, bytes)
class EmbeddedGrpcWorkerHandler(GrpcWorkerHandler):
def __init__(self, num_workers_payload, state, provision_info):
super(EmbeddedGrpcWorkerHandler, self).__init__(state, provision_info)
self._num_threads = int(num_workers_payload) if num_workers_payload else 1
def start_worker(self):
self.worker = sdk_worker.SdkHarness(
self.control_address, worker_count=self._num_threads)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.daemon = True
self.worker_thread.start()
def stop_worker(self):
self.worker_thread.join()
@WorkerHandler.register_environment(python_urns.SUBPROCESS_SDK, bytes)
class SubprocessSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self, worker_command_line, state, provision_info):
super(SubprocessSdkWorkerHandler, self).__init__(state, provision_info)
self._worker_command_line = worker_command_line
def start_worker(self):
from apache_beam.runners.portability import local_job_service
self.worker = local_job_service.SubprocessSdkWorker(
self._worker_command_line, self.control_address)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.start()
def stop_worker(self):
self.worker_thread.join()
@WorkerHandler.register_environment(common_urns.environments.DOCKER.urn,
beam_runner_api_pb2.DockerPayload)
class DockerSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self, payload, state, provision_info):
super(DockerSdkWorkerHandler, self).__init__(state, provision_info)
self._container_image = payload.container_image
self._container_id = None
def start_worker(self):
try:
subprocess.check_call(['docker', 'pull', self._container_image])
except Exception:
logging.info('Unable to pull image %s', self._container_image)
self._container_id = subprocess.check_output(
['docker',
'run',
'-d',
# TODO: credentials
'--network=host',
self._container_image,
'--id=%s' % uuid.uuid4(),
'--logging_endpoint=%s' % self.logging_api_service_descriptor().url,
'--control_endpoint=%s' % self.control_address,
'--artifact_endpoint=%s' % self.control_address,
'--provision_endpoint=%s' % self.control_address,
]).strip()
while True:
logging.info('Waiting for docker to start up...')
status = subprocess.check_output([
'docker',
'inspect',
'-f',
'{{.State.Status}}',
self._container_id]).strip()
if status == 'running':
break
elif status in ('dead', 'exited'):
subprocess.call([
'docker',
'container',
'logs',
self._container_id])
raise RuntimeError('SDK failed to start.')
time.sleep(1)
def stop_worker(self):
if self._container_id:
subprocess.call([
'docker',
'kill',
self._container_id])
class WorkerHandlerManager(object):
def __init__(self, environments, job_provision_info=None):
self._environments = environments
self._job_provision_info = job_provision_info
self._cached_handlers = {}
self._state = FnApiRunner.StateServicer() # rename?
def get_worker_handler(self, environment_id):
if environment_id is None:
# Any environment will do, pick one arbitrarily.
environment_id = next(iter(self._environments.keys()))
environment = self._environments[environment_id]
worker_handler = self._cached_handlers.get(environment_id)
if worker_handler is None:
worker_handler = self._cached_handlers[
environment_id] = WorkerHandler.create(
environment, self._state, self._job_provision_info)
worker_handler.start_worker()
return worker_handler
def close_all(self):
for controller in set(self._cached_handlers.values()):
try:
controller.close()
except Exception:
logging.error("Error closing controller %s" % controller, exc_info=True)
self._cached_handlers = {}
class ExtendedProvisionInfo(object):
def __init__(self, provision_info=None, artifact_staging_dir=None):
self.provision_info = (
provision_info or beam_provision_api_pb2.ProvisionInfo())
self.artifact_staging_dir = artifact_staging_dir
_split_managers = []
@contextlib.contextmanager
def split_manager(stage_name, split_manager):
"""Registers a split manager to control the flow of elements to a given stage.
Used for testing.
A split manager should be a coroutine yielding desired split fractions,
receiving the corresponding split results. Currently, only one input is
supported.
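
Illustrative example (a sketch; the stage and function names are hypothetical):

    def split_once_at_half(num_elements):
        # Request a 50% split of the remaining work, then let the bundle finish.
        unused_split_result = yield 0.5

    with split_manager('MyStage', split_once_at_half):
        ...  # run the pipeline under test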
"""
try:
_split_managers.append((stage_name, split_manager))
yield
finally:
_split_managers.pop()
class BundleManager(object):
_uid_counter = 0
def __init__(
self, controller, get_buffer, get_input_coder_impl, bundle_descriptor,
progress_frequency=None, skip_registration=False):
self._controller = controller
self._get_buffer = get_buffer
self._get_input_coder_impl = get_input_coder_impl
self._bundle_descriptor = bundle_descriptor
self._registered = skip_registration
self._progress_frequency = progress_frequency
def process_bundle(self, inputs, expected_outputs):
# Unique id for the instruction processing this bundle.
BundleManager._uid_counter += 1
process_bundle_id = 'bundle_%s' % BundleManager._uid_counter
# Register the bundle descriptor, if needed.
if self._registered:
registration_future = None
else:
process_bundle_registration = beam_fn_api_pb2.InstructionRequest(
register=beam_fn_api_pb2.RegisterRequest(
process_bundle_descriptor=[self._bundle_descriptor]))
registration_future = self._controller.control_handler.push(
process_bundle_registration)
self._registered = True
unique_names = set(
t.unique_name for t in self._bundle_descriptor.transforms.values())
for stage_name, candidate in reversed(_split_managers):
if (stage_name in unique_names
or (stage_name + '/Process') in unique_names):
split_manager = candidate
break
else:
split_manager = None
if not split_manager:
# Write all the input data to the channel immediately.
for (transform_id, name), elements in inputs.items():
data_out = self._controller.data_plane_handler.output_stream(
process_bundle_id, beam_fn_api_pb2.Target(
primitive_transform_reference=transform_id, name=name))
for element_data in elements:
data_out.write(element_data)
data_out.close()
split_results = []
# Actually start the bundle.
if registration_future and registration_future.get().error:
raise RuntimeError(registration_future.get().error)
process_bundle = beam_fn_api_pb2.InstructionRequest(
instruction_id=process_bundle_id,
process_bundle=beam_fn_api_pb2.ProcessBundleRequest(
process_bundle_descriptor_reference=self._bundle_descriptor.id))
result_future = self._controller.control_handler.push(process_bundle)
with ProgressRequester(
self._controller, process_bundle_id, self._progress_frequency):
if split_manager:
(read_transform_id, name), buffer_data = only_element(inputs.items())
num_elements = len(list(
self._get_input_coder_impl(read_transform_id).decode_all(
b''.join(buffer_data))))
# Start the split manager in case it wants to set any breakpoints.
split_manager_generator = split_manager(num_elements)
try:
split_fraction = next(split_manager_generator)
done = False
except StopIteration:
done = True
# Send all the data.
data_out = self._controller.data_plane_handler.output_stream(
process_bundle_id,
beam_fn_api_pb2.Target(
primitive_transform_reference=read_transform_id, name=name))
data_out.write(b''.join(buffer_data))
data_out.close()
# Execute the requested splits.
while not done:
if split_fraction is None:
split_result = None
else:
split_request = beam_fn_api_pb2.InstructionRequest(
process_bundle_split=
beam_fn_api_pb2.ProcessBundleSplitRequest(
instruction_reference=process_bundle_id,
desired_splits={
read_transform_id:
beam_fn_api_pb2.ProcessBundleSplitRequest.DesiredSplit(
fraction_of_remainder=split_fraction,
estimated_input_elements=num_elements)
}))
split_response = self._controller.control_handler.push(
split_request).get()
for t in (0.05, 0.1, 0.2):
waiting = ('Instruction not running', 'not yet scheduled')
if any(msg in split_response.error for msg in waiting):
time.sleep(t)
split_response = self._controller.control_handler.push(
split_request).get()
if 'Unknown process bundle' in split_response.error:
# It may have finished too fast.
split_result = None
elif split_response.error:
raise RuntimeError(split_response.error)
else:
split_result = split_response.process_bundle_split
split_results.append(split_result)
try:
split_fraction = split_manager_generator.send(split_result)
except StopIteration:
break
# Gather all output data.
expected_targets = [
beam_fn_api_pb2.Target(primitive_transform_reference=transform_id,
name=output_name)
for (transform_id, output_name), _ in expected_outputs.items()]
logging.debug('Gather all output data from %s.', expected_targets)
for output in self._controller.data_plane_handler.input_elements(
process_bundle_id,
expected_targets,
abort_callback=lambda: (result_future.is_done()
and result_future.get().error)):
target_tuple = (
output.target.primitive_transform_reference, output.target.name)
if target_tuple in expected_outputs:
self._get_buffer(expected_outputs[target_tuple]).append(output.data)
logging.debug('Wait for the bundle to finish.')
result = result_future.get()
if result.error:
raise RuntimeError(result.error)
if result.process_bundle.requires_finalization:
finalize_request = beam_fn_api_pb2.InstructionRequest(
finalize_bundle=
beam_fn_api_pb2.FinalizeBundleRequest(
instruction_reference=process_bundle_id
))
self._controller.control_handler.push(
finalize_request)
return result, split_results
class ProgressRequester(threading.Thread):
def __init__(self, controller, instruction_id, frequency, callback=None):
super(ProgressRequester, self).__init__()
self._controller = controller
self._instruction_id = instruction_id
self._frequency = frequency
self._done = False
self._latest_progress = None
self._callback = callback
self.daemon = True
def __enter__(self):
if self._frequency:
self.start()
def __exit__(self, *unused_exc_info):
if self._frequency:
self.stop()
def run(self):
while not self._done:
try:
progress_result = self._controller.control_handler.push(
beam_fn_api_pb2.InstructionRequest(
process_bundle_progress=
beam_fn_api_pb2.ProcessBundleProgressRequest(
instruction_reference=self._instruction_id))).get()
self._latest_progress = progress_result.process_bundle_progress
if self._callback:
self._callback(self._latest_progress)
except Exception as exn:
logging.error("Bad progress: %s", exn)
time.sleep(self._frequency)
def stop(self):
self._done = True
class ControlFuture(object):
def __init__(self, instruction_id, response=None):
self.instruction_id = instruction_id
if response:
self._response = response
else:
self._response = None
self._condition = threading.Condition()
def is_done(self):
return self._response is not None
def set(self, response):
with self._condition:
self._response = response
self._condition.notify_all()
def get(self, timeout=None):
if not self._response:
with self._condition:
if not self._response:
self._condition.wait(timeout)
return self._response
class FnApiMetrics(metrics.metric.MetricResults):
def __init__(self, step_monitoring_infos, user_metrics_only=True):
"""Used for querying metrics from the PipelineResult object.
step_monitoring_infos: Per step metrics specified as MonitoringInfos.
user_metrics_only: If True, restrict the returned metrics to user metrics
extracted from the step_monitoring_infos.
"""
self._counters = {}
self._distributions = {}
self._gauges = {}
self._user_metrics_only = user_metrics_only
self._init_metrics_from_monitoring_infos(step_monitoring_infos)
self._monitoring_infos = step_monitoring_infos
def _init_metrics_from_monitoring_infos(self, step_monitoring_infos):
for smi in step_monitoring_infos.values():
# Only include user metrics.
for mi in smi:
if (self._user_metrics_only and
not monitoring_infos.is_user_monitoring_info(mi)):
continue
key = self._to_metric_key(mi)
if monitoring_infos.is_counter(mi):
self._counters[key] = (
monitoring_infos.extract_metric_result_map_value(mi))
elif monitoring_infos.is_distribution(mi):
self._distributions[key] = (
monitoring_infos.extract_metric_result_map_value(mi))
elif monitoring_infos.is_gauge(mi):
self._gauges[key] = (
monitoring_infos.extract_metric_result_map_value(mi))
def _to_metric_key(self, monitoring_info):
# Right now this assumes that all metrics have a PTRANSFORM
ptransform_id = monitoring_info.labels['PTRANSFORM']
namespace, name = monitoring_infos.parse_namespace_and_name(monitoring_info)
return MetricKey(ptransform_id, MetricName(namespace, name))
def query(self, filter=None):
counters = [metrics.execution.MetricResult(k, v, v)
for k, v in self._counters.items()
if self.matches(filter, k)]
distributions = [metrics.execution.MetricResult(k, v, v)
for k, v in self._distributions.items()
if self.matches(filter, k)]
gauges = [metrics.execution.MetricResult(k, v, v)
for k, v in self._gauges.items()
if self.matches(filter, k)]
return {self.COUNTERS: counters,
self.DISTRIBUTIONS: distributions,
self.GAUGES: gauges}
def monitoring_infos(self):
return [item for sublist in self._monitoring_infos.values() for item in
sublist]
class RunnerResult(runner.PipelineResult):
def __init__(self, state, monitoring_infos_by_stage, metrics_by_stage):
super(RunnerResult, self).__init__(state)
self._monitoring_infos_by_stage = monitoring_infos_by_stage
self._metrics_by_stage = metrics_by_stage
self._metrics = None
self._monitoring_metrics = None
def wait_until_finish(self, duration=None):
return self._state
def metrics(self):
"""Returns a queryable object including user metrics only."""
if self._metrics is None:
self._metrics = FnApiMetrics(
self._monitoring_infos_by_stage, user_metrics_only=True)
return self._metrics
def monitoring_metrics(self):
"""Returns a queryable object including all metrics."""
if self._monitoring_metrics is None:
self._monitoring_metrics = FnApiMetrics(
self._monitoring_infos_by_stage, user_metrics_only=False)
return self._monitoring_metrics
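# Illustrative use of the result object (a sketch):
#
#   result = FnApiRunner().run_pipeline(pipeline, options)
#   user_results = result.metrics().query()
#   all_results = result.monitoring_metrics().query()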
|
core.py
|
"""
************
MQTTany Core
************
:Author: Michael Murton
"""
# Copyright (c) 2019-2021 MQTTany contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__all__ = ["start", "stop"]
import multiprocessing as mproc
import time
import types
import typing as t
from importlib import import_module
from queue import Empty as QueueEmptyError
import bus
import logger
from common import BusMessage, PoisonPill, PublishMessage, SignalHook, SubscribeMessage
from config import load_config
from logger import log_traceback
from modules import (
ATTR_CBTRANSMIT,
ATTR_LOAD,
ATTR_LOG,
ATTR_NODES,
ATTR_QRESEND,
ATTR_QSUBSCRIBE,
ATTR_QTRANSMIT,
ATTR_START,
ATTR_STOP,
ATTR_TXREADY,
ATTR_TYPE,
ModuleType,
call,
)
log = logger.get_logger("core")
communication_modules: t.List[types.ModuleType] = []
interface_modules: t.List[types.ModuleType] = []
def _loop_comm(module: types.ModuleType) -> None:
def _get_message(queue_name: str) -> t.Union[BusMessage, None]:
try:
return getattr(module, queue_name).get_nowait()
except QueueEmptyError:
return None
def _queue_resend(msg: BusMessage) -> None:
messages: t.List[BusMessage] = [msg]
message = _get_message(ATTR_QRESEND)
while message:
messages.append(message)
message = _get_message(ATTR_QRESEND)
for message in messages:
getattr(module, ATTR_QRESEND).put_nowait(message)
signal = SignalHook()
call(module, ATTR_START)
message: t.Union[BusMessage, None] = None
while (not isinstance(message, PoisonPill)) and signal.signal != signal.SIGTERM:
message = None
if call(module, ATTR_TXREADY):
# check for messages in the resend queue first
message = _get_message(ATTR_QRESEND)
# if it is empty then we can get a new message from the transmit queue
message = message if message else _get_message(ATTR_QTRANSMIT)
if isinstance(message, PublishMessage):
# TODO convert modules to classes
module.log.trace("Message received to transmit: %s", message) # type: ignore
if not call(module, ATTR_CBTRANSMIT, message=message):
# transmit failed
# TODO convert modules to classes
module.log.debug( # type: ignore
"Failed to send message, queued for retransmission"
)
_queue_resend(message)
time.sleep(0.5) # 500ms
elif (not isinstance(message, PoisonPill)) and message is not None:
try:
# TODO convert modules to classes
module.log.warn("Got unrecognized message to transmit: %s", message) # type: ignore
except Exception:
# TODO convert modules to classes
module.log.warn("Got unrecognized message to transmit") # type: ignore
else:
# module not ready to transmit, but check transmit queue in case exit is requested
message = _get_message(ATTR_QTRANSMIT)
if isinstance(message, PublishMessage):
# TODO convert modules to classes
module.log.debug("Not ready to send, message queued for retransmission") # type: ignore
_queue_resend(message)
time.sleep(0.5) # 500ms
if not message:
time.sleep(0.025) # 25ms
elif isinstance(message, PoisonPill):
# TODO convert modules to classes
module.log.trace("Module stopping") # type: ignore
if signal.signal == signal.SIGTERM:
# TODO convert modules to classes
module.log.trace("Received %s", signal.signal.name) # type: ignore
call(module, ATTR_STOP)
def _loop_interface(module: types.ModuleType) -> None:
signal = SignalHook()
call(module, ATTR_START)
message: t.Union[BusMessage, None] = None
while (not isinstance(message, PoisonPill)) and signal.signal != signal.SIGTERM:
try:
message = getattr(module, ATTR_QSUBSCRIBE).get(timeout=1)
except QueueEmptyError:
pass
else:
if isinstance(message, SubscribeMessage):
# TODO convert modules to classes
module.log.trace("Message received on subscribe queue: %s", message) # type: ignore
call(module, message.callback, message=message)
elif isinstance(message, PoisonPill):
# TODO convert modules to classes
module.log.trace("Module stopping") # type: ignore
else:
try:
# TODO convert modules to classes
module.log.warn( # type: ignore
"Got unrecognized message on subscribe queue: %s", message
)
except Exception:
# TODO convert modules to classes
module.log.warn("Got unrecognized message on subscribe queue") # type: ignore
if signal.signal == signal.SIGTERM:
# TODO convert modules to classes
module.log.trace("Received %s", signal.signal.name) # type: ignore
call(module, ATTR_STOP)
def _validate_module(module: types.ModuleType) -> bool:
module_name = module.__name__.split(".")[-1]
valid = True
def check_function(name: str, required: t.Optional[bool] = True) -> bool:
func_valid = True
log_func = getattr(log, "error" if required else "debug")
cb = getattr(module, name, None)
if cb is None:
log_func("Module '%s' has no '%s' function", module_name, name)
func_valid = False
elif not callable(cb):
log.warn(
"Module '%s' does not have a callable '%s' function", module_name, name
)
func_valid = False
return func_valid if required else True
if isinstance(getattr(module, ATTR_TYPE, None), ModuleType):
# Flag modules whose ATTR_LOG attribute is not a real logging.Logger.
if not isinstance(getattr(module, ATTR_LOG, None), logger.logging.Logger):
log.error(
"Module '%s' does not have a valid logger assigned to '%s'",
module_name,
ATTR_LOG,
)
valid = False
valid &= check_function(ATTR_LOAD)
valid &= check_function(ATTR_START, False)
valid &= check_function(ATTR_STOP, False)
if getattr(module, ATTR_TYPE) == ModuleType.COMMUNICATION:
log.debug("Module '%s' is a communication module", module_name)
valid &= check_function(ATTR_TXREADY)
valid &= check_function(ATTR_CBTRANSMIT)
elif getattr(module, ATTR_TYPE) == ModuleType.INTERFACE:
log.debug("Module '%s' is an interface module", module_name)
if not hasattr(module, ATTR_NODES):
log.error("Module '%s' is missing '%s'", module_name, ATTR_NODES)
valid = False
elif hasattr(module, ATTR_TYPE):
log.error(
"Module '%s' has an invalid module type and will not be loaded", module_name
)
valid = False
else:
log.error("Module '%s' has no module type and will not be loaded", module_name)
valid = False
return valid
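# Rough shape of a module that passes these checks (illustrative summary; the
# concrete attribute names are whatever the ATTR_* constants resolve to, which
# is not shown in this file):
#
#   - an ATTR_TYPE attribute set to a ModuleType value
#   - an ATTR_LOG logger obtained from the shared logger module
#   - a callable ATTR_LOAD, plus optional ATTR_START / ATTR_STOP callables
#   - communication modules: callable ATTR_TXREADY and ATTR_CBTRANSMIT
#   - interface modules: a non-empty ATTR_NODES mapping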
def _load_modules(
config_file: str, core_queue: "mproc.Queue[str]"
) -> t.List[types.ModuleType]:
"""
Loads each module with a section in the config and spawns a process for them
"""
config = load_config(config_file)
if not config:
return []
for module_name in [key for key in config if isinstance(config[key], dict)]:
module = None
log.debug("Loading module '%s'", module_name)
try:
module = import_module(f"modules.{module_name}")
except ImportError as err:
log.error("Failed to import module '%s'", module_name)
log.error(" %s", err)
log_traceback(log)
log.error("Module '%s' was not loaded", module_name)
except ImportWarning as err:
log.warn("Warnings occured when importing module '%s'", module_name)
log.warn(" %s", err)
log_traceback(log)
log.error("Module '%s' was not loaded", module_name)
else:
if _validate_module(module):
# call module load
if not call(module, ATTR_LOAD, config_raw=config[module_name]):
log.warn("Module '%s' load failed", module_name)
continue
else:
log.debug("Module '%s' loaded successfully", module_name)
if getattr(module, ATTR_TYPE) == ModuleType.COMMUNICATION:
bus.setup_comm_module(module, core_queue)
communication_modules.append(module)
elif getattr(module, ATTR_TYPE) == ModuleType.INTERFACE:
bus.setup_interface_module(module)
if not getattr(module, ATTR_NODES, {}):
log.error(
"Module '%s' contains no valid nodes and will not be loaded",
module_name,
)
continue
interface_modules.append(module)
finally:
del module
del config
return communication_modules + interface_modules
def _start_modules() -> None:
"""
Starts a subprocess for each module that was loaded.
"""
for module in communication_modules + interface_modules:
module_name = module.__name__.split(".")[-1]
try:
log.trace("Creating process for '%s'", module_name)
if getattr(module, ATTR_TYPE) == ModuleType.COMMUNICATION:
target = _loop_comm
else: # if getattr(module, ATTR_TYPE) == ModuleType.INTERFACE:
target = _loop_interface
# TODO convert modules to classes
module.process = mproc.Process( # type: ignore
name=module_name, target=target, args=(module,), daemon=False
)
except Exception as err: # pylint: disable=broad-except
log.error("Failed to create process for module '%s'", module_name)
log.error(" %s", err)
else:
log.trace("Process created successfully for module '%s'", module_name)
try:
log.trace("Starting process for '%s'", module_name)
# TODO convert modules to classes
module.process.start() # type: ignore
except Exception as err: # pylint: disable=broad-except
log.error("Failed to start process for module '%s'", module_name)
log.error(" %s", err)
else:
log.info("Module '%s' started successfully", module_name)
def _stop_modules() -> None:
"""
Unloads each module that was loaded and terminates processes
"""
for module in interface_modules + communication_modules:
module_name = module.__name__.split(".")[-1]
if module:
if hasattr(module, "process"):
# TODO convert modules to classes
if module.process.is_alive(): # type: ignore
log.trace(
"Stopping subprocess for '%s' with 10s timeout", module_name
)
if hasattr(module, "transmit_queue"):
# TODO convert modules to classes
module.transmit_queue.put_nowait(PoisonPill()) # type: ignore
else:
# TODO convert modules to classes
module.subscribe_queue.put_nowait(PoisonPill()) # type: ignore
# TODO convert modules to classes
module.process.join(10) # type: ignore
if module.process.is_alive(): # type: ignore
log.warn(
"Subprocess for module '%s' did not stop when requested, "
"terminating forcefully",
module_name,
)
# TODO convert modules to classes
module.process.terminate() # type: ignore
# make sure cleanup is done
# TODO convert modules to classes
module.process.join(10) # type: ignore
log.warn(
"Subprocess terminated forcefully for module '%s'",
module_name,
)
else:
log.debug(
"Subproccess for module '%s' stopped cleanly", module_name
)
else:
# TODO convert modules to classes
module.process.join(10) # type: ignore
log.warn("Subprocess for module '%s' was not running", module_name)
else:
log.warn("Module '%s' does not have a subprocess", module_name)
log.info("Module '%s' unloaded", module_name)
def start(core_queue: "mproc.Queue[str]", config_file: str) -> None:
def check_import(name: str) -> bool:
try:
lib = import_module(name)
del lib
except ModuleNotFoundError:
log.error("Missing import: %s", name)
return False
else:
return True
missing_imports = 0
for name in ["yaml", "yamlloader", "mprop", "adafruit_platformdetect", "periphery"]:
missing_imports += int(not check_import(name))
if missing_imports > 0:
log.error("Please see the wiki for instructions on how to install requirements")
core_queue.put_nowait(__name__)
else:
import gpio
gpio.init()
if _load_modules(config_file, core_queue):
_start_modules()
bus.start()
else:
core_queue.put_nowait(__name__)
def stop() -> None:
_stop_modules()
bus.stop()
|
scripts_regression_tests.py
|
#!/usr/bin/env python
"""
Script containing CIME python regression test suite. This suite should be run
to confirm overall CIME correctness.
"""
import glob, os, re, shutil, signal, sys, tempfile, \
threading, time, logging, unittest, getpass, \
filecmp
from xml.etree.ElementTree import ParseError
LIB_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),"..","lib")
sys.path.append(LIB_DIR)
# Remove all pyc files to ensure we're testing the right things
import subprocess, argparse
subprocess.call('/bin/rm -f $(find . -name "*.pyc")', shell=True, cwd=LIB_DIR)
import six
from six import assertRaisesRegex
import collections
from CIME.utils import run_cmd, run_cmd_no_fail, get_lids, get_current_commit, safe_copy, CIMEError
import get_tests
import CIME.test_scheduler, CIME.wait_for_tests
from CIME.test_scheduler import TestScheduler
from CIME.XML.compilers import Compilers
from CIME.XML.env_run import EnvRun
from CIME.XML.machines import Machines
from CIME.XML.files import Files
from CIME.case import Case
from CIME.code_checker import check_code, get_all_checkable_files
from CIME.test_status import *
SCRIPT_DIR = CIME.utils.get_scripts_root()
TOOLS_DIR = os.path.join(SCRIPT_DIR,"Tools")
TEST_COMPILER = None
GLOBAL_TIMEOUT = None
TEST_MPILIB = None
MACHINE = None
FAST_ONLY = False
NO_BATCH = False
NO_CMAKE = False
TEST_ROOT = None
NO_TEARDOWN = False
os.environ["CIME_GLOBAL_WALLTIME"] = "0:05:00"
# pragma pylint: disable=protected-access
###############################################################################
def run_cmd_assert_result(test_obj, cmd, from_dir=None, expected_stat=0, env=None, verbose=False):
###############################################################################
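    # Helper: run `cmd` (optionally from `from_dir` and with `env`), assert that its
    # exit status equals `expected_stat`, and return the command's stdout. The failure
    # message embeds the command, directory, stdout and stderr to ease debugging.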
from_dir = os.getcwd() if from_dir is None else from_dir
stat, output, errput = run_cmd(cmd, from_dir=from_dir, env=env, verbose=verbose)
if expected_stat == 0:
expectation = "SHOULD HAVE WORKED, INSTEAD GOT STAT %s" % stat
else:
expectation = "EXPECTED STAT %s, INSTEAD GOT STAT %s" % (expected_stat, stat)
msg = \
"""
COMMAND: %s
FROM_DIR: %s
%s
OUTPUT: %s
ERRPUT: %s
""" % (cmd, from_dir, expectation, output, errput)
test_obj.assertEqual(stat, expected_stat, msg=msg)
return output
###############################################################################
def assert_test_status(test_obj, test_name, test_status_obj, test_phase, expected_stat):
###############################################################################
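    # Helper: assert that `test_status_obj` reports `expected_stat` for `test_phase`,
    # with a message identifying the test and phase on mismatch.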
test_status = test_status_obj.get_status(test_phase)
test_obj.assertEqual(test_status, expected_stat, msg="Problem with {}: for phase '{}': has status '{}', expected '{}'".format(test_name, test_phase, test_status, expected_stat))
###############################################################################
class A_RunUnitTests(unittest.TestCase):
###############################################################################
def test_resolve_variable_name(self):
files = Files()
machinefile = files.get_value("MACHINES_SPEC_FILE")
self.assertTrue(os.path.isfile(machinefile),
msg="Path did not resolve to existing file %s" % machinefile)
def test_unittests(self):
# Finds all files contained in CIME/tests or its subdirectories that
# match the pattern 'test*.py', and runs the unit tests found there
# (i.e., tests defined using python's unittest module).
#
# This is analogous to running:
# python -m unittest discover -s CIME/tests -t .
# from cime/scripts/lib
#
# Yes, that means we have a bunch of unit tests run from this one unit
# test.
testsuite = unittest.defaultTestLoader.discover(
start_dir = os.path.join(LIB_DIR,"CIME","tests"),
pattern = 'test*.py',
top_level_dir = LIB_DIR)
testrunner = unittest.TextTestRunner(buffer=False)
# Disable logging; otherwise log messages written by code under test
# clutter the unit test output
log_lvl = logging.getLogger().getEffectiveLevel()
logging.disable(logging.CRITICAL)
try:
results = testrunner.run(testsuite)
finally:
logging.getLogger().setLevel(log_lvl)
self.assertTrue(results.wasSuccessful())
def test_lib_doctests(self):
# Find and run all the doctests in the lib directory tree
skip_list = ["six.py", "CIME/SystemTests/mvk.py", "CIME/SystemTests/pgn.py"]
for root, _, files in os.walk(LIB_DIR):
for file_ in files:
filepath = os.path.join(root, file_)[len(LIB_DIR)+1:]
if filepath.endswith(".py") and filepath not in skip_list:
with open(os.path.join(root, file_)) as fd:
content = fd.read()
if '>>>' in content:
print("Running doctests for {}".format(filepath))
run_cmd_assert_result(self, 'PYTHONPATH={}:$PYTHONPATH python -m doctest {} 2>&1'.format(LIB_DIR, filepath), from_dir=LIB_DIR)
else:
print("{} has no doctests".format(filepath))
###############################################################################
def make_fake_teststatus(path, testname, status, phase):
###############################################################################
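    # Write a fake TestStatus file under `path`: every core phase before `phase` is
    # marked PASS and `phase` itself gets `status`; later phases are left unset. When
    # `phase` is the RUN phase, a "time=42" comment is attached so downstream parsing
    # has something to read.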
expect(phase in CORE_PHASES, "Bad phase '%s'" % phase)
with TestStatus(test_dir=path, test_name=testname) as ts:
for core_phase in CORE_PHASES:
if core_phase == phase:
ts.set_status(core_phase, status, comments=("time=42" if phase == RUN_PHASE else ""))
break
else:
ts.set_status(core_phase, TEST_PASS_STATUS, comments=("time=42" if phase == RUN_PHASE else ""))
###############################################################################
def parse_test_status(line):
###############################################################################
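    # Parse a wait_for_tests output line of the form
    # "Test 'NAME' finished with status 'STATUS'" and return (NAME, STATUS).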
regex = re.compile(r"Test '(\w+)' finished with status '(\w+)'")
m = regex.match(line)
return m.groups()
###############################################################################
def kill_subprocesses(name=None, sig=signal.SIGKILL, expected_num_killed=None, tester=None):
###############################################################################
# Kill all subprocesses
proc_ids = CIME.utils.find_proc_id(proc_name=name, children_only=True)
if (expected_num_killed is not None):
tester.assertEqual(len(proc_ids), expected_num_killed,
msg="Expected to find %d processes to kill, found %d" % (expected_num_killed, len(proc_ids)))
for proc_id in proc_ids:
try:
os.kill(proc_id, sig)
except OSError:
pass
###############################################################################
def kill_python_subprocesses(sig=signal.SIGKILL, expected_num_killed=None, tester=None):
###############################################################################
kill_subprocesses("[Pp]ython", sig, expected_num_killed, tester)
###########################################################################
def assert_dashboard_has_build(tester, build_name, expected_count=1):
###########################################################################
# Do not test E3SM dashboard if model is CESM
if CIME.utils.get_model() == "e3sm":
time.sleep(10) # Give chance for cdash to update
wget_file = tempfile.mktemp()
run_cmd_no_fail("wget https://my.cdash.org/index.php?project=ACME_test --no-check-certificate -O %s" % wget_file)
raw_text = open(wget_file, "r").read()
os.remove(wget_file)
num_found = raw_text.count(build_name)
tester.assertEqual(num_found, expected_count,
msg="Dashboard did not have expected num occurances of build name '%s'. Expected %s, found %s" % (build_name, expected_count, num_found))
###############################################################################
def setup_proxy():
###############################################################################
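    # If no http_proxy is set, fall back to the machine's PROXY setting (when defined)
    # and return True so the caller knows to unset the variable again later.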
if ("http_proxy" not in os.environ):
proxy = MACHINE.get_value("PROXY")
if (proxy is not None):
os.environ["http_proxy"] = proxy
return True
return False
###############################################################################
class N_TestUnitTest(unittest.TestCase):
###############################################################################
@classmethod
def setUpClass(cls):
cls._do_teardown = []
cls._testroot = os.path.join(TEST_ROOT, 'TestUnitTests')
cls._testdirs = []
def _has_unit_test_support(self):
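        # A machine supports the Fortran unit tests only if PFUNIT_PATH is defined
        # for the chosen compiler with MPILIB=mpi-serial and compile_threaded=false.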
if TEST_COMPILER is None:
default_compiler = MACHINE.get_default_compiler()
compiler = Compilers(MACHINE, compiler=default_compiler)
else:
compiler = Compilers(MACHINE, compiler=TEST_COMPILER)
attrs = {'MPILIB': 'mpi-serial', 'compile_threaded': 'false'}
pfunit_path = compiler.get_optional_compiler_node("PFUNIT_PATH",
attributes=attrs)
if pfunit_path is None:
return False
else:
return True
def test_a_unit_test(self):
cls = self.__class__
if not self._has_unit_test_support():
self.skipTest("Skipping TestUnitTest - PFUNIT_PATH not found for the default compiler on this machine")
test_dir = os.path.join(cls._testroot,"unit_tester_test")
cls._testdirs.append(test_dir)
os.makedirs(test_dir)
unit_test_tool = os.path.abspath(os.path.join(CIME.utils.get_cime_root(),"scripts","fortran_unit_testing","run_tests.py"))
test_spec_dir = os.path.join(os.path.dirname(unit_test_tool),"Examples", "interpolate_1d", "tests")
args = "--build-dir {} --test-spec-dir {}".format(test_dir, test_spec_dir)
args += " --machine {}".format(MACHINE.get_machine_name())
run_cmd_no_fail("{} {}".format(unit_test_tool, args))
cls._do_teardown.append(test_dir)
def test_b_cime_f90_unit_tests(self):
cls = self.__class__
if (FAST_ONLY):
self.skipTest("Skipping slow test")
if not self._has_unit_test_support():
self.skipTest("Skipping TestUnitTest - PFUNIT_PATH not found for the default compiler on this machine")
test_dir = os.path.join(cls._testroot,"driver_f90_tests")
cls._testdirs.append(test_dir)
os.makedirs(test_dir)
test_spec_dir = CIME.utils.get_cime_root()
unit_test_tool = os.path.abspath(os.path.join(test_spec_dir,"scripts","fortran_unit_testing","run_tests.py"))
args = "--build-dir {} --test-spec-dir {}".format(test_dir, test_spec_dir)
args += " --machine {}".format(MACHINE.get_machine_name())
run_cmd_no_fail("{} {}".format(unit_test_tool, args))
cls._do_teardown.append(test_dir)
@classmethod
def tearDownClass(cls):
do_teardown = len(cls._do_teardown) > 0 and sys.exc_info() == (None, None, None) and not NO_TEARDOWN
teardown_root = True
for tfile in cls._testdirs:
if tfile not in cls._do_teardown:
print("Detected failed test or user request no teardown")
print("Leaving case directory : %s"%tfile)
teardown_root = False
elif do_teardown:
shutil.rmtree(tfile)
if teardown_root and do_teardown:
shutil.rmtree(cls._testroot)
###############################################################################
class J_TestCreateNewcase(unittest.TestCase):
###############################################################################
@classmethod
def setUpClass(cls):
cls._testdirs = []
cls._do_teardown = []
cls._testroot = os.path.join(TEST_ROOT, 'TestCreateNewcase')
def test_a_createnewcase(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'testcreatenewcase')
if os.path.exists(testdir):
shutil.rmtree(testdir)
args = " --case %s --compset X --res f19_g16 --output-root %s --handle-preexisting-dirs=r" % (testdir, cls._testroot)
if TEST_COMPILER is not None:
args = args + " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args = args + " --mpilib %s"%TEST_MPILIB
cls._testdirs.append(testdir)
run_cmd_assert_result(self, "./create_newcase %s"%(args), from_dir=SCRIPT_DIR)
self.assertTrue(os.path.exists(testdir))
self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup")))
run_cmd_assert_result(self, "./case.setup", from_dir=testdir)
run_cmd_assert_result(self, "./case.build", from_dir=testdir)
with Case(testdir, read_only=False) as case:
ntasks = case.get_value("NTASKS_ATM")
case.set_value("NTASKS_ATM", ntasks+1)
# this should fail with a locked file issue
run_cmd_assert_result(self, "./case.build",
from_dir=testdir, expected_stat=1)
run_cmd_assert_result(self, "./case.setup --reset", from_dir=testdir)
run_cmd_assert_result(self, "./case.build", from_dir=testdir)
with Case(testdir, read_only=False) as case:
case.set_value("CHARGE_ACCOUNT", "fred")
# this should not fail with a locked file issue
run_cmd_assert_result(self, "./case.build", from_dir=testdir)
run_cmd_assert_result(self, "./case.st_archive --test-all", from_dir=testdir)
# Trying to set values outside of context manager should fail
case = Case(testdir, read_only=False)
with self.assertRaises(CIMEError):
case.set_value("NTASKS_ATM", 42)
# Trying to read_xml with pending changes should fail
with self.assertRaises(CIMEError):
with Case(testdir, read_only=False) as case:
case.set_value("CHARGE_ACCOUNT", "fouc")
case.read_xml()
cls._do_teardown.append(testdir)
def test_aa_no_flush_on_instantiate(self):
testdir = os.path.join(self.__class__._testroot, 'testcreatenewcase')
with Case(testdir, read_only=False) as case:
for env_file in case._files:
self.assertFalse(env_file.needsrewrite, msg="Instantiating a case should not trigger a flush call")
with Case(testdir, read_only=False) as case:
case.set_value("HIST_OPTION","nyears")
runfile = case.get_env('run')
self.assertTrue(runfile.needsrewrite, msg="Expected flush call not triggered")
for env_file in case._files:
if env_file != runfile:
self.assertFalse(env_file.needsrewrite, msg="Unexpected flush triggered for file {}"
.format(env_file.filename))
# Flush the file
runfile.write()
# set it again to the same value
case.set_value("HIST_OPTION","nyears")
# now the file should not need to be flushed
for env_file in case._files:
self.assertFalse(env_file.needsrewrite, msg="Unexpected flush triggered for file {}"
.format(env_file.filename))
# Check once more with a new instance
with Case(testdir, read_only=False) as case:
case.set_value("HIST_OPTION","nyears")
for env_file in case._files:
self.assertFalse(env_file.needsrewrite, msg="Unexpected flush triggered for file {}"
.format(env_file.filename))
def test_b_user_mods(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'testusermods')
if os.path.exists(testdir):
shutil.rmtree(testdir)
cls._testdirs.append(testdir)
user_mods_dir = os.path.join(CIME.utils.get_python_libs_root(), "..", "tests", "user_mods_test1")
args = " --case %s --compset X --res f19_g16 --user-mods-dir %s --output-root %s --handle-preexisting-dirs=r"% (testdir, user_mods_dir, cls._testroot)
if TEST_COMPILER is not None:
args = args + " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args = args + " --mpilib %s"%TEST_MPILIB
run_cmd_assert_result(self, "%s/create_newcase %s "
% (SCRIPT_DIR, args),from_dir=SCRIPT_DIR)
self.assertTrue(os.path.isfile(os.path.join(testdir,"SourceMods","src.drv","somefile.F90")), msg="User_mods SourceMod missing")
with open(os.path.join(testdir,"user_nl_cpl"),"r") as fd:
contents = fd.read()
self.assertTrue("a different cpl test option" in contents, msg="User_mods contents of user_nl_cpl missing")
self.assertTrue("a cpl namelist option" in contents, msg="User_mods contents of user_nl_cpl missing")
cls._do_teardown.append(testdir)
def test_c_create_clone_keepexe(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'test_create_clone_keepexe')
if os.path.exists(testdir):
shutil.rmtree(testdir)
prevtestdir = cls._testdirs[0]
user_mods_dir = os.path.join(CIME.utils.get_python_libs_root(), "..", "tests", "user_mods_test3")
cmd = "%s/create_clone --clone %s --case %s --keepexe --user-mods-dir %s" \
% (SCRIPT_DIR, prevtestdir, testdir, user_mods_dir)
run_cmd_assert_result(self, cmd, from_dir=SCRIPT_DIR, expected_stat=1)
def test_d_create_clone_new_user(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'test_create_clone_new_user')
if os.path.exists(testdir):
shutil.rmtree(testdir)
prevtestdir = cls._testdirs[0]
cls._testdirs.append(testdir)
# change the USER and CIME_OUTPUT_ROOT to nonsense values
# this is intended as a test of whether create_clone is independent of user
run_cmd_assert_result(self, "./xmlchange USER=this_is_not_a_user",
from_dir=prevtestdir)
fakeoutputroot = cls._testroot.replace(os.environ.get("USER"), "this_is_not_a_user")
run_cmd_assert_result(self, "./xmlchange CIME_OUTPUT_ROOT=%s"%fakeoutputroot,
from_dir=prevtestdir)
# this test should pass (user name is replaced)
run_cmd_assert_result(self, "%s/create_clone --clone %s --case %s " %
(SCRIPT_DIR, prevtestdir, testdir),from_dir=SCRIPT_DIR)
shutil.rmtree(testdir)
# this test should pass
run_cmd_assert_result(self, "%s/create_clone --clone %s --case %s --cime-output-root %s" %
(SCRIPT_DIR, prevtestdir, testdir, cls._testroot),from_dir=SCRIPT_DIR)
cls._do_teardown.append(testdir)
def test_dd_create_clone_not_writable(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'test_create_clone_not_writable')
if os.path.exists(testdir):
shutil.rmtree(testdir)
prevtestdir = cls._testdirs[0]
cls._testdirs.append(testdir)
with Case(prevtestdir, read_only=False) as case1:
case2 = case1.create_clone(testdir)
with self.assertRaises(CIMEError):
case2.set_value("CHARGE_ACCOUNT", "fouc")
def test_e_xmlquery(self):
# Set script and script path
xmlquery = "./xmlquery"
cls = self.__class__
casedir = cls._testdirs[0]
# Check for environment
self.assertTrue(os.path.isdir(SCRIPT_DIR))
self.assertTrue(os.path.isdir(TOOLS_DIR))
self.assertTrue(os.path.isfile(os.path.join(casedir,xmlquery)))
# Test command line options
with Case(casedir, read_only=True) as case:
STOP_N = case.get_value("STOP_N")
COMP_CLASSES = case.get_values("COMP_CLASSES")
BUILD_COMPLETE = case.get_value("BUILD_COMPLETE")
cmd = xmlquery + " STOP_N --value"
output = run_cmd_no_fail(cmd, from_dir=casedir)
self.assertTrue(output == str(STOP_N), msg="%s != %s"%(output, STOP_N))
cmd = xmlquery + " BUILD_COMPLETE --value"
output = run_cmd_no_fail(cmd, from_dir=casedir)
self.assertTrue(output == "TRUE", msg="%s != %s"%(output, BUILD_COMPLETE))
# we expect DOCN_MODE to be undefined in this X compset
            # this test ensures that we do not try to resolve this as a compvar
cmd = xmlquery + " DOCN_MODE --value"
_, output, error = run_cmd(cmd, from_dir=casedir)
self.assertTrue(error == "ERROR: No results found for variable DOCN_MODE",
msg="unexpected result for DOCN_MODE, output {}, error {}".
format(output, error))
for comp in COMP_CLASSES:
caseresult = case.get_value("NTASKS_%s"%comp)
cmd = xmlquery + " NTASKS_%s --value"%comp
output = run_cmd_no_fail(cmd, from_dir=casedir)
self.assertTrue(output == str(caseresult), msg="%s != %s"%(output, caseresult))
cmd = xmlquery + " NTASKS --subgroup %s --value"%comp
output = run_cmd_no_fail(cmd, from_dir=casedir)
self.assertTrue(output == str(caseresult), msg="%s != %s"%(output, caseresult))
if MACHINE.has_batch_system():
JOB_QUEUE = case.get_value("JOB_QUEUE", subgroup="case.run")
cmd = xmlquery + " JOB_QUEUE --subgroup case.run --value"
output = run_cmd_no_fail(cmd, from_dir=casedir)
self.assertTrue(output == JOB_QUEUE, msg="%s != %s"%(output, JOB_QUEUE))
cmd = xmlquery + " --listall"
run_cmd_no_fail(cmd, from_dir=casedir)
cls._do_teardown.append(cls._testroot)
def test_f_createnewcase_with_user_compset(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'testcreatenewcase_with_user_compset')
if os.path.exists(testdir):
shutil.rmtree(testdir)
cls._testdirs.append(testdir)
pesfile = os.path.join("..","src","drivers","mct","cime_config","config_pes.xml")
args = "--case %s --compset 2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV --pesfile %s --res f19_g16 --output-root %s --handle-preexisting-dirs=r" % (testdir, pesfile, cls._testroot)
if CIME.utils.get_model() == "cesm":
args += " --run-unsupported"
if TEST_COMPILER is not None:
args += " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args = args + " --mpilib %s"%TEST_MPILIB
run_cmd_assert_result(self, "%s/create_newcase %s"%(SCRIPT_DIR, args), from_dir=SCRIPT_DIR)
run_cmd_assert_result(self, "./case.setup", from_dir=testdir)
run_cmd_assert_result(self, "./case.build", from_dir=testdir)
cls._do_teardown.append(testdir)
def test_g_createnewcase_with_user_compset_and_env_mach_pes(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'testcreatenewcase_with_user_compset_and_env_mach_pes')
if os.path.exists(testdir):
shutil.rmtree(testdir)
previous_testdir = cls._testdirs[-1]
cls._testdirs.append(testdir)
pesfile = os.path.join(previous_testdir,"env_mach_pes.xml")
args = "--case %s --compset 2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV --pesfile %s --res f19_g16 --output-root %s --handle-preexisting-dirs=r" % (testdir, pesfile, cls._testroot)
if CIME.utils.get_model() == "cesm":
args += " --run-unsupported"
if TEST_COMPILER is not None:
args += " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args += " --mpilib %s"%TEST_MPILIB
run_cmd_assert_result(self, "%s/create_newcase %s"%(SCRIPT_DIR, args), from_dir=SCRIPT_DIR)
run_cmd_assert_result(self, "diff env_mach_pes.xml %s"%(previous_testdir), from_dir=testdir)
# this line should cause the diff to fail (I assume no machine is going to default to 17 tasks)
run_cmd_assert_result(self, "./xmlchange NTASKS=17", from_dir=testdir)
run_cmd_assert_result(self, "diff env_mach_pes.xml %s"%(previous_testdir), from_dir=testdir,
expected_stat=1)
cls._do_teardown.append(testdir)
def test_h_primary_component(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'testprimarycomponent')
if os.path.exists(testdir):
shutil.rmtree(testdir)
cls._testdirs.append(testdir)
args = " --case CreateNewcaseTest --script-root %s --compset X --res f19_g16 --output-root %s --handle-preexisting-dirs u" % (testdir, cls._testroot)
if TEST_COMPILER is not None:
args += " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args += " --mpilib %s"%TEST_MPILIB
run_cmd_assert_result(self, "%s/create_newcase %s" % (SCRIPT_DIR, args), from_dir=SCRIPT_DIR)
self.assertTrue(os.path.exists(testdir))
self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup")))
with Case(testdir, read_only=False) as case:
case._compsetname = case.get_value("COMPSET")
case.set_comp_classes(case.get_values("COMP_CLASSES"))
primary = case._find_primary_component()
self.assertEqual(primary, "drv", msg="primary component test expected drv but got %s"%primary)
# now we are going to corrupt the case so that we can do more primary_component testing
case.set_valid_values("COMP_GLC","%s,fred"%case.get_value("COMP_GLC"))
case.set_value("COMP_GLC","fred")
primary = case._find_primary_component()
self.assertEqual(primary, "fred", msg="primary component test expected fred but got %s"%primary)
case.set_valid_values("COMP_ICE","%s,wilma"%case.get_value("COMP_ICE"))
case.set_value("COMP_ICE","wilma")
primary = case._find_primary_component()
self.assertEqual(primary, "wilma", msg="primary component test expected wilma but got %s"%primary)
case.set_valid_values("COMP_OCN","%s,bambam,docn"%case.get_value("COMP_OCN"))
case.set_value("COMP_OCN","bambam")
primary = case._find_primary_component()
self.assertEqual(primary, "bambam", msg="primary component test expected bambam but got %s"%primary)
case.set_valid_values("COMP_LND","%s,barney"%case.get_value("COMP_LND"))
case.set_value("COMP_LND","barney")
primary = case._find_primary_component()
# This is a "J" compset
self.assertEqual(primary, "allactive", msg="primary component test expected allactive but got %s"%primary)
case.set_value("COMP_OCN","docn")
case.set_valid_values("COMP_LND","%s,barney"%case.get_value("COMP_LND"))
case.set_value("COMP_LND","barney")
primary = case._find_primary_component()
self.assertEqual(primary, "barney", msg="primary component test expected barney but got %s"%primary)
case.set_valid_values("COMP_ATM","%s,wilma"%case.get_value("COMP_ATM"))
case.set_value("COMP_ATM","wilma")
primary = case._find_primary_component()
self.assertEqual(primary, "wilma", msg="primary component test expected wilma but got %s"%primary)
# this is a "E" compset
case._compsetname = case._compsetname.replace("XOCN","DOCN%SOM")
primary = case._find_primary_component()
self.assertEqual(primary, "allactive", msg="primary component test expected allactive but got %s"%primary)
# finally a "B" compset
case.set_value("COMP_OCN","bambam")
primary = case._find_primary_component()
self.assertEqual(primary, "allactive", msg="primary component test expected allactive but got %s"%primary)
cls._do_teardown.append(testdir)
def test_j_createnewcase_user_compset_vs_alias(self):
"""
Create a compset using the alias and another compset using the full compset name
and make sure they are the same by comparing the namelist files in CaseDocs.
Ignore the modelio files and clean the directory names out first.
"""
cls = self.__class__
testdir1 = os.path.join(cls._testroot, 'testcreatenewcase_user_compset')
if os.path.exists(testdir1):
shutil.rmtree(testdir1)
cls._testdirs.append(testdir1)
args = ' --case CreateNewcaseTest --script-root {} --compset 2000_DATM%NYF_SLND_SICE_DOCN%SOMAQP_SROF_SGLC_SWAV --res f19_g16 --output-root {} --handle-preexisting-dirs u' .format(testdir1, cls._testroot)
if CIME.utils.get_model() == "cesm":
args += " --run-unsupported"
if TEST_COMPILER is not None:
args += " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args += " --mpilib %s"%TEST_MPILIB
run_cmd_assert_result(self, "{}/create_newcase {}" .format (SCRIPT_DIR, args), from_dir=SCRIPT_DIR)
run_cmd_assert_result(self, "./case.setup ", from_dir=testdir1)
run_cmd_assert_result(self, "./preview_namelists ", from_dir=testdir1)
dir1 = os.path.join(testdir1,"CaseDocs")
dir2 = os.path.join(testdir1,"CleanCaseDocs")
os.mkdir(dir2)
for _file in os.listdir(dir1):
if "modelio" in _file:
continue
with open(os.path.join(dir1,_file),"r") as fi:
file_text = fi.read()
file_text = file_text.replace(os.path.basename(testdir1),"PATH")
with open(os.path.join(dir2,_file), "w") as fo:
fo.write(file_text)
cleancasedocs1 = dir2
testdir2 = os.path.join(cls._testroot, 'testcreatenewcase_alias_compset')
if os.path.exists(testdir2):
shutil.rmtree(testdir2)
cls._testdirs.append(testdir2)
args = ' --case CreateNewcaseTest --script-root {} --compset ADSOMAQP --res f19_g16 --output-root {} --handle-preexisting-dirs u'.format(testdir2, cls._testroot)
if CIME.utils.get_model() == "cesm":
args += " --run-unsupported"
if TEST_COMPILER is not None:
args += " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args += " --mpilib %s"%TEST_MPILIB
run_cmd_assert_result(self, "{}/create_newcase {}".format(SCRIPT_DIR, args), from_dir=SCRIPT_DIR)
run_cmd_assert_result(self, "./case.setup ", from_dir=testdir2)
run_cmd_assert_result(self, "./preview_namelists ", from_dir=testdir2)
dir1 = os.path.join(testdir2,"CaseDocs")
dir2 = os.path.join(testdir2,"CleanCaseDocs")
os.mkdir(dir2)
for _file in os.listdir(dir1):
if "modelio" in _file:
continue
with open(os.path.join(dir1,_file),"r") as fi:
file_text = fi.read()
file_text = file_text.replace(os.path.basename(testdir2),"PATH")
with open(os.path.join(dir2,_file), "w") as fo:
fo.write(file_text)
cleancasedocs2 = dir2
dcmp = filecmp.dircmp(cleancasedocs1, cleancasedocs2)
self.assertTrue(len(dcmp.diff_files) == 0, "CaseDocs differ {}".format(dcmp.diff_files))
cls._do_teardown.append(testdir1)
cls._do_teardown.append(testdir2)
def test_k_append_config(self):
machlist_before = MACHINE.list_available_machines()
        self.assertTrue(len(machlist_before) > 1, msg="Problem reading machine list")
newmachfile = os.path.join(CIME.utils.get_cime_root(),"config",
"xml_schemas","config_machines_template.xml")
MACHINE.read(newmachfile)
machlist_after = MACHINE.list_available_machines()
self.assertEqual(len(machlist_after)-len(machlist_before), 1, msg="Not able to append config_machines.xml {} {}".format(len(machlist_after), len(machlist_before)))
self.assertEqual("mymachine" in machlist_after, True, msg="Not able to append config_machines.xml")
def test_m_createnewcase_alternate_drivers(self):
# Test that case.setup runs for nuopc and moab drivers
cls = self.__class__
for driver in ("nuopc", "moab"):
testdir = os.path.join(cls._testroot, 'testcreatenewcase.{}'.format( driver))
if os.path.exists(testdir):
shutil.rmtree(testdir)
args = " --driver {} --case {} --compset X --res f19_g16 --output-root {} --handle-preexisting-dirs=r".format(driver, testdir, cls._testroot)
if CIME.utils.get_model() == "cesm":
args += " --run-unsupported"
if TEST_COMPILER is not None:
args = args + " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args = args + " --mpilib %s"%TEST_MPILIB
cls._testdirs.append(testdir)
run_cmd_assert_result(self, "./create_newcase %s"%(args), from_dir=SCRIPT_DIR)
self.assertTrue(os.path.exists(testdir))
self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup")))
run_cmd_assert_result(self, "./case.setup", from_dir=testdir)
with Case(testdir, read_only=False) as case:
comp_interface = case.get_value("COMP_INTERFACE")
self.assertTrue(driver == comp_interface, msg="%s != %s"%(driver, comp_interface))
cls._do_teardown.append(testdir)
@classmethod
def tearDownClass(cls):
do_teardown = len(cls._do_teardown) > 0 and sys.exc_info() == (None, None, None) and not NO_TEARDOWN
for tfile in cls._testdirs:
if tfile not in cls._do_teardown:
print("Detected failed test or user request no teardown")
print("Leaving case directory : %s"%tfile)
elif do_teardown:
try:
print ("Attempt to remove directory {}".format(tfile))
shutil.rmtree(tfile)
except BaseException:
print("Could not remove directory {}".format(tfile))
###############################################################################
class M_TestWaitForTests(unittest.TestCase):
###############################################################################
###########################################################################
def setUp(self):
###########################################################################
self._testroot = os.path.join(TEST_ROOT,"TestWaitForTests")
# basic tests
self._testdir_all_pass = os.path.join(self._testroot, 'scripts_regression_tests.testdir_all_pass')
self._testdir_with_fail = os.path.join(self._testroot, 'scripts_regression_tests.testdir_with_fail')
self._testdir_unfinished = os.path.join(self._testroot, 'scripts_regression_tests.testdir_unfinished')
self._testdir_unfinished2 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_unfinished2')
# live tests
self._testdir_teststatus1 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_teststatus1')
self._testdir_teststatus2 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_teststatus2')
self._testdirs = [self._testdir_all_pass, self._testdir_with_fail, self._testdir_unfinished, self._testdir_unfinished2,
self._testdir_teststatus1, self._testdir_teststatus2]
basic_tests = self._testdirs[:self._testdirs.index(self._testdir_teststatus1)]
for testdir in self._testdirs:
if os.path.exists(testdir):
shutil.rmtree(testdir)
os.makedirs(testdir)
for r in range(10):
for testdir in basic_tests:
os.makedirs(os.path.join(testdir, str(r)))
make_fake_teststatus(os.path.join(testdir, str(r)), "Test_%d" % r, TEST_PASS_STATUS, RUN_PHASE)
make_fake_teststatus(os.path.join(self._testdir_with_fail, "5"), "Test_5", TEST_FAIL_STATUS, RUN_PHASE)
make_fake_teststatus(os.path.join(self._testdir_unfinished, "5"), "Test_5", TEST_PEND_STATUS, RUN_PHASE)
make_fake_teststatus(os.path.join(self._testdir_unfinished2, "5"), "Test_5", TEST_PASS_STATUS, SUBMIT_PHASE)
integration_tests = self._testdirs[len(basic_tests):]
for integration_test in integration_tests:
os.makedirs(os.path.join(integration_test, "0"))
make_fake_teststatus(os.path.join(integration_test, "0"), "Test_0", TEST_PASS_STATUS, CORE_PHASES[0])
# Set up proxy if possible
self._unset_proxy = setup_proxy()
self._thread_error = None
###########################################################################
def tearDown(self):
###########################################################################
do_teardown = sys.exc_info() == (None, None, None) and not NO_TEARDOWN
if do_teardown:
for testdir in self._testdirs:
shutil.rmtree(testdir)
kill_subprocesses()
if (self._unset_proxy):
del os.environ["http_proxy"]
###########################################################################
def simple_test(self, testdir, expected_results, extra_args="", build_name=None):
###########################################################################
# Need these flags to test dashboard if e3sm
if CIME.utils.get_model() == "e3sm" and build_name is not None:
extra_args += " -b %s" % build_name
expected_stat = 0 if expected_results == ["PASS"]*len(expected_results) else CIME.utils.TESTS_FAILED_ERR_CODE
output = run_cmd_assert_result(self, "%s/wait_for_tests -p ACME_test */TestStatus %s" % (TOOLS_DIR, extra_args),
from_dir=testdir, expected_stat=expected_stat)
lines = [line for line in output.splitlines() if line.startswith("Test '")]
self.assertEqual(len(lines), len(expected_results))
for idx, line in enumerate(lines):
testname, status = parse_test_status(line)
self.assertEqual(status, expected_results[idx])
self.assertEqual(testname, "Test_%d" % idx)
###########################################################################
def threaded_test(self, testdir, expected_results, extra_args="", build_name=None):
###########################################################################
try:
self.simple_test(testdir, expected_results, extra_args, build_name)
except AssertionError as e:
self._thread_error = str(e)
###########################################################################
def test_wait_for_test_all_pass(self):
###########################################################################
self.simple_test(self._testdir_all_pass, ["PASS"] * 10)
###########################################################################
def test_wait_for_test_with_fail(self):
###########################################################################
expected_results = ["FAIL" if item == 5 else "PASS" for item in range(10)]
self.simple_test(self._testdir_with_fail, expected_results)
###########################################################################
def test_wait_for_test_no_wait(self):
###########################################################################
expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)]
self.simple_test(self._testdir_unfinished, expected_results, "-n")
###########################################################################
def test_wait_for_test_timeout(self):
###########################################################################
expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)]
self.simple_test(self._testdir_unfinished, expected_results, "--timeout=3")
###########################################################################
def test_wait_for_test_wait_for_pend(self):
###########################################################################
run_thread = threading.Thread(target=self.threaded_test, args=(self._testdir_unfinished, ["PASS"] * 10))
run_thread.daemon = True
run_thread.start()
time.sleep(5) # Kinda hacky
        self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
with TestStatus(test_dir=os.path.join(self._testdir_unfinished, "5")) as ts:
ts.set_status(RUN_PHASE, TEST_PASS_STATUS)
run_thread.join(timeout=10)
        self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
###########################################################################
def test_wait_for_test_wait_for_missing_run_phase(self):
###########################################################################
run_thread = threading.Thread(target=self.threaded_test, args=(self._testdir_unfinished2, ["PASS"] * 10))
run_thread.daemon = True
run_thread.start()
time.sleep(5) # Kinda hacky
        self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
with TestStatus(test_dir=os.path.join(self._testdir_unfinished2, "5")) as ts:
ts.set_status(RUN_PHASE, TEST_PASS_STATUS)
run_thread.join(timeout=10)
        self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
###########################################################################
def test_wait_for_test_wait_kill(self):
###########################################################################
expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)]
run_thread = threading.Thread(target=self.threaded_test, args=(self._testdir_unfinished, expected_results))
run_thread.daemon = True
run_thread.start()
time.sleep(5)
        self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
kill_python_subprocesses(signal.SIGTERM, expected_num_killed=1, tester=self)
run_thread.join(timeout=10)
        self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
###########################################################################
def test_wait_for_test_cdash_pass(self):
###########################################################################
expected_results = ["PASS"] * 10
run_thread = threading.Thread(target=self.threaded_test,
args=(self._testdir_all_pass, expected_results, "", "regression_test_pass"))
run_thread.daemon = True
run_thread.start()
run_thread.join(timeout=10)
        self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
assert_dashboard_has_build(self, "regression_test_pass")
###########################################################################
def test_wait_for_test_cdash_kill(self):
###########################################################################
expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)]
run_thread = threading.Thread(target=self.threaded_test,
args=(self._testdir_unfinished, expected_results, "", "regression_test_kill"))
run_thread.daemon = True
run_thread.start()
time.sleep(5)
        self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
kill_python_subprocesses(signal.SIGTERM, expected_num_killed=1, tester=self)
run_thread.join(timeout=10)
        self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
assert_dashboard_has_build(self, "regression_test_kill")
if CIME.utils.get_model() == "e3sm":
cdash_result_dir = os.path.join(self._testdir_unfinished, "Testing")
tag_file = os.path.join(cdash_result_dir, "TAG")
self.assertTrue(os.path.isdir(cdash_result_dir))
self.assertTrue(os.path.isfile(tag_file))
tag = open(tag_file, "r").readlines()[0].strip()
xml_file = os.path.join(cdash_result_dir, tag, "Test.xml")
self.assertTrue(os.path.isfile(xml_file))
xml_contents = open(xml_file, "r").read()
self.assertTrue(r'<TestList><Test>Test_0</Test><Test>Test_1</Test><Test>Test_2</Test><Test>Test_3</Test><Test>Test_4</Test><Test>Test_5</Test><Test>Test_6</Test><Test>Test_7</Test><Test>Test_8</Test><Test>Test_9</Test></TestList>'
in xml_contents)
self.assertTrue(r'<Test Status="notrun"><Name>Test_5</Name>' in xml_contents)
# TODO: Any further checking of xml output worth doing?
###########################################################################
def live_test_impl(self, testdir, expected_results, last_phase, last_status):
###########################################################################
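        # Drive a live TestStatus file one core phase at a time: wait_for_tests runs in
        # a background thread and should keep waiting until `last_phase` is given
        # `last_status`, at which point the thread is expected to finish.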
run_thread = threading.Thread(target=self.threaded_test, args=(testdir, expected_results))
run_thread.daemon = True
run_thread.start()
time.sleep(5)
        self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
for core_phase in CORE_PHASES[1:]:
with TestStatus(test_dir=os.path.join(self._testdir_teststatus1, "0")) as ts:
ts.set_status(core_phase, last_status if core_phase == last_phase else TEST_PASS_STATUS)
time.sleep(5)
if core_phase != last_phase:
                self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited after passing phase {}".format(core_phase))
else:
run_thread.join(timeout=10)
                self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished after phase {}".format(core_phase))
break
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
###########################################################################
def test_wait_for_test_test_status_integration_pass(self):
###########################################################################
self.live_test_impl(self._testdir_teststatus1, ["PASS"], RUN_PHASE, TEST_PASS_STATUS)
###########################################################################
def test_wait_for_test_test_status_integration_submit_fail(self):
###########################################################################
self.live_test_impl(self._testdir_teststatus1, ["FAIL"], SUBMIT_PHASE, TEST_FAIL_STATUS)
###############################################################################
class TestCreateTestCommon(unittest.TestCase):
###############################################################################
###########################################################################
def setUp(self):
###########################################################################
self._thread_error = None
self._unset_proxy = setup_proxy()
self._machine = MACHINE.get_machine_name()
self._compiler = MACHINE.get_default_compiler() if TEST_COMPILER is None else TEST_COMPILER
self._baseline_name = "fake_testing_only_%s" % CIME.utils.get_timestamp()
self._baseline_area = os.path.join(TEST_ROOT, "baselines")
self._testroot = TEST_ROOT
self._hasbatch = MACHINE.has_batch_system() and not NO_BATCH
self._do_teardown = not NO_TEARDOWN
###########################################################################
def tearDown(self):
###########################################################################
kill_subprocesses()
if (self._unset_proxy):
del os.environ["http_proxy"]
files_to_clean = []
baselines = os.path.join(self._baseline_area, self._baseline_name)
if (os.path.isdir(baselines)):
files_to_clean.append(baselines)
for test_id in ["master", self._baseline_name]:
for leftover in glob.glob(os.path.join(self._testroot, "*%s*" % test_id)):
files_to_clean.append(leftover)
do_teardown = self._do_teardown and sys.exc_info() == (None, None, None)
if (not do_teardown):
print("Detected failed test or user request no teardown")
print("Leaving files:")
for file_to_clean in files_to_clean:
print(" " + file_to_clean)
else:
            # For batch machines we need to avoid a race condition while the batch
            # system finishes I/O for the case.
if self._hasbatch:
time.sleep(5)
for file_to_clean in files_to_clean:
if (os.path.isdir(file_to_clean)):
shutil.rmtree(file_to_clean)
else:
os.remove(file_to_clean)
###########################################################################
def _create_test(self, extra_args, test_id=None, pre_run_errors=False, run_errors=False, env_changes=""):
###########################################################################
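        # Wrapper around create_test: appends the test-id, baseline/compiler/mpilib and
        # root options to `extra_args`, runs create_test, and (for full runs) waits for
        # the tests, asserting the expected overall exit status.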
test_id = CIME.utils.get_timestamp() if test_id is None else test_id
extra_args.append("-t {}".format(test_id))
extra_args.append("--baseline-root {}".format(self._baseline_area))
if NO_BATCH:
extra_args.append("--no-batch")
if TEST_COMPILER and ([extra_arg for extra_arg in extra_args if "--compiler" in extra_arg] == []):
extra_args.append("--compiler={}".format(TEST_COMPILER))
if TEST_MPILIB and ([extra_arg for extra_arg in extra_args if "--mpilib" in extra_arg] == []):
extra_args.append("--mpilib={}".format(TEST_MPILIB))
extra_args.append("--test-root={0} --output-root={0}".format(TEST_ROOT))
full_run = (set(extra_args) & set(["-n", "--namelist-only", "--no-setup", "--no-build"])) == set()
if self._hasbatch:
expected_stat = 0 if not pre_run_errors else CIME.utils.TESTS_FAILED_ERR_CODE
else:
expected_stat = 0 if not pre_run_errors and not run_errors else CIME.utils.TESTS_FAILED_ERR_CODE
run_cmd_assert_result(self, "{} {}/create_test {}".format(env_changes, SCRIPT_DIR, " ".join(extra_args)),
expected_stat=expected_stat)
if full_run:
self._wait_for_tests(test_id, expect_works=(not pre_run_errors and not run_errors))
###########################################################################
def _wait_for_tests(self, test_id, expect_works=True):
###########################################################################
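        # On batch machines, invoke wait_for_tests on every matching TestStatus file
        # and assert that the overall result matches `expect_works`.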
if self._hasbatch:
timeout_arg = "--timeout={}".format(GLOBAL_TIMEOUT) if GLOBAL_TIMEOUT is not None else ""
expected_stat = 0 if expect_works else CIME.utils.TESTS_FAILED_ERR_CODE
run_cmd_assert_result(self, "{}/wait_for_tests {} *{}/TestStatus".format(TOOLS_DIR, timeout_arg, test_id),
from_dir=self._testroot, expected_stat=expected_stat)
###############################################################################
class O_TestTestScheduler(TestCreateTestCommon):
###############################################################################
###########################################################################
def test_a_phases(self):
###########################################################################
        # exclude the MEMLEAK, STARCFAIL, TESTDIFF and *FAILEXC variants here so that
        # only the basic TESTBUILDFAIL, TESTRUNFAIL and TESTRUNPASS tests remain.
tests = get_tests.get_full_test_names(["cime_test_only",
"^TESTMEMLEAKFAIL_P1.f09_g16.X",
"^TESTMEMLEAKPASS_P1.f09_g16.X",
"^TESTRUNSTARCFAIL_P1.f19_g16_rx1.A",
"^TESTTESTDIFF_P1.f19_g16_rx1.A",
"^TESTBUILDFAILEXC_P1.f19_g16_rx1.A",
"^TESTRUNFAILEXC_P1.f19_g16_rx1.A"],
self._machine, self._compiler)
self.assertEqual(len(tests), 3)
ct = TestScheduler(tests, test_root=TEST_ROOT, output_root=TEST_ROOT,
compiler=self._compiler, mpilib=TEST_MPILIB)
build_fail_test = [item for item in tests if "TESTBUILDFAIL" in item][0]
run_fail_test = [item for item in tests if "TESTRUNFAIL" in item][0]
pass_test = [item for item in tests if "TESTRUNPASS" in item][0]
self.assertTrue("BUILDFAIL" in build_fail_test, msg="Wrong test '%s'" % build_fail_test)
self.assertTrue("RUNFAIL" in run_fail_test, msg="Wrong test '%s'" % run_fail_test)
self.assertTrue("RUNPASS" in pass_test, msg="Wrong test '%s'" % pass_test)
for idx, phase in enumerate(ct._phases):
for test in ct._tests:
if (phase == CIME.test_scheduler.TEST_START):
continue
elif (phase == MODEL_BUILD_PHASE):
ct._update_test_status(test, phase, TEST_PEND_STATUS)
if (test == build_fail_test):
ct._update_test_status(test, phase, TEST_FAIL_STATUS)
self.assertTrue(ct._is_broken(test))
self.assertFalse(ct._work_remains(test))
else:
ct._update_test_status(test, phase, TEST_PASS_STATUS)
self.assertFalse(ct._is_broken(test))
self.assertTrue(ct._work_remains(test))
elif (phase == RUN_PHASE):
if (test == build_fail_test):
with self.assertRaises(CIMEError):
ct._update_test_status(test, phase, TEST_PEND_STATUS)
else:
ct._update_test_status(test, phase, TEST_PEND_STATUS)
self.assertFalse(ct._work_remains(test))
if (test == run_fail_test):
ct._update_test_status(test, phase, TEST_FAIL_STATUS)
self.assertTrue(ct._is_broken(test))
else:
ct._update_test_status(test, phase, TEST_PASS_STATUS)
self.assertFalse(ct._is_broken(test))
self.assertFalse(ct._work_remains(test))
else:
with self.assertRaises(CIMEError):
ct._update_test_status(test, ct._phases[idx+1], TEST_PEND_STATUS)
with self.assertRaises(CIMEError):
ct._update_test_status(test, phase, TEST_PASS_STATUS)
ct._update_test_status(test, phase, TEST_PEND_STATUS)
self.assertFalse(ct._is_broken(test))
self.assertTrue(ct._work_remains(test))
with self.assertRaises(CIMEError):
ct._update_test_status(test, phase, TEST_PEND_STATUS)
ct._update_test_status(test, phase, TEST_PASS_STATUS)
with self.assertRaises(CIMEError):
ct._update_test_status(test, phase, TEST_FAIL_STATUS)
self.assertFalse(ct._is_broken(test))
self.assertTrue(ct._work_remains(test))
###########################################################################
def test_b_full(self):
###########################################################################
tests = get_tests.get_full_test_names(["cime_test_only"], self._machine, self._compiler)
test_id="%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
ct = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, test_root=TEST_ROOT,
output_root=TEST_ROOT,compiler=self._compiler, mpilib=TEST_MPILIB)
build_fail_test = [item for item in tests if "TESTBUILDFAIL_" in item][0]
build_fail_exc_test = [item for item in tests if "TESTBUILDFAILEXC" in item][0]
run_fail_test = [item for item in tests if "TESTRUNFAIL_" in item][0]
run_fail_exc_test = [item for item in tests if "TESTRUNFAILEXC" in item][0]
pass_test = [item for item in tests if "TESTRUNPASS" in item][0]
test_diff_test = [item for item in tests if "TESTTESTDIFF" in item][0]
mem_fail_test = [item for item in tests if "TESTMEMLEAKFAIL" in item][0]
mem_pass_test = [item for item in tests if "TESTMEMLEAKPASS" in item][0]
st_arch_fail_test = [item for item in tests if "TESTRUNSTARCFAIL" in item][0]
log_lvl = logging.getLogger().getEffectiveLevel()
logging.disable(logging.CRITICAL)
try:
ct.run_tests()
finally:
logging.getLogger().setLevel(log_lvl)
self._wait_for_tests(test_id, expect_works=False)
test_statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, test_id))
self.assertEqual(len(tests), len(test_statuses))
for test_status in test_statuses:
ts = TestStatus(test_dir=os.path.dirname(test_status))
test_name = ts.get_name()
log_files = glob.glob("%s/%s*%s/TestStatus.log" % (self._testroot, test_name, test_id))
self.assertEqual(len(log_files), 1, "Expected exactly one TestStatus.log file, found %d" % len(log_files))
log_file = log_files[0]
if (test_name == build_fail_test):
assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_FAIL_STATUS)
data = open(log_file, "r").read()
self.assertTrue("Intentional fail for testing infrastructure" in data,
"Broken test did not report build error:\n%s" % data)
elif (test_name == build_fail_exc_test):
data = open(log_file, "r").read()
assert_test_status(self, test_name, ts, SHAREDLIB_BUILD_PHASE, TEST_FAIL_STATUS)
self.assertTrue("Exception from init" in data,
"Broken test did not report build error:\n%s" % data)
elif (test_name == run_fail_test):
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_FAIL_STATUS)
elif (test_name == run_fail_exc_test):
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_FAIL_STATUS)
data = open(log_file, "r").read()
self.assertTrue("Exception from run_phase" in data,
"Broken test did not report run error:\n%s" % data)
elif (test_name == mem_fail_test):
assert_test_status(self, test_name, ts, MEMLEAK_PHASE, TEST_FAIL_STATUS)
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
elif (test_name == test_diff_test):
assert_test_status(self, test_name, ts, "COMPARE_base_rest", TEST_FAIL_STATUS)
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
elif test_name == st_arch_fail_test:
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, STARCHIVE_PHASE, TEST_FAIL_STATUS)
else:
self.assertTrue(test_name in [pass_test, mem_pass_test])
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
if (test_name == mem_pass_test):
assert_test_status(self, test_name, ts, MEMLEAK_PHASE, TEST_PASS_STATUS)
###########################################################################
def test_c_use_existing(self):
###########################################################################
tests = get_tests.get_full_test_names(["TESTBUILDFAIL_P1.f19_g16_rx1.A", "TESTRUNFAIL_P1.f19_g16_rx1.A", "TESTRUNPASS_P1.f19_g16_rx1.A"],
self._machine, self._compiler)
test_id="%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
ct = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, test_root=TEST_ROOT,
output_root=TEST_ROOT,compiler=self._compiler, mpilib=TEST_MPILIB)
build_fail_test = [item for item in tests if "TESTBUILDFAIL" in item][0]
run_fail_test = [item for item in tests if "TESTRUNFAIL" in item][0]
pass_test = [item for item in tests if "TESTRUNPASS" in item][0]
log_lvl = logging.getLogger().getEffectiveLevel()
logging.disable(logging.CRITICAL)
try:
ct.run_tests()
finally:
logging.getLogger().setLevel(log_lvl)
test_statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, test_id))
self.assertEqual(len(tests), len(test_statuses))
self._wait_for_tests(test_id, expect_works=False)
for test_status in test_statuses:
casedir = os.path.dirname(test_status)
ts = TestStatus(test_dir=casedir)
test_name = ts.get_name()
if test_name == build_fail_test:
assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_FAIL_STATUS)
with TestStatus(test_dir=casedir) as ts:
ts.set_status(MODEL_BUILD_PHASE, TEST_PEND_STATUS)
elif test_name == run_fail_test:
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_FAIL_STATUS)
with TestStatus(test_dir=casedir) as ts:
ts.set_status(SUBMIT_PHASE, TEST_PEND_STATUS)
else:
self.assertTrue(test_name == pass_test)
assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, SUBMIT_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
os.environ["TESTBUILDFAIL_PASS"] = "True"
os.environ["TESTRUNFAIL_PASS"] = "True"
ct2 = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, use_existing=True,
test_root=TEST_ROOT,output_root=TEST_ROOT,compiler=self._compiler,
mpilib=TEST_MPILIB)
log_lvl = logging.getLogger().getEffectiveLevel()
logging.disable(logging.CRITICAL)
try:
ct2.run_tests()
finally:
logging.getLogger().setLevel(log_lvl)
self._wait_for_tests(test_id)
for test_status in test_statuses:
ts = TestStatus(test_dir=os.path.dirname(test_status))
test_name = ts.get_name()
assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, SUBMIT_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
del os.environ["TESTBUILDFAIL_PASS"]
del os.environ["TESTRUNFAIL_PASS"]
# test that passed tests are not re-run
ct2 = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, use_existing=True,
test_root=TEST_ROOT,output_root=TEST_ROOT,compiler=self._compiler,
mpilib=TEST_MPILIB)
log_lvl = logging.getLogger().getEffectiveLevel()
logging.disable(logging.CRITICAL)
try:
ct2.run_tests()
finally:
logging.getLogger().setLevel(log_lvl)
self._wait_for_tests(test_id)
for test_status in test_statuses:
ts = TestStatus(test_dir=os.path.dirname(test_status))
test_name = ts.get_name()
assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, SUBMIT_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
###########################################################################
def test_d_retry(self):
###########################################################################
args = ["TESTBUILDFAIL_P1.f19_g16_rx1.A", "TESTRUNFAIL_P1.f19_g16_rx1.A", "TESTRUNPASS_P1.f19_g16_rx1.A", "--retry=1"]
self._create_test(args)
###############################################################################
class P_TestJenkinsGenericJob(TestCreateTestCommon):
###############################################################################
###########################################################################
def setUp(self):
###########################################################################
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping Jenkins tests. E3SM feature")
TestCreateTestCommon.setUp(self)
# Need to run in a subdir in order to not have CTest clash. Name it
# such that it should be cleaned up by the parent tearDown
self._testdir = os.path.join(self._testroot, "jenkins_test_%s" % self._baseline_name)
os.makedirs(self._testdir)
# Change root to avoid clashing with other jenkins_generic_jobs
self._jenkins_root = os.path.join(self._testdir, "J")
###########################################################################
def tearDown(self):
###########################################################################
TestCreateTestCommon.tearDown(self)
if "TESTRUNDIFF_ALTERNATE" in os.environ:
del os.environ["TESTRUNDIFF_ALTERNATE"]
###########################################################################
def simple_test(self, expect_works, extra_args, build_name=None):
###########################################################################
if NO_BATCH:
extra_args += " --no-batch"
# Need these flags to test dashboard if e3sm
if CIME.utils.get_model() == "e3sm" and build_name is not None:
extra_args += " -p ACME_test --submit-to-cdash --cdash-build-group=Nightly -c %s" % build_name
run_cmd_assert_result(self, "%s/jenkins_generic_job -r %s %s -B %s" % (TOOLS_DIR, self._testdir, extra_args, self._baseline_area),
from_dir=self._testdir, expected_stat=(0 if expect_works else CIME.utils.TESTS_FAILED_ERR_CODE))
###########################################################################
def threaded_test(self, expect_works, extra_args, build_name=None):
###########################################################################
try:
self.simple_test(expect_works, extra_args, build_name)
except AssertionError as e:
self._thread_error = str(e)
###########################################################################
def assert_num_leftovers(self, suite):
###########################################################################
num_tests_in_tiny = len(get_tests.get_test_suite(suite))
jenkins_dirs = glob.glob("%s/*%s*/" % (self._jenkins_root, self._baseline_name.capitalize())) # case dirs
# scratch_dirs = glob.glob("%s/*%s*/" % (self._testroot, test_id)) # blr/run dirs
self.assertEqual(num_tests_in_tiny, len(jenkins_dirs),
msg="Wrong number of leftover directories in %s, expected %d, see %s" % \
(self._jenkins_root, num_tests_in_tiny, jenkins_dirs))
# JGF: Can't test this at the moment due to root change flag given to jenkins_generic_job
# self.assertEqual(num_tests_in_tiny + 1, len(scratch_dirs),
# msg="Wrong number of leftover directories in %s, expected %d, see %s" % \
# (self._testroot, num_tests_in_tiny, scratch_dirs))
###########################################################################
def test_jenkins_generic_job(self):
###########################################################################
# Generate fresh baselines so that this test is not impacted by
# unresolved diffs
self.simple_test(True, "-t cime_test_only_pass -g -b %s" % self._baseline_name)
self.assert_num_leftovers("cime_test_only_pass")
build_name = "jenkins_generic_job_pass_%s" % CIME.utils.get_timestamp()
self.simple_test(True, "-t cime_test_only_pass -b %s" % self._baseline_name, build_name=build_name)
self.assert_num_leftovers("cime_test_only_pass") # jenkins_generic_job should have automatically cleaned up leftovers from prior run
assert_dashboard_has_build(self, build_name)
###########################################################################
def test_jenkins_generic_job_kill(self):
###########################################################################
build_name = "jenkins_generic_job_kill_%s" % CIME.utils.get_timestamp()
run_thread = threading.Thread(target=self.threaded_test, args=(False, " -t cime_test_only_slow_pass -b master --baseline-compare=no", build_name))
run_thread.daemon = True
run_thread.start()
time.sleep(120)
kill_subprocesses(sig=signal.SIGTERM)
run_thread.join(timeout=10)
        self.assertFalse(run_thread.is_alive(), msg="jenkins_generic_job should have finished")
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
assert_dashboard_has_build(self, build_name)
###########################################################################
def test_jenkins_generic_job_realistic_dash(self):
###########################################################################
# The actual quality of the cdash results for this test can only
# be inspected manually
# Generate fresh baselines so that this test is not impacted by
# unresolved diffs
self.simple_test(False, "-t cime_test_all -g -b %s" % self._baseline_name)
self.assert_num_leftovers("cime_test_all")
# Should create a diff
os.environ["TESTRUNDIFF_ALTERNATE"] = "True"
# Should create a nml diff
# Modify namelist
fake_nl = """
&fake_nml
fake_item = 'fake'
fake = .true.
/"""
baseline_glob = glob.glob(os.path.join(self._baseline_area, self._baseline_name, "TESTRUNPASS*"))
self.assertEqual(len(baseline_glob), 1, msg="Expected one match, got:\n%s" % "\n".join(baseline_glob))
import stat
for baseline_dir in baseline_glob:
nl_path = os.path.join(baseline_dir, "CaseDocs", "datm_in")
self.assertTrue(os.path.isfile(nl_path), msg="Missing file %s" % nl_path)
os.chmod(nl_path, stat.S_IRUSR | stat.S_IWUSR)
with open(nl_path, "a") as nl_file:
nl_file.write(fake_nl)
build_name = "jenkins_generic_job_mixed_%s" % CIME.utils.get_timestamp()
self.simple_test(False, "-t cime_test_all -b %s" % self._baseline_name, build_name=build_name)
self.assert_num_leftovers("cime_test_all") # jenkins_generic_job should have automatically cleaned up leftovers from prior run
assert_dashboard_has_build(self, build_name)
###############################################################################
class M_TestCimePerformance(TestCreateTestCommon):
###############################################################################
###########################################################################
def test_cime_case_ctrl_performance(self):
###########################################################################
ts = time.time()
num_repeat = 5
for _ in range(num_repeat):
self._create_test(["cime_tiny","--no-build"])
elapsed = time.time() - ts
print("Perf test result: {:0.2f}".format(elapsed))
###############################################################################
class T_TestRunRestart(TestCreateTestCommon):
###############################################################################
###########################################################################
def test_run_restart(self):
###########################################################################
driver = CIME.utils.get_cime_default_driver()
if driver == "mct":
walltime="00:15:00"
else:
walltime="00:30:00"
self._create_test(["--walltime "+walltime,"NODEFAIL_P1.f09_g16.X"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"{}.{}".format(CIME.utils.get_full_test_name("NODEFAIL_P1.f09_g16.X", machine=self._machine, compiler=self._compiler), self._baseline_name))
rundir = run_cmd_no_fail("./xmlquery RUNDIR --value", from_dir=casedir)
fail_sentinel = os.path.join(rundir, "FAIL_SENTINEL")
self.assertTrue(os.path.exists(fail_sentinel), msg="Missing %s" % fail_sentinel)
        with open(fail_sentinel, "r") as fail_file:
            self.assertEqual(fail_file.read().count("FAIL"), 3)
###########################################################################
def test_run_restart_too_many_fails(self):
###########################################################################
driver = CIME.utils.get_cime_default_driver()
if driver == "mct":
walltime="00:15:00"
else:
walltime="00:30:00"
self._create_test(["--walltime "+walltime,"NODEFAIL_P1.f09_g16.X"], test_id=self._baseline_name, env_changes="NODEFAIL_NUM_FAILS=5", run_errors=True)
casedir = os.path.join(self._testroot,
"{}.{}".format(CIME.utils.get_full_test_name("NODEFAIL_P1.f09_g16.X", machine=self._machine, compiler=self._compiler), self._baseline_name))
rundir = run_cmd_no_fail("./xmlquery RUNDIR --value", from_dir=casedir)
fail_sentinel = os.path.join(rundir, "FAIL_SENTINEL")
self.assertTrue(os.path.exists(fail_sentinel), msg="Missing %s" % fail_sentinel)
        with open(fail_sentinel, "r") as fail_file:
            self.assertEqual(fail_file.read().count("FAIL"), 4)
###############################################################################
class Q_TestBlessTestResults(TestCreateTestCommon):
###############################################################################
###########################################################################
def tearDown(self):
###########################################################################
TestCreateTestCommon.tearDown(self)
if "TESTRUNDIFF_ALTERNATE" in os.environ:
del os.environ["TESTRUNDIFF_ALTERNATE"]
###############################################################################
def test_bless_test_results(self):
###############################################################################
# Generate some baselines
test_name = "TESTRUNDIFF_P1.f19_g16_rx1.A"
if CIME.utils.get_model() == "e3sm":
genargs = ["-g", "-o", "-b", self._baseline_name, test_name]
compargs = ["-c", "-b", self._baseline_name, test_name]
else:
genargs = ["-g", self._baseline_name, "-o", test_name,
"--baseline-root ", self._baseline_area]
compargs = ["-c", self._baseline_name, test_name,
"--baseline-root ", self._baseline_area]
self._create_test(genargs)
# Hist compare should pass
self._create_test(compargs)
# Change behavior
os.environ["TESTRUNDIFF_ALTERNATE"] = "True"
# Hist compare should now fail
test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
self._create_test(compargs, test_id=test_id, run_errors=True)
# compare_test_results should detect the fail
cpr_cmd = "{}/compare_test_results --test-root {} -t {} 2>&1" \
.format(TOOLS_DIR, TEST_ROOT, test_id)
output = run_cmd_assert_result(self, cpr_cmd, expected_stat=CIME.utils.TESTS_FAILED_ERR_CODE)
# use regex
expected_pattern = re.compile(r'FAIL %s[^\s]* BASELINE' % test_name)
the_match = expected_pattern.search(output)
self.assertNotEqual(the_match, None,
msg="Cmd '%s' failed to display failed test in output:\n%s" % (cpr_cmd, output))
# Bless
run_cmd_no_fail("{}/bless_test_results --test-root {} --hist-only --force -t {}"
.format(TOOLS_DIR, TEST_ROOT, test_id))
# Hist compare should now pass again
self._create_test(compargs)
###############################################################################
def test_rebless_namelist(self):
###############################################################################
# Generate some namelist baselines
test_to_change = "TESTRUNPASS_P1.f19_g16_rx1.A"
if CIME.utils.get_model() == "e3sm":
genargs = ["-n", "-g", "-o", "-b", self._baseline_name, "cime_test_only_pass"]
compargs = ["-n", "-c", "-b", self._baseline_name, "cime_test_only_pass"]
else:
genargs = ["-n", "-g", self._baseline_name, "-o", "cime_test_only_pass"]
compargs = ["-n", "-c", self._baseline_name, "cime_test_only_pass"]
self._create_test(genargs)
# Basic namelist compare
test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
self._create_test(compargs, test_id=test_id)
# Check standalone case.cmpgen_namelists
casedir = os.path.join(self._testroot,
"%s.C.%s" % (CIME.utils.get_full_test_name(test_to_change, machine=self._machine, compiler=self._compiler), test_id))
run_cmd_assert_result(self, "./case.cmpgen_namelists", from_dir=casedir)
# compare_test_results should pass
cpr_cmd = "{}/compare_test_results --test-root {} -n -t {} 2>&1" \
.format(TOOLS_DIR, TEST_ROOT, test_id)
output = run_cmd_assert_result(self, cpr_cmd)
# use regex
expected_pattern = re.compile(r'PASS %s[^\s]* NLCOMP' % test_to_change)
the_match = expected_pattern.search(output)
self.assertNotEqual(the_match, None,
msg="Cmd '%s' failed to display passed test in output:\n%s" % (cpr_cmd, output))
# Modify namelist
fake_nl = """
&fake_nml
fake_item = 'fake'
fake = .true.
/"""
baseline_area = self._baseline_area
baseline_glob = glob.glob(os.path.join(baseline_area, self._baseline_name, "TEST*"))
self.assertEqual(len(baseline_glob), 3, msg="Expected three matches, got:\n%s" % "\n".join(baseline_glob))
import stat
for baseline_dir in baseline_glob:
nl_path = os.path.join(baseline_dir, "CaseDocs", "datm_in")
self.assertTrue(os.path.isfile(nl_path), msg="Missing file %s" % nl_path)
os.chmod(nl_path, stat.S_IRUSR | stat.S_IWUSR)
with open(nl_path, "a") as nl_file:
nl_file.write(fake_nl)
# Basic namelist compare should now fail
test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
self._create_test(compargs, test_id=test_id, pre_run_errors=True)
casedir = os.path.join(self._testroot,
"%s.C.%s" % (CIME.utils.get_full_test_name(test_to_change, machine=self._machine, compiler=self._compiler), test_id))
run_cmd_assert_result(self, "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100)
# preview namelists should work
run_cmd_assert_result(self, "./preview_namelists", from_dir=casedir)
# This should still fail
run_cmd_assert_result(self, "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100)
# compare_test_results should fail
cpr_cmd = "{}/compare_test_results --test-root {} -n -t {} 2>&1" \
.format(TOOLS_DIR, TEST_ROOT, test_id)
output = run_cmd_assert_result(self, cpr_cmd, expected_stat=CIME.utils.TESTS_FAILED_ERR_CODE)
# use regex
expected_pattern = re.compile(r'FAIL %s[^\s]* NLCOMP' % test_to_change)
the_match = expected_pattern.search(output)
        self.assertNotEqual(the_match, None,
                            msg="Cmd '%s' failed to display failed test in output:\n%s" % (cpr_cmd, output))
# Bless
run_cmd_no_fail("{}/bless_test_results --test-root {} -n --force -t {}"
.format(TOOLS_DIR, TEST_ROOT, test_id))
# Basic namelist compare should now pass again
self._create_test(compargs)
class X_TestQueryConfig(unittest.TestCase):
def test_query_compsets(self):
run_cmd_no_fail("{}/query_config --compsets".format(SCRIPT_DIR))
def test_query_components(self):
run_cmd_no_fail("{}/query_config --components".format(SCRIPT_DIR))
def test_query_grids(self):
run_cmd_no_fail("{}/query_config --grids".format(SCRIPT_DIR))
def test_query_machines(self):
run_cmd_no_fail("{}/query_config --machines".format(SCRIPT_DIR))
###############################################################################
class Z_FullSystemTest(TestCreateTestCommon):
###############################################################################
###########################################################################
def test_full_system(self):
###########################################################################
# Put this inside any test that's slow
if (FAST_ONLY):
self.skipTest("Skipping slow test")
self._create_test(["--walltime=0:15:00", "cime_developer"], test_id=self._baseline_name)
run_cmd_assert_result(self, "%s/cs.status.%s" % (self._testroot, self._baseline_name),
from_dir=self._testroot)
# Ensure that we can get test times
test_statuses = glob.glob(os.path.join(self._testroot, "*%s" % self._baseline_name, "TestStatus"))
for test_status in test_statuses:
test_time = CIME.wait_for_tests.get_test_time(os.path.dirname(test_status))
            self.assertIs(type(test_time), int, msg="get_test_time did not return an int for %s" % test_status)
            self.assertTrue(test_time > 0, msg="test time was not positive for %s" % test_status)
# Test that re-running works
tests = get_tests.get_test_suite("cime_developer", machine=self._machine, compiler=self._compiler)
for test in tests:
casedir = os.path.join(TEST_ROOT, "%s.%s" % (test, self._baseline_name))
# Subtle issue: The run phases of these tests will be in the PASS state until
# the submitted case.test script is run, which could take a while if the system is
# busy. This potentially leaves a window where the wait_for_tests command below will
# not wait for the re-submitted jobs to run because it sees the original PASS.
# The code below forces things back to PEND to avoid this race condition. Note
# that we must use the MEMLEAK phase, not the RUN phase, because RUN being in a non-PEND
# state is how system tests know they are being re-run and must reset certain
# case settings.
if self._hasbatch:
with TestStatus(test_dir=casedir) as ts:
ts.set_status(MEMLEAK_PHASE, TEST_PEND_STATUS)
run_cmd_assert_result(self, "./case.submit --skip-preview-namelist", from_dir=casedir)
self._wait_for_tests(self._baseline_name)
###############################################################################
class K_TestCimeCase(TestCreateTestCommon):
###############################################################################
###########################################################################
def test_cime_case(self):
###########################################################################
self._create_test(["--no-build", "TESTRUNPASS_P1.f19_g16_rx1.A"], test_id=self._baseline_name)
self.assertEqual(type(MACHINE.get_value("MAX_TASKS_PER_NODE")), int)
        self.assertTrue(type(MACHINE.get_value("PROJECT_REQUIRED")) in [type(None), bool])
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P1.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=False) as case:
build_complete = case.get_value("BUILD_COMPLETE")
self.assertFalse(build_complete,
msg="Build complete had wrong value '%s'" %
build_complete)
case.set_value("BUILD_COMPLETE", True)
build_complete = case.get_value("BUILD_COMPLETE")
self.assertTrue(build_complete,
msg="Build complete had wrong value '%s'" %
build_complete)
case.flush()
        build_complete = run_cmd_no_fail("./xmlquery BUILD_COMPLETE --value",
                                         from_dir=casedir)
self.assertEqual(build_complete, "TRUE",
msg="Build complete had wrong value '%s'" %
build_complete)
# Test some test properties
self.assertEqual(case.get_value("TESTCASE"), "TESTRUNPASS")
def _batch_test_fixture(self, testcase_name):
if not MACHINE.has_batch_system() or NO_BATCH:
self.skipTest("Skipping testing user prerequisites without batch systems")
testdir = os.path.join(TEST_ROOT, testcase_name)
if os.path.exists(testdir):
shutil.rmtree(testdir)
run_cmd_assert_result(self, ("{}/create_newcase --case {} --script-root {} " +
"--compset X --res f19_g16 --handle-preexisting-dirs=r --output-root {}").format(
SCRIPT_DIR, testcase_name, testdir, testdir),
from_dir=SCRIPT_DIR)
return testdir
###########################################################################
def test_cime_case_prereq(self):
###########################################################################
testcase_name = 'prereq_test'
testdir = self._batch_test_fixture(testcase_name)
with Case(testdir, read_only=False) as case:
if case.get_value("depend_string") is None:
self.skipTest("Skipping prereq test, depend_string was not provided for this batch system")
job_name = "case.run"
prereq_name = 'prereq_test'
batch_commands = case.submit_jobs(prereq=prereq_name, job=job_name, skip_pnl=True, dry_run=True)
self.assertTrue(isinstance(batch_commands, collections.Sequence), "case.submit_jobs did not return a sequence for a dry run")
self.assertTrue(len(batch_commands) > 0, "case.submit_jobs did not return any job submission string")
# The first element in the internal sequence should just be the job name
# The second one (batch_cmd_index) should be the actual batch submission command
batch_cmd_index = 1
# The prerequisite should be applied to all jobs, though we're only expecting one
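            # Illustrative shape of batch_commands (an assumption, Slurm-style; the exact
            # text depends on this machine's batch system), e.g.:
            #   [("case.run", "sbatch --dependency=afterok:prereq_test ... .case.run")]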
for batch_cmd in batch_commands:
self.assertTrue(isinstance(batch_cmd, collections.Sequence), "case.submit_jobs did not return a sequence of sequences")
self.assertTrue(len(batch_cmd) > batch_cmd_index, "case.submit_jobs returned internal sequences with length <= {}".format(batch_cmd_index))
self.assertTrue(isinstance(batch_cmd[1], six.string_types), "case.submit_jobs returned internal sequences without the batch command string as the second parameter: {}".format(batch_cmd[1]))
batch_cmd_args = batch_cmd[1]
jobid_ident = "jobid"
dep_str_fmt = case.get_env('batch').get_value('depend_string', subgroup=None)
self.assertTrue(jobid_ident in dep_str_fmt, "dependency string doesn't include the jobid identifier {}".format(jobid_ident))
dep_str = dep_str_fmt[:dep_str_fmt.index(jobid_ident)]
prereq_substr = None
while dep_str in batch_cmd_args:
dep_id_pos = batch_cmd_args.find(dep_str) + len(dep_str)
batch_cmd_args = batch_cmd_args[dep_id_pos:]
prereq_substr = batch_cmd_args[:len(prereq_name)]
if prereq_substr == prereq_name:
break
self.assertTrue(prereq_name in prereq_substr, "Dependencies added, but not the user specified one")
###########################################################################
def test_cime_case_allow_failed_prereq(self):
###########################################################################
testcase_name = 'allow_failed_prereq_test'
testdir = self._batch_test_fixture(testcase_name)
with Case(testdir, read_only=False) as case:
depend_allow = case.get_value("depend_allow_string")
if depend_allow is None:
self.skipTest("Skipping allow_failed_prereq test, depend_allow_string was not provided for this batch system")
job_name = "case.run"
prereq_name = "prereq_allow_fail_test"
depend_allow = depend_allow.replace("jobid", prereq_name)
batch_commands = case.submit_jobs(prereq=prereq_name, allow_fail=True, job=job_name, skip_pnl=True, dry_run=True)
self.assertTrue(isinstance(batch_commands, collections.Sequence), "case.submit_jobs did not return a sequence for a dry run")
num_submissions = 1
if case.get_value("DOUT_S"):
num_submissions = 2
            self.assertTrue(len(batch_commands) == num_submissions, "case.submit_jobs did not return the expected number of job submissions")
self.assertTrue(depend_allow in batch_commands[0][1])
###########################################################################
def test_cime_case_resubmit_immediate(self):
###########################################################################
testcase_name = 'resubmit_immediate_test'
testdir = self._batch_test_fixture(testcase_name)
with Case(testdir, read_only=False) as case:
depend_string = case.get_value("depend_string")
if depend_string is None:
self.skipTest("Skipping resubmit_immediate test, depend_string was not provided for this batch system")
depend_string = depend_string.replace("jobid", "")
job_name = "case.run"
num_submissions = 6
case.set_value("RESUBMIT", num_submissions - 1)
batch_commands = case.submit_jobs(job=job_name, skip_pnl=True, dry_run=True, resubmit_immediate=True)
self.assertTrue(isinstance(batch_commands, collections.Sequence), "case.submit_jobs did not return a sequence for a dry run")
if case.get_value("DOUT_S"):
num_submissions = 12
self.assertTrue(len(batch_commands) == num_submissions, "case.submit_jobs did not return {} submitted jobs".format(num_submissions))
for i, cmd in enumerate(batch_commands):
if i > 0:
self.assertTrue(depend_string in cmd[1])
###########################################################################
def test_cime_case_st_archive_resubmit(self):
###########################################################################
testcase_name = "st_archive_resubmit_test"
testdir = self._batch_test_fixture(testcase_name)
with Case(testdir, read_only=False) as case:
case.case_setup(clean=False, test_mode=False, reset=True)
orig_resubmit = 2
case.set_value("RESUBMIT", orig_resubmit)
case.case_st_archive(resubmit=False)
new_resubmit = case.get_value("RESUBMIT")
self.assertTrue(orig_resubmit == new_resubmit, "st_archive resubmitted when told not to")
case.case_st_archive(resubmit=True)
new_resubmit = case.get_value("RESUBMIT")
self.assertTrue((orig_resubmit - 1) == new_resubmit, "st_archive did not resubmit when told to")
###########################################################################
def test_cime_case_build_threaded_1(self):
###########################################################################
self._create_test(["--no-build", "TESTRUNPASS_P1x1.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P1x1.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=False) as case:
build_threaded = case.get_value("SMP_PRESENT")
self.assertFalse(build_threaded)
build_threaded = case.get_build_threaded()
self.assertFalse(build_threaded)
case.set_value("FORCE_BUILD_SMP", True)
build_threaded = case.get_build_threaded()
self.assertTrue(build_threaded)
###########################################################################
def test_cime_case_build_threaded_2(self):
###########################################################################
self._create_test(["--no-build", "TESTRUNPASS_P1x2.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P1x2.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=False) as case:
build_threaded = case.get_value("SMP_PRESENT")
self.assertTrue(build_threaded)
build_threaded = case.get_build_threaded()
self.assertTrue(build_threaded)
###########################################################################
def test_cime_case_mpi_serial(self):
###########################################################################
self._create_test(["--no-build", "TESTRUNPASS_Mmpi-serial_P10.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_Mmpi-serial_P10.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=True) as case:
# Serial cases should not be using pnetcdf
self.assertEqual(case.get_value("CPL_PIO_TYPENAME"), "netcdf")
# Serial cases should be using 1 task
self.assertEqual(case.get_value("TOTALPES"), 1)
self.assertEqual(case.get_value("NTASKS_CPL"), 1)
###########################################################################
def test_cime_case_force_pecount(self):
###########################################################################
self._create_test(["--no-build", "--force-procs=16", "--force-threads=8", "TESTRUNPASS.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P16x8.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=True) as case:
self.assertEqual(case.get_value("NTASKS_CPL"), 16)
self.assertEqual(case.get_value("NTHRDS_CPL"), 8)
###########################################################################
def test_cime_case_xmlchange_append(self):
###########################################################################
self._create_test(["--no-build", "TESTRUNPASS_P1x1.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P1x1.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
run_cmd_assert_result(self, "./xmlchange --id PIO_CONFIG_OPTS --val='-opt1'", from_dir=casedir)
result = run_cmd_assert_result(self, "./xmlquery --value PIO_CONFIG_OPTS", from_dir=casedir)
self.assertEqual(result, "-opt1")
run_cmd_assert_result(self, "./xmlchange --id PIO_CONFIG_OPTS --val='-opt2' --append", from_dir=casedir)
result = run_cmd_assert_result(self, "./xmlquery --value PIO_CONFIG_OPTS", from_dir=casedir)
self.assertEqual(result, "-opt1 -opt2")
###########################################################################
def test_cime_case_test_walltime_mgmt_1(self):
###########################################################################
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping walltime test. Depends on E3SM batch settings")
test_name = "ERS.f19_g16_rx1.A"
machine, compiler = "blues", "gnu"
self._create_test(["--no-setup", "--machine={}".format(machine), test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "0:10:00")
result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "batch")
###########################################################################
def test_cime_case_test_walltime_mgmt_2(self):
###########################################################################
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping walltime test. Depends on E3SM batch settings")
test_name = "ERS_P64.f19_g16_rx1.A"
machine, compiler = "blues", "gnu"
self._create_test(["--no-setup", "--machine={}".format(machine), test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "03:00:00")
result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "batch")
###########################################################################
def test_cime_case_test_walltime_mgmt_3(self):
###########################################################################
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping walltime test. Depends on E3SM batch settings")
test_name = "ERS_P64.f19_g16_rx1.A"
machine, compiler = "blues", "gnu"
self._create_test(["--no-setup", "--machine={}".format(machine), "--walltime=0:10:00", test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "0:10:00")
result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "batch") # Not smart enough to select faster queue
###########################################################################
def test_cime_case_test_walltime_mgmt_4(self):
###########################################################################
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping walltime test. Depends on E3SM batch settings")
test_name = "ERS_P1.f19_g16_rx1.A"
machine, compiler = "blues", "gnu"
self._create_test(["--no-setup", "--machine={}".format(machine), "--walltime=2:00:00", test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "2:00:00")
result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "batch")
###########################################################################
def test_cime_case_test_walltime_mgmt_5(self):
###########################################################################
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping walltime test. Depends on E3SM batch settings")
test_name = "ERS_P1.f19_g16_rx1.A"
machine, compiler = "blues", "gnu"
self._create_test(["--no-setup", "--machine={}".format(machine), test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
run_cmd_assert_result(self, "./xmlchange JOB_QUEUE=slartibartfast --subgroup=case.test", from_dir=casedir, expected_stat=1)
run_cmd_assert_result(self, "./xmlchange JOB_QUEUE=slartibartfast --force --subgroup=case.test", from_dir=casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "03:00:00")
result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "slartibartfast")
###########################################################################
def test_cime_case_test_walltime_mgmt_6(self):
###########################################################################
if not self._hasbatch:
self.skipTest("Skipping walltime test. Depends on batch system")
test_name = "ERS_P1.f19_g16_rx1.A"
self._create_test(["--no-build", test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
run_cmd_assert_result(self, "./xmlchange JOB_WALLCLOCK_TIME=421:32:11 --subgroup=case.test", from_dir=casedir)
run_cmd_assert_result(self, "./case.setup --reset", from_dir=casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "421:32:11")
###########################################################################
def test_cime_case_test_walltime_mgmt_7(self):
###########################################################################
if not self._hasbatch:
self.skipTest("Skipping walltime test. Depends on batch system")
test_name = "ERS_P1.f19_g16_rx1.A"
self._create_test(["--no-build", "--walltime=01:00:00", test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
run_cmd_assert_result(self, "./xmlchange JOB_WALLCLOCK_TIME=421:32:11 --subgroup=case.test", from_dir=casedir)
run_cmd_assert_result(self, "./case.setup --reset", from_dir=casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "421:32:11")
###########################################################################
def test_cime_case_test_custom_project(self):
###########################################################################
test_name = "ERS_P1.f19_g16_rx1.A"
machine, compiler = "melvin", "gnu" # have to use a machine both models know and one that doesn't put PROJECT in any key paths
self._create_test(["--no-setup", "--machine={}".format(machine), "--compiler={}".format(compiler), "--project=testproj", test_name],
test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
result = run_cmd_assert_result(self, "./xmlquery --value PROJECT --subgroup=case.test", from_dir=casedir)
self.assertEqual(result, "testproj")
###########################################################################
def test_create_test_longname(self):
###########################################################################
self._create_test(["SMS.f19_g16.2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV", "--no-build"])
###########################################################################
def test_env_loading(self):
###########################################################################
if self._machine != "melvin":
self.skipTest("Skipping env load test - Only works on melvin")
self._create_test(["--no-build", "TESTRUNPASS.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=True) as case:
env_mach = case.get_env("mach_specific")
orig_env = dict(os.environ)
env_mach.load_env(case)
module_env = dict(os.environ)
os.environ.clear()
os.environ.update(orig_env)
env_mach.load_env(case, force_method="generic")
generic_env = dict(os.environ)
os.environ.clear()
os.environ.update(orig_env)
problems = ""
for mkey, mval in module_env.items():
if mkey not in generic_env:
if not mkey.startswith("PS") and mkey != "OLDPWD":
problems += "Generic missing key: {}\n".format(mkey)
elif mval != generic_env[mkey] and mkey not in ["_", "SHLVL", "PWD"] and not mkey.endswith("()"):
problems += "Value mismatch for key {}: {} != {}\n".format(mkey, repr(mval), repr(generic_env[mkey]))
for gkey in generic_env.keys():
if gkey not in module_env:
problems += "Modules missing key: {}\n".format(gkey)
self.assertEqual(problems, "", msg=problems)
###########################################################################
def test_case_submit_interface(self):
###########################################################################
try:
import imp
except ImportError:
print("imp not found, skipping case.submit interface test")
return
sys.path.append(TOOLS_DIR)
case_submit_path = os.path.join(TOOLS_DIR, "case.submit")
submit_interface = imp.load_source("case_submit_interface", case_submit_path)
sys.argv = ["case.submit", "--batch-args", "'random_arguments_here.%j'",
"--mail-type", "fail", "--mail-user", "'random_arguments_here.%j'"]
submit_interface._main_func(None, True)
###########################################################################
def test_xml_caching(self):
###########################################################################
self._create_test(["--no-build", "TESTRUNPASS.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
active = os.path.join(casedir, "env_run.xml")
backup = os.path.join(casedir, "env_run.xml.bak")
safe_copy(active, backup)
with Case(casedir, read_only=False) as case:
env_run = EnvRun(casedir, read_only=True)
self.assertEqual(case.get_value("RUN_TYPE"), "startup")
case.set_value("RUN_TYPE", "branch")
self.assertEqual(case.get_value("RUN_TYPE"), "branch")
self.assertEqual(env_run.get_value("RUN_TYPE"), "branch")
with Case(casedir) as case:
self.assertEqual(case.get_value("RUN_TYPE"), "branch")
time.sleep(0.2)
safe_copy(backup, active)
with Case(casedir, read_only=False) as case:
self.assertEqual(case.get_value("RUN_TYPE"), "startup")
case.set_value("RUN_TYPE", "branch")
with Case(casedir, read_only=False) as case:
self.assertEqual(case.get_value("RUN_TYPE"), "branch")
time.sleep(0.2)
safe_copy(backup, active)
case.read_xml() # Manual re-sync
self.assertEqual(case.get_value("RUN_TYPE"), "startup")
case.set_value("RUN_TYPE", "branch")
self.assertEqual(case.get_value("RUN_TYPE"), "branch")
with Case(casedir) as case:
self.assertEqual(case.get_value("RUN_TYPE"), "branch")
time.sleep(0.2)
safe_copy(backup, active)
env_run = EnvRun(casedir, read_only=True)
self.assertEqual(env_run.get_value("RUN_TYPE"), "startup")
with Case(casedir, read_only=False) as case:
self.assertEqual(case.get_value("RUN_TYPE"), "startup")
case.set_value("RUN_TYPE", "branch")
# behind the back detection
with self.assertRaises(CIMEError):
with Case(casedir, read_only=False) as case:
time.sleep(0.2)
safe_copy(backup, active)
with Case(casedir, read_only=False) as case:
case.set_value("RUN_TYPE", "branch")
with self.assertRaises(CIMEError):
with Case(casedir) as case:
time.sleep(0.2)
safe_copy(backup, active)
###############################################################################
class X_TestSingleSubmit(TestCreateTestCommon):
###############################################################################
###########################################################################
def test_single_submit(self):
###########################################################################
# Skip unless on a batch system and users did not select no-batch
if (not self._hasbatch):
self.skipTest("Skipping single submit. Not valid without batch")
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping single submit. E3SM experimental feature")
if self._machine not in ["sandiatoss3"]:
self.skipTest("Skipping single submit. Only works on sandiatoss3")
# Keep small enough for now that we don't have to worry about load balancing
self._create_test(["--single-submit", "SMS_Ln9_P8.f45_g37_rx1.A", "SMS_Ln9_P8.f19_g16_rx1.A"],
env_changes="unset CIME_GLOBAL_WALLTIME &&")
###############################################################################
class L_TestSaveTimings(TestCreateTestCommon):
###############################################################################
###########################################################################
def simple_test(self, manual_timing=False):
###########################################################################
timing_flag = "" if manual_timing else "--save-timing"
driver = CIME.utils.get_cime_default_driver()
if driver == "mct":
walltime="00:15:00"
else:
walltime="00:30:00"
self._create_test(["SMS_Ln9_P1.f19_g16_rx1.A", timing_flag, "--walltime="+walltime], test_id=self._baseline_name)
statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, self._baseline_name))
self.assertEqual(len(statuses), 1, msg="Should have had exactly one match, found %s" % statuses)
casedir = os.path.dirname(statuses[0])
with Case(casedir, read_only=True) as case:
lids = get_lids(case)
timing_dir = case.get_value("SAVE_TIMING_DIR")
casename = case.get_value("CASE")
self.assertEqual(len(lids), 1, msg="Expected one LID, found %s" % lids)
if manual_timing:
run_cmd_assert_result(self, "cd %s && %s/save_provenance postrun" % (casedir, TOOLS_DIR))
if CIME.utils.get_model() == "e3sm":
provenance_dirs = glob.glob(os.path.join(timing_dir, "performance_archive", getpass.getuser(), casename, lids[0] + "*"))
self.assertEqual(len(provenance_dirs), 1, msg="provenance dirs were missing")
###########################################################################
def test_save_timings(self):
###########################################################################
self.simple_test()
###########################################################################
def test_save_timings_manual(self):
###########################################################################
self.simple_test(manual_timing=True)
# Machinery for Macros generation tests.
class MockMachines(object):
"""A mock version of the Machines object to simplify testing."""
def __init__(self, name, os_):
"""Store the name."""
self.name = name
self.os = os_
def get_machine_name(self):
"""Return the name we were given."""
return self.name
def get_value(self, var_name):
"""Allow the operating system to be queried."""
assert var_name == "OS", "Build asked for a value not " \
"implemented in the testing infrastructure."
return self.os
def is_valid_compiler(self, _): # pylint:disable=no-self-use
"""Assume all compilers are valid."""
return True
def is_valid_MPIlib(self, _):
"""Assume all MPILIB settings are valid."""
return True
# pragma pylint: disable=unused-argument
def get_default_MPIlib(self, attributes=None):
return "mpich2"
def get_default_compiler(self):
return "intel"
def get_macros(macro_maker, build_xml, build_system):
"""Generate build system ("Macros" file) output from config_compilers XML.
Arguments:
macro_maker - The underlying Build object.
build_xml - A string containing the XML to operate on.
build_system - Either "Makefile" or "CMake", depending on desired output.
The return value is a string containing the build system output.
"""
# Build.write_macros expects file-like objects as input, so
# we need to wrap the strings in StringIO objects.
xml = six.StringIO(str(build_xml))
output = six.StringIO()
output_format = None
if build_system == "Makefile":
output_format = "make"
elif build_system == "CMake":
output_format = "cmake"
else:
output_format = build_system
macro_maker.write_macros_file(macros_file=output,
output_format=output_format, xml=xml)
return str(output.getvalue())
def _wrap_config_compilers_xml(inner_string):
"""Utility function to create a config_compilers XML string.
Pass this function a string containing <compiler> elements, and it will add
the necessary header/footer to the file.
"""
_xml_template = """<?xml version="1.0" encoding="UTF-8"?>
<config_compilers>
{}
</config_compilers>
"""
return _xml_template.format(inner_string)
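# Illustrative sketch (not exercised by the test suites below): how the two helpers
# above fit together. The XML fragment is an arbitrary example; Compilers and
# MockMachines are the same objects used by the macro tests further down.
def _example_macro_generation():
    maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0)
    xml = _wrap_config_compilers_xml("<compiler><FFLAGS><base>-O2</base></FFLAGS></compiler>")
    # Returns the generated Macros text in Makefile and CMake form.
    return get_macros(maker, xml, "Makefile"), get_macros(maker, xml, "CMake")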
class MakefileTester(object):
"""Helper class for checking Makefile output.
Public methods:
__init__
query_var
assert_variable_equals
assert_variable_matches
"""
# Note that the following is a Makefile and the echo line must begin with a tab
_makefile_template = """
include Macros
query:
\techo '$({})' > query.out
"""
def __init__(self, parent, make_string):
"""Constructor for Makefile test helper class.
Arguments:
parent - The TestCase object that is using this item.
make_string - Makefile contents to test.
"""
self.parent = parent
self.make_string = make_string
def query_var(self, var_name, env, var):
"""Request the value of a variable in the Makefile, as a string.
Arguments:
var_name - Name of the variable to query.
env - A dict containing extra environment variables to set when calling
make.
var - A dict containing extra make variables to set when calling make.
(The distinction between env and var actually matters only for
CMake, though.)
"""
if env is None:
env = dict()
if var is None:
var = dict()
# Write the Makefile strings to temporary files.
temp_dir = tempfile.mkdtemp()
macros_file_name = os.path.join(temp_dir, "Macros")
makefile_name = os.path.join(temp_dir, "Makefile")
output_name = os.path.join(temp_dir, "query.out")
with open(macros_file_name, "w") as macros_file:
macros_file.write(self.make_string)
with open(makefile_name, "w") as makefile:
makefile.write(self._makefile_template.format(var_name))
environment = os.environ.copy()
environment.update(env)
environment.update(var)
gmake_exe = MACHINE.get_value("GMAKE")
if gmake_exe is None:
gmake_exe = "gmake"
run_cmd_assert_result(self.parent, "%s query --directory=%s 2>&1" % (gmake_exe, temp_dir), env=environment)
with open(output_name, "r") as output:
query_result = output.read().strip()
# Clean up the Makefiles.
shutil.rmtree(temp_dir)
return query_result
def assert_variable_equals(self, var_name, value, env=None, var=None):
"""Assert that a variable in the Makefile has a given value.
Arguments:
var_name - Name of variable to check.
value - The string that the variable value should be equal to.
env - Optional. Dict of environment variables to set when calling make.
var - Optional. Dict of make variables to set when calling make.
"""
self.parent.assertEqual(self.query_var(var_name, env, var), value)
def assert_variable_matches(self, var_name, regex, env=None, var=None):
"""Assert that a variable in the Makefile matches a regex.
Arguments:
var_name - Name of variable to check.
regex - The regex to match.
env - Optional. Dict of environment variables to set when calling make.
var - Optional. Dict of make variables to set when calling make.
"""
self.parent.assertRegexpMatches(self.query_var(var_name, env, var), regex)
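# Illustrative sketch (not used by the tests): the MakefileTester workflow, given a
# unittest.TestCase instance and a Macros string such as the Makefile output of
# get_macros above. The variable name and values here are arbitrary examples.
def _example_makefile_tester(parent, make_string):
    tester = MakefileTester(parent, make_string)
    tester.assert_variable_equals("FFLAGS", "-O2")        # exact-match check via gmake
    tester.assert_variable_matches("FFLAGS", r"^-O2\b")   # regex check
    return tester.query_var("FFLAGS", {"DEBUG": "TRUE"}, {})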
class CMakeTester(object):
"""Helper class for checking CMake output.
Public methods:
__init__
query_var
assert_variable_equals
assert_variable_matches
"""
_cmakelists_template = """
include(./Macros.cmake)
file(WRITE query.out "${{{}}}")
"""
def __init__(self, parent, cmake_string):
"""Constructor for CMake test helper class.
Arguments:
parent - The TestCase object that is using this item.
cmake_string - CMake contents to test.
"""
self.parent = parent
self.cmake_string = cmake_string
def query_var(self, var_name, env, var):
"""Request the value of a variable in Macros.cmake, as a string.
Arguments:
var_name - Name of the variable to query.
env - A dict containing extra environment variables to set when calling
cmake.
var - A dict containing extra CMake variables to set when calling cmake.
"""
if env is None:
env = dict()
if var is None:
var = dict()
# Write the CMake strings to temporary files.
temp_dir = tempfile.mkdtemp()
macros_file_name = os.path.join(temp_dir, "Macros.cmake")
cmakelists_name = os.path.join(temp_dir, "CMakeLists.txt")
output_name = os.path.join(temp_dir, "query.out")
with open(macros_file_name, "w") as macros_file:
for key in var:
macros_file.write("set({} {})\n".format(key, var[key]))
macros_file.write(self.cmake_string)
with open(cmakelists_name, "w") as cmakelists:
cmakelists.write(self._cmakelists_template.format(var_name))
environment = os.environ.copy()
environment.update(env)
os_ = MACHINE.get_value("OS")
# cmake will not work on cray systems without this flag
if os_ == "CNL":
cmake_args = "-DCMAKE_SYSTEM_NAME=Catamount"
else:
cmake_args = ""
run_cmd_assert_result(self.parent, "cmake %s . 2>&1" % cmake_args, from_dir=temp_dir, env=environment)
with open(output_name, "r") as output:
query_result = output.read().strip()
# Clean up the CMake files.
shutil.rmtree(temp_dir)
return query_result
def assert_variable_equals(self, var_name, value, env=None, var=None):
"""Assert that a variable in the CMakeLists has a given value.
Arguments:
var_name - Name of variable to check.
value - The string that the variable value should be equal to.
env - Optional. Dict of environment variables to set when calling cmake.
var - Optional. Dict of CMake variables to set when calling cmake.
"""
self.parent.assertEqual(self.query_var(var_name, env, var), value)
def assert_variable_matches(self, var_name, regex, env=None, var=None):
"""Assert that a variable in the CMkeLists matches a regex.
Arguments:
var_name - Name of variable to check.
regex - The regex to match.
env - Optional. Dict of environment variables to set when calling cmake.
var - Optional. Dict of CMake variables to set when calling cmake.
"""
self.parent.assertRegexpMatches(self.query_var(var_name, env, var), regex)
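# Illustrative sketch (not used by the tests): CMakeTester mirrors MakefileTester but
# drives cmake instead of gmake, and entries in `var` are written as set() commands
# ahead of the generated Macros.cmake. Names and values are arbitrary examples.
def _example_cmake_tester(parent, cmake_string):
    tester = CMakeTester(parent, cmake_string)
    tester.assert_variable_equals("MPI_PATH", "/path/to/mpich", env={"MPILIB": "mpich"})
    return tester.query_var("FFLAGS", {}, {"DEBUG": "TRUE"})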
###############################################################################
class G_TestMacrosBasic(unittest.TestCase):
###############################################################################
"""Basic infrastructure tests.
This class contains tests that do not actually depend on the output of the
macro file conversion. This includes basic smoke testing and tests of
error-handling in the routine.
"""
def test_script_is_callable(self):
"""The test script can be called on valid output without dying."""
# This is really more a smoke test of this script than anything else.
maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0)
test_xml = _wrap_config_compilers_xml("<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>")
get_macros(maker, test_xml, "Makefile")
def test_script_rejects_bad_xml(self):
"""The macro writer rejects input that's not valid XML."""
maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0)
with self.assertRaises(ParseError):
get_macros(maker, "This is not valid XML.", "Makefile")
def test_script_rejects_bad_build_system(self):
"""The macro writer rejects a bad build system string."""
maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0)
bad_string = "argle-bargle."
with assertRaisesRegex(self,
CIMEError,
"Unrecognized build system provided to write_macros: " + bad_string):
get_macros(maker, "This string is irrelevant.", bad_string)
###############################################################################
class H_TestMakeMacros(unittest.TestCase):
###############################################################################
"""Makefile macros tests.
This class contains tests of the Makefile output of Build.
Aside from the usual setUp and test methods, this class has a utility method
(xml_to_tester) that converts XML input directly to a MakefileTester object.
"""
def setUp(self):
self.test_os = "SomeOS"
self.test_machine = "mymachine"
self.test_compiler = MACHINE.get_default_compiler() if TEST_COMPILER is None else TEST_COMPILER
self.test_mpilib = MACHINE.get_default_MPIlib(attributes={"compiler":self.test_compiler}) if TEST_MPILIB is None else TEST_MPILIB
self._maker = Compilers(MockMachines(self.test_machine, self.test_os), version=2.0)
def xml_to_tester(self, xml_string):
"""Helper that directly converts an XML string to a MakefileTester."""
test_xml = _wrap_config_compilers_xml(xml_string)
return MakefileTester(self, get_macros(self._maker, test_xml, "Makefile"))
def test_generic_item(self):
"""The macro writer can write out a single generic item."""
xml_string = "<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>"
tester = self.xml_to_tester(xml_string)
tester.assert_variable_equals("SUPPORTS_CXX", "FALSE")
def test_machine_specific_item(self):
"""The macro writer can pick out a machine-specific item."""
xml1 = """<compiler MACH="{}"><SUPPORTS_CXX>TRUE</SUPPORTS_CXX></compiler>""".format(self.test_machine)
xml2 = """<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
# Do this a second time, but with elements in the reverse order, to
# ensure that the code is not "cheating" by taking the first match.
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
def test_ignore_non_match(self):
"""The macro writer ignores an entry with the wrong machine name."""
xml1 = """<compiler MACH="bad"><SUPPORTS_CXX>TRUE</SUPPORTS_CXX></compiler>"""
xml2 = """<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SUPPORTS_CXX", "FALSE")
# Again, double-check that we don't just get lucky with the order.
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SUPPORTS_CXX", "FALSE")
def test_os_specific_item(self):
"""The macro writer can pick out an OS-specific item."""
xml1 = """<compiler OS="{}"><SUPPORTS_CXX>TRUE</SUPPORTS_CXX></compiler>""".format(self.test_os)
xml2 = """<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
def test_mach_other_compiler(self):
"""The macro writer compiler-specific logic works as expected."""
xml1 = """<compiler COMPILER="{}"><CFLAGS><base>a b c</base></CFLAGS></compiler>""".format(self.test_compiler)
xml2 = """<compiler MACH="{}" COMPILER="other"><CFLAGS><base>x y z</base></CFLAGS></compiler>""".format(self.test_machine)
xml3 = """<compiler MACH="{}" COMPILER="{}"><CFLAGS><append>x y z</append></CFLAGS></compiler>""".format(self.test_machine,self.test_compiler)
xml4 = """<compiler MACH="{}" COMPILER="{}"><CFLAGS><base>x y z</base></CFLAGS></compiler>""".format(self.test_machine,self.test_compiler)
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("CFLAGS", "a b c",env={"COMPILER":self.test_compiler})
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("CFLAGS", "a b c",env={"COMPILER":self.test_compiler})
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("CFLAGS", "a b c",env={"COMPILER":self.test_compiler})
tester = self.xml_to_tester(xml1+xml3)
tester.assert_variable_equals("CFLAGS", "a b c x y z",env={"COMPILER":self.test_compiler})
tester = self.xml_to_tester(xml1+xml4)
tester.assert_variable_equals("CFLAGS", "x y z",env={"COMPILER":self.test_compiler})
tester = self.xml_to_tester(xml4+xml1)
tester.assert_variable_equals("CFLAGS", "x y z",env={"COMPILER":self.test_compiler})
def test_mach_beats_os(self):
"""The macro writer chooses machine-specific over os-specific matches."""
xml1 = """<compiler OS="{}"><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>""".format(self.test_os)
xml2 = """<compiler MACH="{}"><SUPPORTS_CXX>TRUE</SUPPORTS_CXX></compiler>""".format(self.test_machine)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
def test_mach_and_os_beats_mach(self):
"""The macro writer chooses the most-specific match possible."""
xml1 = """<compiler MACH="{}"><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>""".format(self.test_machine)
xml2 = """<compiler MACH="{}" OS="{}"><SUPPORTS_CXX>TRUE</SUPPORTS_CXX></compiler>"""
xml2 = xml2.format(self.test_machine, self.test_os)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
def test_build_time_attribute(self):
"""The macro writer writes conditionals for build-time choices."""
xml1 = """<compiler><MPI_PATH MPILIB="mpich">/path/to/mpich</MPI_PATH></compiler>"""
xml2 = """<compiler><MPI_PATH MPILIB="openmpi">/path/to/openmpi</MPI_PATH></compiler>"""
xml3 = """<compiler><MPI_PATH>/path/to/default</MPI_PATH></compiler>"""
tester = self.xml_to_tester(xml1+xml2+xml3)
tester.assert_variable_equals("MPI_PATH", "/path/to/default")
tester.assert_variable_equals("MPI_PATH", "/path/to/mpich", env={"MPILIB": "mpich"})
tester.assert_variable_equals("MPI_PATH", "/path/to/openmpi", env={"MPILIB": "openmpi"})
tester = self.xml_to_tester(xml3+xml2+xml1)
tester.assert_variable_equals("MPI_PATH", "/path/to/default")
tester.assert_variable_equals("MPI_PATH", "/path/to/mpich", env={"MPILIB": "mpich"})
tester.assert_variable_equals("MPI_PATH", "/path/to/openmpi", env={"MPILIB": "openmpi"})
def test_reject_duplicate_defaults(self):
"""The macro writer dies if given many defaults."""
xml1 = """<compiler><MPI_PATH>/path/to/default</MPI_PATH></compiler>"""
xml2 = """<compiler><MPI_PATH>/path/to/other_default</MPI_PATH></compiler>"""
with assertRaisesRegex(self,
CIMEError,
"Variable MPI_PATH is set ambiguously in config_compilers.xml."):
self.xml_to_tester(xml1+xml2)
def test_reject_duplicates(self):
"""The macro writer dies if given many matches for a given configuration."""
xml1 = """<compiler><MPI_PATH MPILIB="mpich">/path/to/mpich</MPI_PATH></compiler>"""
xml2 = """<compiler><MPI_PATH MPILIB="mpich">/path/to/mpich2</MPI_PATH></compiler>"""
with assertRaisesRegex(self,
CIMEError,
"Variable MPI_PATH is set ambiguously in config_compilers.xml."):
self.xml_to_tester(xml1+xml2)
def test_reject_ambiguous(self):
"""The macro writer dies if given an ambiguous set of matches."""
xml1 = """<compiler><MPI_PATH MPILIB="mpich">/path/to/mpich</MPI_PATH></compiler>"""
xml2 = """<compiler><MPI_PATH DEBUG="FALSE">/path/to/mpi-debug</MPI_PATH></compiler>"""
with assertRaisesRegex(self,
CIMEError,
"Variable MPI_PATH is set ambiguously in config_compilers.xml."):
self.xml_to_tester(xml1+xml2)
def test_compiler_changeable_at_build_time(self):
"""The macro writer writes information for multiple compilers."""
xml1 = """<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>"""
xml2 = """<compiler COMPILER="gnu"><SUPPORTS_CXX>TRUE</SUPPORTS_CXX></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE", env={"COMPILER": "gnu"})
tester.assert_variable_equals("SUPPORTS_CXX", "FALSE")
def test_base_flags(self):
"""Test that we get "base" compiler flags."""
xml1 = """<compiler><FFLAGS><base>-O2</base></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("FFLAGS", "-O2")
def test_machine_specific_base_flags(self):
"""Test selection among base compiler flag sets based on machine."""
xml1 = """<compiler><FFLAGS><base>-O2</base></FFLAGS></compiler>"""
xml2 = """<compiler MACH="{}"><FFLAGS><base>-O3</base></FFLAGS></compiler>""".format(self.test_machine)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-O3")
def test_build_time_base_flags(self):
"""Test selection of base flags based on build-time attributes."""
xml1 = """<compiler><FFLAGS><base>-O2</base></FFLAGS></compiler>"""
xml2 = """<compiler><FFLAGS><base DEBUG="TRUE">-O3</base></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-O2")
tester.assert_variable_equals("FFLAGS", "-O3", env={"DEBUG": "TRUE"})
def test_build_time_base_flags_same_parent(self):
"""Test selection of base flags in the same parent element."""
xml1 = """<base>-O2</base>"""
xml2 = """<base DEBUG="TRUE">-O3</base>"""
tester = self.xml_to_tester("<compiler><FFLAGS>"+xml1+xml2+"</FFLAGS></compiler>")
tester.assert_variable_equals("FFLAGS", "-O2")
tester.assert_variable_equals("FFLAGS", "-O3", env={"DEBUG": "TRUE"})
# Check for order independence here, too.
tester = self.xml_to_tester("<compiler><FFLAGS>"+xml2+xml1+"</FFLAGS></compiler>")
tester.assert_variable_equals("FFLAGS", "-O2")
tester.assert_variable_equals("FFLAGS", "-O3", env={"DEBUG": "TRUE"})
def test_append_flags(self):
"""Test appending flags to a list."""
xml1 = """<compiler><FFLAGS><base>-delicious</base></FFLAGS></compiler>"""
xml2 = """<compiler><FFLAGS><append>-cake</append></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-delicious -cake")
# Order independence, as usual.
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("FFLAGS", "-delicious -cake")
def test_machine_specific_append_flags(self):
"""Test appending flags that are either more or less machine-specific."""
xml1 = """<compiler><FFLAGS><append>-delicious</append></FFLAGS></compiler>"""
xml2 = """<compiler MACH="{}"><FFLAGS><append>-cake</append></FFLAGS></compiler>""".format(self.test_machine)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_matches("FFLAGS", "^(-delicious -cake|-cake -delicious)$")
def test_machine_specific_base_keeps_append_flags(self):
"""Test that machine-specific base flags don't override default append flags."""
xml1 = """<compiler><FFLAGS><append>-delicious</append></FFLAGS></compiler>"""
xml2 = """<compiler MACH="{}"><FFLAGS><base>-cake</base></FFLAGS></compiler>""".format(self.test_machine)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-cake -delicious")
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("FFLAGS", "-cake -delicious")
def test_machine_specific_base_and_append_flags(self):
"""Test that machine-specific base flags coexist with machine-specific append flags."""
xml1 = """<compiler MACH="{}"><FFLAGS><append>-delicious</append></FFLAGS></compiler>""".format(self.test_machine)
xml2 = """<compiler MACH="{}"><FFLAGS><base>-cake</base></FFLAGS></compiler>""".format(self.test_machine)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-cake -delicious")
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("FFLAGS", "-cake -delicious")
def test_append_flags_without_base(self):
"""Test appending flags to a value set before Macros is included."""
xml1 = """<compiler><FFLAGS><append>-cake</append></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("FFLAGS", "-delicious -cake", var={"FFLAGS": "-delicious"})
def test_build_time_append_flags(self):
"""Test build_time selection of compiler flags."""
xml1 = """<compiler><FFLAGS><append>-cake</append></FFLAGS></compiler>"""
xml2 = """<compiler><FFLAGS><append DEBUG="TRUE">-and-pie</append></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-cake")
tester.assert_variable_matches("FFLAGS", "^(-cake -and-pie|-and-pie -cake)$", env={"DEBUG": "TRUE"})
def test_environment_variable_insertion(self):
"""Test that ENV{..} inserts environment variables."""
# DO it again with $ENV{} style
xml1 = """<compiler><LDFLAGS><append>-L$ENV{NETCDF} -lnetcdf</append></LDFLAGS></compiler>"""
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("LDFLAGS", "-L/path/to/netcdf -lnetcdf",
env={"NETCDF": "/path/to/netcdf"})
def test_shell_command_insertion(self):
"""Test that $SHELL insert shell command output."""
xml1 = """<compiler><FFLAGS><base>-O$SHELL{echo 2} -fast</base></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("FFLAGS", "-O2 -fast")
def test_multiple_shell_commands(self):
"""Test that more than one $SHELL element can be used."""
xml1 = """<compiler><FFLAGS><base>-O$SHELL{echo 2} -$SHELL{echo fast}</base></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("FFLAGS", "-O2 -fast")
def test_env_and_shell_command(self):
"""Test that $ENV works inside $SHELL elements."""
xml1 = """<compiler><FFLAGS><base>-O$SHELL{echo $ENV{OPT_LEVEL}} -fast</base></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("FFLAGS", "-O2 -fast", env={"OPT_LEVEL": "2"})
def test_config_variable_insertion(self):
"""Test that $VAR insert variables from config_compilers."""
# Construct an absurd chain of references just to sure that we don't
# pass by accident, e.g. outputting things in the right order just due
# to good luck in a hash somewhere.
xml1 = """<MPI_LIB_NAME>stuff-${MPI_PATH}-stuff</MPI_LIB_NAME>"""
xml2 = """<MPI_PATH>${MPICC}</MPI_PATH>"""
xml3 = """<MPICC>${MPICXX}</MPICC>"""
xml4 = """<MPICXX>${MPIFC}</MPICXX>"""
xml5 = """<MPIFC>mpicc</MPIFC>"""
tester = self.xml_to_tester("<compiler>"+xml1+xml2+xml3+xml4+xml5+"</compiler>")
tester.assert_variable_equals("MPI_LIB_NAME", "stuff-mpicc-stuff")
def test_config_reject_self_references(self):
"""Test that $VAR self-references are rejected."""
# This is a special case of the next test, which also checks circular
# references.
xml1 = """<MPI_LIB_NAME>${MPI_LIB_NAME}</MPI_LIB_NAME>"""
err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined."
with assertRaisesRegex(self,CIMEError, err_msg):
self.xml_to_tester("<compiler>"+xml1+"</compiler>")
def test_config_reject_cyclical_references(self):
"""Test that cyclical $VAR references are rejected."""
xml1 = """<MPI_LIB_NAME>${MPI_PATH}</MPI_LIB_NAME>"""
xml2 = """<MPI_PATH>${MPI_LIB_NAME}</MPI_PATH>"""
err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined."
with assertRaisesRegex(self,CIMEError, err_msg):
self.xml_to_tester("<compiler>"+xml1+xml2+"</compiler>")
def test_variable_insertion_with_machine_specific_setting(self):
"""Test that machine-specific $VAR dependencies are correct."""
xml1 = """<compiler><MPI_LIB_NAME>something</MPI_LIB_NAME></compiler>"""
xml2 = """<compiler MACH="{}"><MPI_LIB_NAME>$MPI_PATH</MPI_LIB_NAME></compiler>""".format(self.test_machine)
xml3 = """<compiler><MPI_PATH>${MPI_LIB_NAME}</MPI_PATH></compiler>"""
err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined."
with assertRaisesRegex(self,CIMEError, err_msg):
self.xml_to_tester(xml1+xml2+xml3)
def test_override_with_machine_and_new_attributes(self):
"""Test that overrides with machine-specific settings with added attributes work correctly."""
xml1 = """
<compiler COMPILER="{}">
<SCC>icc</SCC>
<MPICXX>mpicxx</MPICXX>
<MPIFC>mpif90</MPIFC>
<MPICC>mpicc</MPICC>
</compiler>""".format(self.test_compiler)
xml2 = """
<compiler COMPILER="{}" MACH="{}">
<MPICXX>mpifoo</MPICXX>
<MPIFC MPILIB="{}">mpiffoo</MPIFC>
<MPICC MPILIB="NOT_MY_MPI">mpifouc</MPICC>
</compiler>
""".format(self.test_compiler, self.test_machine, self.test_mpilib)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SCC", "icc", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester.assert_variable_equals("MPICXX", "mpifoo", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester.assert_variable_equals("MPIFC", "mpiffoo", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester.assert_variable_equals("MPICC", "mpicc", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SCC", "icc", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester.assert_variable_equals("MPICXX", "mpifoo", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester.assert_variable_equals("MPIFC", "mpiffoo", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester.assert_variable_equals("MPICC", "mpicc", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
def test_override_with_machine_and_same_attributes(self):
"""Test that machine-specific conditional overrides with the same attribute work correctly."""
xml1 = """
<compiler COMPILER="{}">
<MPIFC MPILIB="{}">mpifc</MPIFC>
</compiler>""".format(self.test_compiler, self.test_mpilib)
xml2 = """
<compiler MACH="{}" COMPILER="{}">
<MPIFC MPILIB="{}">mpif90</MPIFC>
</compiler>
""".format(self.test_machine, self.test_compiler, self.test_mpilib)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("MPIFC", "mpif90", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("MPIFC", "mpif90", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
def test_appends_not_overriden(self):
"""Test that machine-specific base value changes don't interfere with appends."""
xml1="""
<compiler COMPILER="{}">
<FFLAGS>
<base>-base1</base>
<append DEBUG="FALSE">-debug1</append>
</FFLAGS>
</compiler>""".format(self.test_compiler)
xml2="""
<compiler MACH="{}" COMPILER="{}">
<FFLAGS>
<base>-base2</base>
<append DEBUG="TRUE">-debug2</append>
</FFLAGS>
</compiler>""".format(self.test_machine, self.test_compiler)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-base2", env={"COMPILER": self.test_compiler})
tester.assert_variable_equals("FFLAGS", "-base2 -debug2", env={"COMPILER": self.test_compiler, "DEBUG": "TRUE"})
tester.assert_variable_equals("FFLAGS", "-base2 -debug1", env={"COMPILER": self.test_compiler, "DEBUG": "FALSE"})
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("FFLAGS", "-base2", env={"COMPILER": self.test_compiler})
tester.assert_variable_equals("FFLAGS", "-base2 -debug2", env={"COMPILER": self.test_compiler, "DEBUG": "TRUE"})
tester.assert_variable_equals("FFLAGS", "-base2 -debug1", env={"COMPILER": self.test_compiler, "DEBUG": "FALSE"})
def test_multilevel_specificity(self):
"""Check that settings with multiple levels of machine-specificity can be resolved."""
xml1="""
<compiler>
<MPIFC DEBUG="FALSE">mpifc</MPIFC>
</compiler>"""
xml2="""
<compiler OS="{}">
<MPIFC MPILIB="{}">mpif03</MPIFC>
</compiler>""".format(self.test_os, self.test_mpilib)
xml3="""
<compiler MACH="{}">
<MPIFC DEBUG="TRUE">mpif90</MPIFC>
</compiler>""".format(self.test_machine)
# To verify order-independence, test every possible ordering of blocks.
testers = []
testers.append(self.xml_to_tester(xml1+xml2+xml3))
testers.append(self.xml_to_tester(xml1+xml3+xml2))
testers.append(self.xml_to_tester(xml2+xml1+xml3))
testers.append(self.xml_to_tester(xml2+xml3+xml1))
testers.append(self.xml_to_tester(xml3+xml1+xml2))
testers.append(self.xml_to_tester(xml3+xml2+xml1))
for tester in testers:
tester.assert_variable_equals("MPIFC", "mpif90", env={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib, "DEBUG": "TRUE"})
tester.assert_variable_equals("MPIFC", "mpif03", env={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib, "DEBUG": "FALSE"})
tester.assert_variable_equals("MPIFC", "mpifc", env={"COMPILER": self.test_compiler, "MPILIB": "NON_MATCHING_MPI", "DEBUG": "FALSE"})
def test_remove_dependency_issues(self):
"""Check that overridden settings don't cause inter-variable dependencies."""
xml1="""
<compiler>
<MPIFC>${SFC}</MPIFC>
</compiler>"""
xml2="""
<compiler MACH="{}">""".format(self.test_machine) + """
<SFC>${MPIFC}</SFC>
<MPIFC>mpif90</MPIFC>
</compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SFC", "mpif90")
tester.assert_variable_equals("MPIFC", "mpif90")
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SFC", "mpif90")
tester.assert_variable_equals("MPIFC", "mpif90")
###############################################################################
class I_TestCMakeMacros(H_TestMakeMacros):
###############################################################################
"""CMake macros tests.
This class contains tests of the CMake output of Build.
This class simply inherits all of the methods of H_TestMakeMacros, but changes
the definition of xml_to_tester to create a CMakeTester instead.
"""
def xml_to_tester(self, xml_string):
"""Helper that directly converts an XML string to a MakefileTester."""
test_xml = _wrap_config_compilers_xml(xml_string)
if (NO_CMAKE):
self.skipTest("Skipping cmake test")
else:
return CMakeTester(self, get_macros(self._maker, test_xml, "CMake"))
###############################################################################
class S_TestManageAndQuery(unittest.TestCase):
"""Tests various scripts to manage and query xml files"""
def _run_and_assert_query_testlist(self, extra_args=""):
"""Ensure that query_testlist runs successfully with the given extra arguments"""
files = Files()
testlist_drv = files.get_value("TESTS_SPEC_FILE", {"component":"drv"})
run_cmd_assert_result(self, "{}/query_testlists --xml-testlist {} {}".format(
SCRIPT_DIR, testlist_drv, extra_args))
def test_query_testlists_runs(self):
"""Make sure that query_testlists runs successfully
This simply makes sure that query_testlists doesn't generate any errors
when it runs. This helps ensure that changes in other utilities don't
break query_testlists.
"""
self._run_and_assert_query_testlist(extra_args="--show-options")
def test_query_testlists_define_testtypes_runs(self):
"""Make sure that query_testlists runs successfully with the --define-testtypes argument"""
self._run_and_assert_query_testlist(extra_args="--define-testtypes")
def test_query_testlists_count_runs(self):
"""Make sure that query_testlists runs successfully with the --count argument"""
self._run_and_assert_query_testlist(extra_args="--count")
def test_query_testlists_list_runs(self):
"""Make sure that query_testlists runs successfully with the --list argument"""
self._run_and_assert_query_testlist(extra_args="--list categories")
###############################################################################
class B_CheckCode(unittest.TestCase):
###############################################################################
# Tests are generated in the main loop below
longMessage = True
all_results = None
def make_pylint_test(pyfile, all_files):
def test(self):
if B_CheckCode.all_results is None:
B_CheckCode.all_results = check_code(all_files)
#pylint: disable=unsubscriptable-object
result = B_CheckCode.all_results[pyfile]
self.assertTrue(result == "", msg=result)
return test
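# Note: make_pylint_test returns a closure bound to a single file. The first
# generated test to run populates B_CheckCode.all_results by checking every
# file at once, so later tests simply look up their cached per-file result.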
def check_for_pylint():
#pylint: disable=import-error
from distutils.spawn import find_executable
pylint = find_executable("pylint")
if pylint is not None:
output = run_cmd_no_fail("pylint --version")
pylintver = re.search(r"pylint\s+(\d+)[.](\d+)[.](\d+)", output)
major = int(pylintver.group(1))
minor = int(pylintver.group(2))
if pylint is None or major < 1 or (major == 1 and minor < 5):
print("pylint version 1.5 or newer not found, pylint tests skipped")
return False
return True
def write_provenance_info():
curr_commit = get_current_commit(repo=LIB_DIR)
logging.info("\nTesting commit %s" % curr_commit)
cime_model = CIME.utils.get_model()
logging.info("Using cime_model = %s" % cime_model)
logging.info("Testing machine = %s" % MACHINE.get_machine_name())
if TEST_COMPILER is not None:
logging.info("Testing compiler = %s"% TEST_COMPILER)
if TEST_MPILIB is not None:
logging.info("Testing mpilib = %s"% TEST_MPILIB)
logging.info("Test root: %s\n" % TEST_ROOT)
def _main_func(description):
global MACHINE
global NO_CMAKE
global FAST_ONLY
global NO_BATCH
global TEST_COMPILER
global TEST_MPILIB
global TEST_ROOT
global GLOBAL_TIMEOUT
global NO_TEARDOWN
config = CIME.utils.get_cime_config()
help_str = \
"""
{0} [TEST] [TEST]
OR
{0} --help
\033[1mEXAMPLES:\033[0m
\033[1;32m# Run the full suite \033[0m
> {0}
\033[1;32m# Run all code checker tests \033[0m
> {0} B_CheckCode
\033[1;32m# Run test test_wait_for_test_all_pass from class M_TestWaitForTests \033[0m
> {0} M_TestWaitForTests.test_wait_for_test_all_pass
""".format(os.path.basename(sys.argv[0]))
parser = argparse.ArgumentParser(usage=help_str,
description=description,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--fast", action="store_true",
help="Skip full system tests, which saves a lot of time")
parser.add_argument("--no-batch", action="store_true",
help="Do not submit jobs to batch system, run locally."
" If false, will default to machine setting.")
parser.add_argument("--no-cmake", action="store_true",
help="Do not run cmake tests")
parser.add_argument("--no-teardown", action="store_true",
help="Do not delete directories left behind by testing")
parser.add_argument("--machine",
help="Select a specific machine setting for cime")
parser.add_argument("--compiler",
help="Select a specific compiler setting for cime")
parser.add_argument( "--mpilib",
help="Select a specific compiler setting for cime")
parser.add_argument( "--test-root",
help="Select a specific test root for all cases created by the testing")
parser.add_argument("--timeout", type=int,
help="Select a specific timeout for all tests")
ns, args = parser.parse_known_args()
# Now set the sys.argv to the unittest_args (leaving sys.argv[0] alone)
sys.argv[1:] = args
FAST_ONLY = ns.fast
NO_BATCH = ns.no_batch
NO_CMAKE = ns.no_cmake
GLOBAL_TIMEOUT = ns.timeout
NO_TEARDOWN = ns.no_teardown
if ns.machine is not None:
MACHINE = Machines(machine=ns.machine)
os.environ["CIME_MACHINE"] = ns.machine
elif "CIME_MACHINE" in os.environ:
mach_name = os.environ["CIME_MACHINE"]
MACHINE = Machines(machine=mach_name)
elif config.has_option("create_test", "MACHINE"):
MACHINE = Machines(machine=config.get("create_test", "MACHINE"))
elif config.has_option("main", "MACHINE"):
MACHINE = Machines(machine=config.get("main", "MACHINE"))
else:
MACHINE = Machines()
if ns.compiler is not None:
TEST_COMPILER = ns.compiler
elif config.has_option("create_test", "COMPILER"):
TEST_COMPILER = config.get("create_test", "COMPILER")
elif config.has_option("main", "COMPILER"):
TEST_COMPILER = config.get("main", "COMPILER")
if ns.mpilib is not None:
TEST_MPILIB = ns.mpilib
elif config.has_option("create_test", "MPILIB"):
TEST_MPILIB = config.get("create_test", "MPILIB")
elif config.has_option("main", "MPILIB"):
TEST_MPILIB = config.get("main", "MPILIB")
if ns.test_root is not None:
TEST_ROOT = ns.test_root
elif config.has_option("create_test", "TEST_ROOT"):
TEST_ROOT = config.get("create_test", "TEST_ROOT")
else:
TEST_ROOT = os.path.join(MACHINE.get_value("CIME_OUTPUT_ROOT"),
"scripts_regression_test.%s"% CIME.utils.get_timestamp())
args = lambda: None # just something to set attrs on
for log_param in ["debug", "silent", "verbose"]:
flag = "--%s" % log_param
if flag in sys.argv:
sys.argv.remove(flag)
setattr(args, log_param, True)
else:
setattr(args, log_param, False)
args = CIME.utils.parse_args_and_handle_standard_logging_options(args, None)
write_provenance_info()
# Find all python files in repo and create a pylint test for each
if check_for_pylint():
files_to_test = get_all_checkable_files()
for file_to_test in files_to_test:
pylint_test = make_pylint_test(file_to_test, files_to_test)
testname = "test_pylint_%s" % file_to_test.replace("/", "_").replace(".", "_")
expect(not hasattr(B_CheckCode, testname), "Repeat %s" % testname)
setattr(B_CheckCode, testname, pylint_test)
try:
unittest.main(verbosity=2, catchbreak=True)
except CIMEError as e:
if e.__str__() != "False":
print("Detected failures, leaving directory:", TEST_ROOT)
else:
print("All pass, removing directory:", TEST_ROOT)
if os.path.exists(TEST_ROOT) and not NO_TEARDOWN:
shutil.rmtree(TEST_ROOT)
raise
if (__name__ == "__main__"):
_main_func(__doc__)
|
past.py
|
"""An interpreter for My Unreliable Past programs
https://esolangs.org/wiki/My_Unreliable_Past
"""
import logging
import string
import sys
import threading
from random import choice, random, randrange
from esolang import INTERPRETERS
logger = logging.getLogger(__name__)
if sys.version_info.major < 3:
chr = unichr
from Queue import Queue
else:
from queue import Queue
def align(source):
"""Return a string containing the same program but aligned
to the start of a transaction or command (in that order).
This function does not run a complete syntax check, but raises
a ValueError if the source is commented incorrectly.
Examples:
>>> align("+67; O=0, O+66; O=0, O")
' O=0, O+66; O=0, O+67;'
>>> align("0, O+66, O=")
' O+66, O=0,'
>>> align("=0O")
'O=0'
>>> align("some (comments) in here.)(Only ")
'(Only some (comments) in here.)'
Raises:
ValueError: If the source is commented incorrectly.
"""
# It's important to align at comments first, because
# any of ";,=+-" could be part of a comment.
# Make sure we have a matching count of '(' and ')'.
if not source.count('(') == source.count(')'):
raise ValueError(
"Incorrectly commented source: count fo '(':" +
"%d, count for ')': %d." % (source.count('('), source.count(')')))
indices = [idx for idx in range(len(source)) if source[idx] == '(']
for start in indices:
idx = start
count = 0
# Run through the source and keep track of the
# count of opening and closing parentheses.
# If we reach the starting index again and count is 0
# we have found a valid starting index for the program,
# if the count is < 0 at any point, the starting point is invalid.
while True:
if source[idx] == '(':
count += 1
elif source[idx] == ')':
count -= 1
if count < 0:
break
idx += 1
idx %= len(source)
if idx == start:
break
# found a valid start
if count == 0:
return source[start:] + source[:start]
# If we reached this point, there wasn't a valid starting '('.
if indices:
raise ValueError(
"Incorrectly commented source. No valid rotation exists.")
for char in ";,":
try:
idx = source.index(char)
source = source[idx + 1:] + source[:idx + 1]
return source
except ValueError:
pass
# no ";" or "," present align at "+-="
for char in "+-=":
try:
idx = source.index(char)
source = source[idx - 1:] + source[:idx - 1]
return source
except ValueError:
pass
# Source empty? There could still be syntactically invalid programs,
# but this is checked later...
return source
def strip_comments(source):
"""Remove all comments from the source.
This must be run after align().
"""
s = ""
count = 0
for char in source:
if char == '(':
count += 1
elif char == ')':
count -= 1
elif count == 0:
s += char
return s
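# Illustrative example of what strip_comments removes (independent of align()):
#   strip_comments("O=0(set O to zero), O+66") == "O=0, O+66"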
class Interpreter(object):
lang = "My Unreliable Past"
ext = ".past"
def __init__(
self, infile=sys.stdin, outfile=sys.stdout, errfile=sys.stderr):
self.infile = infile
self.outfile = outfile
self.errfile = errfile
self.registers = dict([(n, 0) for n in "ABCDEFGHIKLMNOPQRSTUWXYZ"])
self.transactions = []
self.tc = 0
self.input_q = Queue()
self.input = []
self.input_idx = 0
self.input_chance = 0.125
self.output_q = Queue()
self.output_chance = 0.125
self.running = True
def input_thread(self):
"""Keeps reading the infile until it closes."""
while self.running:
if self.input_q.empty():
char = self.infile.read(1)
if char:
self.input_q.put(char)
def output_thread(self):
"""Write everything from the output_q to outfile."""
while self.running:
if not self.output_q.empty():
char = self.output_q.get()
self.outfile.write(char)
self.outfile.flush()
def setup_registers(self):
logger.debug("Setting up registers:")
for name in self.registers:
# Randomize register value
# 0 with chance 1/2 = 1/2 ** 1
# 1 with chance 1/4 = 1/2 ** 2
# [2, 3] with chance 1/8 = 1/2 ** 3
# [4, 5, 6, 7] with chance 1/16 = 1/2 ** 4
exp = 0
while True:
vmin = int(2**(exp-1))
vmax = 2**exp
exp += 1
# On each iteration, we accept half of the random() result.
# E.g. This gives us 1/2 ** 3 = 1/8 chance
# for the 3rd iteration.
if random() < 0.5:
self.registers[name] = randrange(vmin, vmax)
logger.debug(" - %s: %d" % (name, self.registers[name]))
break
def load(self, source):
# align the source, strip comments and whitespace,
# split by transaction and command and parse the resulting
# lists.
source = align(source)
source = strip_comments(source)
for char in string.whitespace:
source = source.replace(char, "")
self.transactions = []
for t in source.split(";"):
# skip empty transactions
if not t:
continue
commands = []
for c in t.split(","):
# skip empty commands
if not c:
continue
if len(c) < 3:
raise ValueError(
"Command '%s' too short in transaction '%s'." % (c, t))
if c[0] not in self.registers:
raise ValueError(
("Unsupported register '%s' in " +
"transaction '%s'.") % (c[0], t))
if c[1] not in "+-=":
raise ValueError(
("Unsupported operation '%s' in " +
"transaction '%s'.") % (c[1], t))
try:
value = int(c[2:])
except ValueError:
raise ValueError(
("Invalid integer '%s' in " +
"transaction '%s'.") % (c[2:], t))
commands.append((c[0], c[1], value))
self.transactions.append(commands)
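# Illustrative example: load("O=0, O+66;") parses the program into
#   self.transactions == [[('O', '=', 0), ('O', '+', 66)]]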
def step(self):
"""Execute a single transaction"""
# Create a backup, in case we have to rollback.
backup = dict(self.registers)
for name, op, value in self.transactions[self.tc]:
if op == "+":
self.registers[name] += value
elif op == "-":
self.registers[name] -= value
# rollback and cancel the transaction,
# if the result is negative.
if self.registers[name] < 0:
self.registers = backup
break
elif op == "=":
# Rollback and cancel the transaction,
# if reg doesn't have the correct value.
if self.registers[name] != value:
self.registers = backup
break
# handle I/O
if self.registers["O"] != 0:
if random() < self.output_chance and self.output_q.empty():
self.output_q.put(chr(self.registers["O"] - 1))
logger.debug("Output register set to 0 from: %d" %
self.registers["O"])
self.registers["O"] = 0
if self.registers["I"] == 0:
if random() < self.input_chance:
char = None
if self.infile.closed:
# replay already seen input, unless we have none.
if len(self.input) > 0:
self.input_idx %= len(self.input)
char = self.input[self.input_idx]
self.input_idx += 1
elif not self.input_q.empty():
# get char from input_q and add it to input
char = self.input_q.get()
self.input.append(char)
if char is not None:
self.registers["I"] = ord(char) + 1
logger.debug("Input register set to: %d" %
self.registers["I"])
self.tc += 1
self.tc %= len(self.transactions)
def run(self, source):
self.load(source)
self.setup_registers()
# Set transaction counter to a random transaction.
self.tc = randrange(len(self.transactions))
logger.debug("Starting I/O threads...")
t1 = threading.Thread(target=self.input_thread)
t2 = threading.Thread(target=self.output_thread)
t1.daemon = t2.daemon = True
t1.start()
t2.start()
while True:
try:
self.step()
except StopIteration:
break
INTERPRETERS.append(Interpreter)
|
executorselenium.py
|
import os
import socket
import sys
import threading
import time
import traceback
import urlparse
import uuid
from .base import (ExecutorException,
Protocol,
RefTestExecutor,
RefTestImplementation,
TestExecutor,
TestharnessExecutor,
testharness_result_converter,
reftest_result_converter,
strip_server)
from ..testrunner import Stop
here = os.path.join(os.path.split(__file__)[0])
webdriver = None
exceptions = None
RemoteConnection = None
extra_timeout = 5
def do_delayed_imports():
global webdriver
global exceptions
global RemoteConnection
from selenium import webdriver
from selenium.common import exceptions
from selenium.webdriver.remote.remote_connection import RemoteConnection
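# do_delayed_imports defers the selenium imports until a SeleniumProtocol is
# constructed, so this module can still be imported on systems where the
# selenium package is not installed.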
class SeleniumProtocol(Protocol):
def __init__(self, executor, browser, capabilities, **kwargs):
do_delayed_imports()
Protocol.__init__(self, executor, browser)
self.capabilities = capabilities
self.url = browser.webdriver_url
self.webdriver = None
def setup(self, runner):
"""Connect to browser via Selenium's WebDriver implementation."""
self.runner = runner
self.logger.debug("Connecting to Selenium on URL: %s" % self.url)
session_started = False
try:
self.webdriver = webdriver.Remote(command_executor=RemoteConnection(self.url.strip("/"),
resolve_ip=False),
desired_capabilities=self.capabilities)
except:
self.logger.warning(
"Connecting to Selenium failed:\n%s" % traceback.format_exc())
else:
self.logger.debug("Selenium session started")
session_started = True
if not session_started:
self.logger.warning("Failed to connect to Selenium")
self.executor.runner.send_message("init_failed")
else:
try:
self.after_connect()
except:
print >> sys.stderr, traceback.format_exc()
self.logger.warning(
"Failed to connect to navigate initial page")
self.executor.runner.send_message("init_failed")
else:
self.executor.runner.send_message("init_succeeded")
def teardown(self):
self.logger.debug("Hanging up on Selenium session")
try:
self.webdriver.quit()
except:
pass
del self.webdriver
def is_alive(self):
try:
# Get a simple property over the connection
self.webdriver.current_window_handle
# TODO what exception?
except (socket.timeout, exceptions.ErrorInResponseException):
return False
return True
def after_connect(self):
self.load_runner("http")
def load_runner(self, protocol):
url = urlparse.urljoin(self.executor.server_url(protocol),
"/testharness_runner.html")
self.logger.debug("Loading %s" % url)
self.webdriver.get(url)
self.webdriver.execute_script("document.title = '%s'" %
threading.current_thread().name.replace("'", '"'))
def wait(self):
while True:
try:
self.webdriver.execute_async_script("");
except exceptions.TimeoutException:
pass
except (socket.timeout, exceptions.NoSuchWindowException,
exceptions.ErrorInResponseException, IOError):
break
except Exception as e:
self.logger.error(traceback.format_exc(e))
break
class SeleniumRun(object):
def __init__(self, func, webdriver, url, timeout):
self.func = func
self.result = None
self.webdriver = webdriver
self.url = url
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
timeout = self.timeout
try:
self.webdriver.set_script_timeout((timeout + extra_timeout) * 1000)
except exceptions.ErrorInResponseException:
self.logger.error("Lost WebDriver connection")
return Stop
executor = threading.Thread(target=self._run)
executor.start()
flag = self.result_flag.wait(timeout + 2 * extra_timeout)
if self.result is None:
assert not flag
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.webdriver, self.url, self.timeout)
except exceptions.TimeoutException:
self.result = False, ("EXTERNAL-TIMEOUT", None)
except (socket.timeout, exceptions.ErrorInResponseException):
self.result = False, ("CRASH", None)
except Exception as e:
message = getattr(e, "message", "")
if message:
message += "\n"
message += traceback.format_exc(e)
self.result = False, ("ERROR", e)
finally:
self.result_flag.set()
class SeleniumTestharnessExecutor(TestharnessExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
close_after_done=True, capabilities=None, debug_info=None,
**kwargs):
"""Selenium-based executor for testharness.js tests"""
TestharnessExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = SeleniumProtocol(self, browser, capabilities)
with open(os.path.join(here, "testharness_webdriver.js")) as f:
self.script = f.read()
self.close_after_done = close_after_done
self.window_id = str(uuid.uuid4())
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
if new_environment["protocol"] != self.last_environment["protocol"]:
self.protocol.load_runner(new_environment["protocol"])
def do_test(self, test):
url = self.test_url(test)
success, data = SeleniumRun(self.do_testharness,
self.protocol.webdriver,
url,
test.timeout * self.timeout_multiplier).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_testharness(self, webdriver, url, timeout):
return webdriver.execute_async_script(
self.script % {"abs_url": url,
"url": strip_server(url),
"window_id": self.window_id,
"timeout_multiplier": self.timeout_multiplier,
"timeout": timeout * 1000})
class SeleniumRefTestExecutor(RefTestExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
screenshot_cache=None, close_after_done=True,
debug_info=None, capabilities=None, **kwargs):
"""Selenium WebDriver-based executor for reftests"""
RefTestExecutor.__init__(self,
browser,
server_config,
screenshot_cache=screenshot_cache,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = SeleniumProtocol(self, browser,
capabilities=capabilities)
self.implementation = RefTestImplementation(self)
self.close_after_done = close_after_done
self.has_window = False
with open(os.path.join(here, "reftest.js")) as f:
self.script = f.read()
with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
self.wait_script = f.read()
def is_alive(self):
return self.protocol.is_alive()
def do_test(self, test):
self.logger.info("Test requires OS-level window focus")
self.protocol.webdriver.set_window_size(600, 600)
result = self.implementation.run_test(test)
return self.convert_result(test, result)
def screenshot(self, test, viewport_size, dpi):
# https://github.com/w3c/wptrunner/issues/166
assert viewport_size is None
assert dpi is None
return SeleniumRun(self._screenshot,
self.protocol.webdriver,
self.test_url(test),
test.timeout).run()
def _screenshot(self, webdriver, url, timeout):
webdriver.get(url)
webdriver.execute_async_script(self.wait_script)
screenshot = webdriver.get_screenshot_as_base64()
# Strip off the "data:image/png;base64," prefix of the data URL.
if screenshot.startswith("data:image/png;base64,"):
screenshot = screenshot.split(",", 1)[1]
return screenshot
|
test_pooling.py
|
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test built in connection-pooling with threads."""
import gc
import random
import socket
import sys
import threading
import time
from bson.son import SON
from bson.codec_options import DEFAULT_CODEC_OPTIONS
from pymongo import MongoClient, message
from pymongo.errors import (AutoReconnect,
ConnectionFailure,
DuplicateKeyError,
ExceededMaxWaiters)
sys.path[0:0] = [""]
from pymongo.pool import Pool, PoolOptions
from pymongo.socket_checker import SocketChecker
from test import client_context, unittest
from test.utils import (get_pool,
joinall,
delay,
rs_or_single_client)
@client_context.require_connection
def setUpModule():
pass
N = 10
DB = "pymongo-pooling-tests"
def gc_collect_until_done(threads, timeout=60):
start = time.time()
running = list(threads)
while running:
assert (time.time() - start) < timeout, "Threads timed out"
for t in running:
t.join(0.1)
if not t.is_alive():
running.remove(t)
gc.collect()
class MongoThread(threading.Thread):
"""A thread that uses a MongoClient."""
def __init__(self, client):
super(MongoThread, self).__init__()
self.daemon = True # Don't hang whole test if thread hangs.
self.client = client
self.db = self.client[DB]
self.passed = False
def run(self):
self.run_mongo_thread()
self.passed = True
def run_mongo_thread(self):
raise NotImplementedError
class InsertOneAndFind(MongoThread):
def run_mongo_thread(self):
for _ in range(N):
rand = random.randint(0, N)
_id = self.db.sf.insert_one({"x": rand}).inserted_id
assert rand == self.db.sf.find_one(_id)["x"]
class Unique(MongoThread):
def run_mongo_thread(self):
for _ in range(N):
self.db.unique.insert_one({}) # no error
class NonUnique(MongoThread):
def run_mongo_thread(self):
for _ in range(N):
try:
self.db.unique.insert_one({"_id": "jesse"})
except DuplicateKeyError:
pass
else:
raise AssertionError("Should have raised DuplicateKeyError")
class Disconnect(MongoThread):
def run_mongo_thread(self):
for _ in range(N):
self.client.close()
class SocketGetter(MongoThread):
"""Utility for TestPooling.
Checks out a socket and holds it forever. Used in
test_no_wait_queue_timeout, test_wait_queue_multiple, and
test_no_wait_queue_multiple.
"""
def __init__(self, client, pool):
super(SocketGetter, self).__init__(client)
self.state = 'init'
self.pool = pool
self.sock = None
def run_mongo_thread(self):
self.state = 'get_socket'
# Pass 'checkout' so we can hold the socket.
with self.pool.get_socket({}, checkout=True) as sock:
self.sock = sock
self.state = 'sock'
def __del__(self):
if self.sock:
self.sock.close_socket(None)
def run_cases(client, cases):
threads = []
n_runs = 5
for case in cases:
for i in range(n_runs):
t = case(client)
t.start()
threads.append(t)
for t in threads:
t.join()
for t in threads:
assert t.passed, "%s.run() threw an exception" % repr(t)
class _TestPoolingBase(unittest.TestCase):
"""Base class for all connection-pool tests."""
def setUp(self):
self.c = rs_or_single_client()
db = self.c[DB]
db.unique.drop()
db.test.drop()
db.unique.insert_one({"_id": "jesse"})
db.test.insert_many([{} for _ in range(10)])
def tearDown(self):
self.c.close()
def create_pool(
self,
pair=(client_context.host, client_context.port),
*args,
**kwargs):
# Start the pool with the correct ssl options.
pool_options = client_context.client._topology_settings.pool_options
kwargs['ssl_context'] = pool_options.ssl_context
kwargs['ssl_match_hostname'] = pool_options.ssl_match_hostname
kwargs['server_api'] = pool_options.server_api
pool = Pool(pair, PoolOptions(*args, **kwargs))
pool.ready()
return pool
class TestPooling(_TestPoolingBase):
def test_max_pool_size_validation(self):
host, port = client_context.host, client_context.port
self.assertRaises(
ValueError, MongoClient, host=host, port=port, maxPoolSize=-1)
self.assertRaises(
ValueError, MongoClient, host=host, port=port, maxPoolSize='foo')
c = MongoClient(host=host, port=port, maxPoolSize=100, connect=False)
self.assertEqual(c.max_pool_size, 100)
def test_no_disconnect(self):
run_cases(self.c, [NonUnique, Unique, InsertOneAndFind])
def test_disconnect(self):
run_cases(self.c, [InsertOneAndFind, Disconnect, Unique])
def test_pool_reuses_open_socket(self):
# Test Pool's _check_closed() method doesn't close a healthy socket.
cx_pool = self.create_pool(max_pool_size=10)
cx_pool._check_interval_seconds = 0 # Always check.
with cx_pool.get_socket({}) as sock_info:
pass
with cx_pool.get_socket({}) as new_sock_info:
self.assertEqual(sock_info, new_sock_info)
self.assertEqual(1, len(cx_pool.sockets))
def test_get_socket_and_exception(self):
# get_socket() returns socket after a non-network error.
cx_pool = self.create_pool(max_pool_size=1, wait_queue_timeout=1)
with self.assertRaises(ZeroDivisionError):
with cx_pool.get_socket({}) as sock_info:
1 / 0
# Socket was returned, not closed.
with cx_pool.get_socket({}) as new_sock_info:
self.assertEqual(sock_info, new_sock_info)
self.assertEqual(1, len(cx_pool.sockets))
def test_pool_removes_closed_socket(self):
# Test that Pool removes explicitly closed socket.
cx_pool = self.create_pool()
with cx_pool.get_socket({}) as sock_info:
# Use SocketInfo's API to close the socket.
sock_info.close_socket(None)
self.assertEqual(0, len(cx_pool.sockets))
def test_pool_removes_dead_socket(self):
# Test that Pool removes a dead socket and that the socket doesn't
# return itself to the pool (PYTHON-344).
cx_pool = self.create_pool(max_pool_size=1, wait_queue_timeout=1)
cx_pool._check_interval_seconds = 0 # Always check.
with cx_pool.get_socket({}) as sock_info:
# Simulate a closed socket without telling the SocketInfo it's
# closed.
sock_info.sock.close()
self.assertTrue(sock_info.socket_closed())
with cx_pool.get_socket({}) as new_sock_info:
self.assertEqual(0, len(cx_pool.sockets))
self.assertNotEqual(sock_info, new_sock_info)
self.assertEqual(1, len(cx_pool.sockets))
# Semaphore was released.
with cx_pool.get_socket({}):
pass
def test_socket_closed(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((client_context.host, client_context.port))
socket_checker = SocketChecker()
self.assertFalse(socket_checker.socket_closed(s))
s.close()
self.assertTrue(socket_checker.socket_closed(s))
def test_socket_checker(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((client_context.host, client_context.port))
socket_checker = SocketChecker()
# Socket has nothing to read.
self.assertFalse(socket_checker.select(s, read=True))
self.assertFalse(socket_checker.select(s, read=True, timeout=0))
self.assertFalse(socket_checker.select(s, read=True, timeout=.05))
# Socket is writable.
self.assertTrue(socket_checker.select(s, write=True, timeout=None))
self.assertTrue(socket_checker.select(s, write=True))
self.assertTrue(socket_checker.select(s, write=True, timeout=0))
self.assertTrue(socket_checker.select(s, write=True, timeout=.05))
# Make the socket readable
_, msg, _ = message.query(
0, 'admin.$cmd', 0, -1, SON([('isMaster', 1)]), None,
DEFAULT_CODEC_OPTIONS)
s.sendall(msg)
# Block until the socket is readable.
self.assertTrue(socket_checker.select(s, read=True, timeout=None))
self.assertTrue(socket_checker.select(s, read=True))
self.assertTrue(socket_checker.select(s, read=True, timeout=0))
self.assertTrue(socket_checker.select(s, read=True, timeout=.05))
# Socket is still writable.
self.assertTrue(socket_checker.select(s, write=True, timeout=None))
self.assertTrue(socket_checker.select(s, write=True))
self.assertTrue(socket_checker.select(s, write=True, timeout=0))
self.assertTrue(socket_checker.select(s, write=True, timeout=.05))
s.close()
self.assertTrue(socket_checker.socket_closed(s))
def test_return_socket_after_reset(self):
pool = self.create_pool()
with pool.get_socket({}) as sock:
pool.reset()
self.assertTrue(sock.closed)
self.assertEqual(0, len(pool.sockets))
def test_pool_check(self):
# Test that Pool recovers from two connection failures in a row.
# This exercises code at the end of Pool._check().
cx_pool = self.create_pool(max_pool_size=1,
connect_timeout=1,
wait_queue_timeout=1)
cx_pool._check_interval_seconds = 0 # Always check.
self.addCleanup(cx_pool.close)
with cx_pool.get_socket({}) as sock_info:
# Simulate a closed socket without telling the SocketInfo it's
# closed.
sock_info.sock.close()
# Swap pool's address with a bad one.
address, cx_pool.address = cx_pool.address, ('foo.com', 1234)
with self.assertRaises(AutoReconnect):
with cx_pool.get_socket({}):
pass
# Back to normal, semaphore was correctly released.
cx_pool.address = address
with cx_pool.get_socket({}, checkout=True) as sock_info:
pass
sock_info.close_socket(None)
def test_wait_queue_timeout(self):
wait_queue_timeout = 2 # Seconds
pool = self.create_pool(
max_pool_size=1, wait_queue_timeout=wait_queue_timeout)
self.addCleanup(pool.close)
with pool.get_socket({}) as sock_info:
start = time.time()
with self.assertRaises(ConnectionFailure):
with pool.get_socket({}):
pass
duration = time.time() - start
self.assertTrue(
abs(wait_queue_timeout - duration) < 1,
"Waited %.2f seconds for a socket, expected %f" % (
duration, wait_queue_timeout))
def test_no_wait_queue_timeout(self):
# Verify get_socket() with no wait_queue_timeout blocks forever.
pool = self.create_pool(max_pool_size=1)
self.addCleanup(pool.close)
# Reach max_size.
with pool.get_socket({}) as s1:
t = SocketGetter(self.c, pool)
t.start()
while t.state != 'get_socket':
time.sleep(0.1)
time.sleep(1)
self.assertEqual(t.state, 'get_socket')
while t.state != 'sock':
time.sleep(0.1)
self.assertEqual(t.state, 'sock')
self.assertEqual(t.sock, s1)
def test_wait_queue_multiple(self):
wait_queue_multiple = 3
pool = self.create_pool(
max_pool_size=2, wait_queue_multiple=wait_queue_multiple)
# Reach max_size sockets.
with pool.get_socket({}):
with pool.get_socket({}):
# Reach max_size * wait_queue_multiple waiters.
threads = []
for _ in range(6):
t = SocketGetter(self.c, pool)
t.start()
threads.append(t)
time.sleep(1)
for t in threads:
self.assertEqual(t.state, 'get_socket')
with self.assertRaises(ExceededMaxWaiters):
with pool.get_socket({}):
pass
def test_no_wait_queue_multiple(self):
pool = self.create_pool(max_pool_size=2)
socks = []
for _ in range(2):
# Pass 'checkout' so we can hold the socket.
with pool.get_socket({}, checkout=True) as sock:
socks.append(sock)
threads = []
for _ in range(30):
t = SocketGetter(self.c, pool)
t.start()
threads.append(t)
time.sleep(1)
for t in threads:
self.assertEqual(t.state, 'get_socket')
for socket_info in socks:
socket_info.close_socket(None)
def test_maxConnecting(self):
client = rs_or_single_client()
self.addCleanup(client.close)
pool = get_pool(client)
docs = []
# Run 50 short running operations
def find_one():
docs.append(client.test.test.find_one({'$where': delay(0.001)}))
threads = [threading.Thread(target=find_one) for _ in range(50)]
for thread in threads:
thread.start()
for thread in threads:
thread.join(10)
self.assertEqual(len(docs), 50)
self.assertLessEqual(len(pool.sockets), 50)
# TLS and auth make connection establishment more expensive than
# the artificially delayed query which leads to more threads
# hitting maxConnecting. The end result is fewer total connections
# and better latency.
if client_context.tls and client_context.auth_enabled:
self.assertLessEqual(len(pool.sockets), 30)
else:
self.assertLessEqual(len(pool.sockets), 50)
# MongoDB 4.4.1 with auth + ssl:
# maxConnecting = 2: 6 connections in ~0.231+ seconds
# maxConnecting = unbounded: 50 connections in ~0.642+ seconds
#
# MongoDB 4.4.1 with no-auth no-ssl Python 3.8:
# maxConnecting = 2: 15-22 connections in ~0.108+ seconds
# maxConnecting = unbounded: 30+ connections in ~0.140+ seconds
print(len(pool.sockets))
class TestPoolMaxSize(_TestPoolingBase):
def test_max_pool_size(self):
max_pool_size = 4
c = rs_or_single_client(maxPoolSize=max_pool_size)
collection = c[DB].test
# Need one document.
collection.drop()
collection.insert_one({})
# nthreads had better be much larger than max_pool_size to ensure that
# max_pool_size sockets are actually required at some point in this
# test's execution.
cx_pool = get_pool(c)
nthreads = 10
threads = []
lock = threading.Lock()
self.n_passed = 0
def f():
for _ in range(5):
collection.find_one({'$where': delay(0.1)})
assert len(cx_pool.sockets) <= max_pool_size
with lock:
self.n_passed += 1
for i in range(nthreads):
t = threading.Thread(target=f)
threads.append(t)
t.start()
joinall(threads)
self.assertEqual(nthreads, self.n_passed)
self.assertTrue(len(cx_pool.sockets) > 1)
self.assertEqual(0, cx_pool.requests)
def test_max_pool_size_none(self):
c = rs_or_single_client(maxPoolSize=None)
collection = c[DB].test
# Need one document.
collection.drop()
collection.insert_one({})
cx_pool = get_pool(c)
nthreads = 10
threads = []
lock = threading.Lock()
self.n_passed = 0
def f():
for _ in range(5):
collection.find_one({'$where': delay(0.1)})
with lock:
self.n_passed += 1
for i in range(nthreads):
t = threading.Thread(target=f)
threads.append(t)
t.start()
joinall(threads)
self.assertEqual(nthreads, self.n_passed)
self.assertTrue(len(cx_pool.sockets) > 1)
def test_max_pool_size_zero(self):
with self.assertRaises(ValueError):
rs_or_single_client(maxPoolSize=0)
def test_max_pool_size_with_connection_failure(self):
# The pool acquires its semaphore before attempting to connect; ensure
# it releases the semaphore on connection failure.
test_pool = Pool(
('somedomainthatdoesntexist.org', 27017),
PoolOptions(
max_pool_size=1,
connect_timeout=1,
socket_timeout=1,
wait_queue_timeout=1))
test_pool.ready()
# First call to get_socket fails; if pool doesn't release its semaphore
# then the second call raises "ConnectionFailure: Timed out waiting for
# socket from pool" instead of AutoReconnect.
for i in range(2):
with self.assertRaises(AutoReconnect) as context:
with test_pool.get_socket({}, checkout=True):
pass
# Testing for AutoReconnect instead of ConnectionFailure, above,
# is sufficient right *now* to catch a semaphore leak. But that
# seems error-prone, so check the message too.
self.assertNotIn('waiting for socket from pool',
str(context.exception))
if __name__ == "__main__":
unittest.main()
|
Android-Termux-scrypt.py
|
"""The companion app for my Android Termux scrypt"""
from socket import socket, AF_INET, SOCK_STREAM
import threading
import pyaes
import sys
import subprocess
import os
import time
PORT = 8888
PORT2 = 8889
# This needs to be changed to read the address from a config file
IP = '0.0.0.0'
try:
PASSWORD = open('password.conf', 'r').read()
except:
print('ERROR: Can\'t find password.conf file')
sys.exit()
def encrypt(MESSAGE):
# key must be bytes, so we convert it
key = PASSWORD.encode('utf-8')
aes = pyaes.AESModeOfOperationCTR(key)
return aes.encrypt(MESSAGE)
def decrypt(MESSAGE):
# key must be bytes, so we convert it
key = PASSWORD.encode('utf-8')
# CTR mode decryption requires a new instance to be created
aes = pyaes.AESModeOfOperationCTR(key)
# decrypted data is always binary, need to decode to plaintext
return aes.decrypt(MESSAGE).decode('utf-8')
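# Minimal round-trip sketch (illustrative, not called by the app): a fresh
# AESModeOfOperationCTR is needed for decryption because CTR mode keeps an
# internal counter, so the encrypting instance cannot be reused. Assuming
# password.conf holds a valid AES key (16, 24, or 32 bytes):
#   decrypt(encrypt('hello')) == 'hello'   # True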
def sendcontacts():
ContactsRaw = subprocess.Popen(['termux-contact-list'], stdout=subprocess.PIPE)
Contacts = encrypt(bytes.decode(ContactsRaw.communicate()[0]))
SOCKET.send(Contacts)  # Contacts is already encrypted bytes from encrypt()
def incomming_texts():
print('incomming_texts thread started')
SOCKET2 = socket(AF_INET, SOCK_STREAM)
SOCKET2.connect((IP, PORT2))
print('connected on port ' + str(PORT2))
while True:
TEXTSRAW1 = subprocess.Popen(['termux-sms-inbox'], stdout=subprocess.PIPE)
TEXTS = encrypt(bytes.decode(TEXTSRAW1.communicate()[0]))
SOCKET2.send(TEXTS)  # send on the dedicated texts socket; TEXTS is already encrypted bytes
incomming_texts_thread = threading.Thread(target=incomming_texts)
incomming_texts_thread.daemon = True
incomming_texts_thread.start()
# Defining the client socket variable and setting it to use the TCP protocol
SOCKET = socket(AF_INET, SOCK_STREAM)
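# Simple command protocol from the desktop side (commands arrive encrypted
# and are decrypted before comparison):
#   'sync contacts' -> reply with the encrypted termux-contact-list output
#   'send text'     -> followed by two messages: the number, then the text
#   'exit'          -> close the connection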
while True:
SOCKET.connect((IP, PORT))
print('connected on port ' + str(PORT))
while True:
DATA = decrypt(SOCKET.recv(1024))
print(DATA)
if(DATA == 'sync contacts'):
print('sending contacts')
sendcontacts()
if(DATA == 'send text'):
print('sending a text message')
NUMBER = decrypt(SOCKET.recv(1024))
MESSAGE = decrypt(SOCKET.recv(1024))
os.system('termux-sms-send -n ' + NUMBER + ' ' + MESSAGE)
print('sent message to ' + NUMBER)
if(DATA == 'exit'):
SOCKET.close()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
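# Illustrative (on a little-endian machine): bufreverse swaps the byte order
# inside each 32-bit word, while wordreverse reverses the order of the words:
#   bufreverse('\x01\x02\x03\x04\x05\x06\x07\x08')  == '\x04\x03\x02\x01\x08\x07\x06\x05'
#   wordreverse('\x01\x02\x03\x04\x05\x06\x07\x08') == '\x05\x06\x07\x08\x01\x02\x03\x04'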
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
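# The 4-byte nonce occupies bytes 76..80 of the 80-byte block header, which
# corresponds to hex characters 152..160 of the getwork 'data' string.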
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 6632
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
scanner.py
|
# Copyright (c) 2021-2025 Penetrum LLC <contact@penetrum.com> (MIT License)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import shutil
import pathlib
import binascii
import threading
import platform
from stat import (
S_IREAD,
S_IRGRP,
S_IROTH
)
import termcolor
from penne.lib.settings import (
log,
beep,
get_hash,
DEFAULT_MOVE_DIRECTORY,
COMPLETED_RESULTS,
FINISHED_FILES_JSON_LIST,
random_string,
pause,
WORKERS,
StoppableThread,
yara_checker,
sort_yara_rule_output,
load_user_defined,
contains
)
from penne.quarantine.noodler import (
spicy_file,
check_prem
)
from penne.quarantine.db_create import pull_sig
def walk(top, threads=12):
if not os.path.isdir(top):
        return  # nothing to walk if the start path is not a directory
lock = threading.Lock()
on_input = threading.Condition(lock)
on_output = threading.Condition(lock)
state = {'tasks': 1}
paths = [top]
output = []
def worker():
while True:
with lock:
while True:
if not state['tasks']:
output.append(None)
on_output.notify()
return
if not paths:
on_input.wait()
continue
path = paths.pop()
break
try:
dirs = []
files = []
for item in sorted(os.listdir(path)):
subpath = os.path.join(path, item)
if os.path.isdir(subpath):
dirs.append(item)
with lock:
state['tasks'] += 1
paths.append(subpath)
on_input.notify()
else:
files.append(item)
with lock:
output.append((path, dirs, files))
on_output.notify()
except OSError:
pass
finally:
with lock:
state['tasks'] -= 1
if not state['tasks']:
on_input.notifyAll()
tmp_worker = [StoppableThread(target=worker, name="penneio.stoppable.walk %d %s" % (i, top)) for i in range(threads)]
for w in tmp_worker:
WORKERS.append(w)
for w in WORKERS:
w.start()
while threads or output:
with lock:
while not output:
on_output.wait()
item = output.pop()
if item:
yield item
else:
threads -= 1
def do_yara_rule_check(filename):
results = check_prem()
if results["Success"]:
results = yara_checker(results["Endpoint"], filename, results["API_KEY"])
else:
results = {"yara_rules": []}
return results
def do_quarn(f, detection_type, arch, detected_as):
parts = pathlib.Path(f)
filename = parts.name
path = parts.parent
quarantine_results = spicy_file(path, filename, detection_type, arch, detected_as)
if quarantine_results["Success"]:
log.info("file sent to cold storage at: {}".format(quarantine_results["ColdFile"]))
else:
log.warn("we were unable to send file to cold storage")
def run_user_defined(filename, user_defined_list):
signature_list = user_defined_list
for signature in signature_list:
with open(signature) as sig, open(filename, "rb") as src:
data = sig.read().split(":")
_, type_, bytes_read, os_filler, signature_ = data[0], data[1], int(data[2]), data[3], data[4]
src_data = binascii.hexlify(src.read(bytes_read))
            if src_data.decode() == signature_:  # hexlify() gives bytes; compare as text, as check_signature() does
return os_filler, get_hash(filename), type_
return None
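# Judging by the split(":") above, each user-defined signature file holds one
# colon-separated record: <label>:<type>:<bytes_to_read>:<os>:<hex_signature>.
# A hypothetical example, "elf_stub:trojan:4:linux:7f454c46", would flag files
# whose first 4 bytes hexlify to 7f454c46 (the ELF magic).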
def check_signature(filename, do_beep=True, user_defined_list=[]):
byte_sizes = (1024, 2048, 4096)
with open(filename, "rb") as f:
for b in byte_sizes:
data = binascii.hexlify(f.read(b)).decode()
matches = pull_sig(data, b)
if matches['Success']:
if do_beep:
beep()
termcolor.cprint(
"\nMatch found:\nPath: {}\nOS Type: {}\nSHA-256: {}\nWarning Type: {}\n".format(
filename, matches['OS'], matches['Hash'], matches['Warning']
)
)
                return [True, matches["Warning"]]  # stop at the first database hit
else:
results = run_user_defined(filename, user_defined_list)
if results is not None:
termcolor.cprint(
"\nUser Defined Match found:\nPath: {}\nOS Type: {}\nSHA-256: {}\nWarning Type: {}\n".format(
filename, results[0], results[1], results[-1]
)
)
                    return [True, results[-1]]  # stop at the first user-defined hit
else:
retval = [False, None]
return retval
def move_detected_file(source, detection, detected_as="EVIL AF"):
architecture = platform.architecture()
file_dest_hash = get_hash(source)
file_dest_path = "{}/{}_{}".format(DEFAULT_MOVE_DIRECTORY, file_dest_hash, random_string(length=30))
try:
shutil.move(source, file_dest_path)
except:
log.warning("unable to move file, going to copy it instead and change originals permissions to read only")
shutil.copy(source, file_dest_path)
try:
os.chmod(source, S_IREAD | S_IRGRP | S_IROTH)
except:
log.error("unable to change original source files permissions ({})".format(source))
try:
os.chmod(file_dest_path, S_IREAD | S_IRGRP | S_IROTH)
except:
log.warn("unable to change file attributes to read only")
do_quarn(source, detection, architecture, detected_as)
return file_dest_path
def finish_scan():
def percent(part, whole):
try:
try:
return str(100 * part/whole)[0:5]
except:
return 100 * part/whole
except ZeroDivisionError:
return 0
def show_opts():
retval = ""
if len(COMPLETED_RESULTS["infected_files"]) != 0:
retval += "to see the list of infected files run: penneav --infected\n"
if len(COMPLETED_RESULTS["moved_files"]) != 0:
retval += "to see the files that were moved run: penneav --moved\n"
if len(COMPLETED_RESULTS["unable_to_scan"]) != 0:
retval += "to see files that were unable to be scanned run: penneav --unable\n"
if len(COMPLETED_RESULTS["unable_to_cold_store"]) != 0:
retval += "to see the files that failed cold storage run: penneav --failed\n"
return retval
if not os.path.exists(FINISHED_FILES_JSON_LIST):
attribute = "a+"
else:
attribute = "w"
percentage = percent(COMPLETED_RESULTS["total_scanned"], COMPLETED_RESULTS["total_found"])
with open(FINISHED_FILES_JSON_LIST, attribute) as res:
data = {
"infected": COMPLETED_RESULTS["infected_files"],
"unable": COMPLETED_RESULTS["unable_to_scan"],
"moved": COMPLETED_RESULTS["moved_files"],
"failed": COMPLETED_RESULTS["unable_to_cold_store"]
}
json.dump(data, res)
log.info("scanning finished")
termcolor.cprint(
"\n\nSCAN RESULTS:\n"
"{}\n"
"FINISHED SCANNING: {}\n"
"FILES MOVED: {}\n"
"UNABLE TO BE SCANNED: {}\n"
"INFECTED FILES FOUND: {}\n"
"FAILED COLD STORAGE: {}\n"
"TOTAL AMOUNT OF FILES FOUND DURING SCAN: {}\n"
"PERCENT THAT FINISHED SCANNING: {}%"
"\n{}\n"
"\n"
"{}".format(
"-" * 47,
COMPLETED_RESULTS["total_scanned"],
len(COMPLETED_RESULTS["moved_files"]),
len(COMPLETED_RESULTS["unable_to_scan"]),
len(COMPLETED_RESULTS["infected_files"]),
len(COMPLETED_RESULTS["unable_to_cold_store"]),
COMPLETED_RESULTS["total_found"],
percentage,
"-" * 47, show_opts()
), "green", attrs=["bold"]
)
def scan(start_dir, **kwargs):
do_beep = kwargs.get("do_beep", True)
display_only_infected = kwargs.get("display_only_infected", False)
threads = kwargs.get("threads", 12)
move_detected = kwargs.get("move_detected", False)
follow_syms = kwargs.get("follow_sym", False)
ignored_dirs = kwargs.get("ignored_dirs", [])
ignored_files = kwargs.get("ignored_files", [])
display_yara_rules = kwargs.get("display_yara_rules", True)
skip_yara_rules = kwargs.get("skip_yara_rules", False)
if skip_yara_rules:
display_yara = False
else:
display_yara = True
walked_paths = walk(start_dir, threads=threads)
user_defined = load_user_defined()
log.info("loaded a total of {} user defined signature(s)".format(len(user_defined)))
for data in walked_paths:
root, subs, files = data[0], data[1], data[-1]
paths = [
os.path.join(root, f) for f in files if f not in ignored_files
]
for path in paths:
if not contains(path, ignored_dirs):
try:
COMPLETED_RESULTS["total_found"] += 1
try:
if not display_only_infected:
log.debug("scanning file: {}".format(path))
if follow_syms:
if os.path.islink(path):
if not display_only_infected:
log.info("found symlink and following")
path = os.path.realpath(path)
if not display_only_infected:
log.debug("real path from symlink: {}".format(path))
results = check_signature(path, do_beep=do_beep, user_defined_list=user_defined)
if results[0]:
yara_rule_results = do_yara_rule_check(path)
if len(yara_rule_results["yara_rules"]) != 0:
log.info("file information discovered:\n{}".format("-" * 30))
if display_yara_rules:
for item in yara_rule_results["yara_rules"]:
sort_yara_rule_output(item, display_yara_data=display_yara)
print("-" * 30)
COMPLETED_RESULTS["infected_files"].append(path)
if move_detected:
moved_to = move_detected_file(path, results[1])
log.info("file marked to be moved and moved to: {}".format(moved_to))
COMPLETED_RESULTS["moved_files"].append(path)
COMPLETED_RESULTS["total_scanned"] += 1
except Exception:
if not display_only_infected:
log.error("unable to finish file scanning on filename: {}".format(path))
COMPLETED_RESULTS["unable_to_scan"].append(path)
except KeyboardInterrupt:
results = pause(filename=path)
if results:
continue
else:
pass
else:
pass
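# Example invocation (illustrative; the module path, directory and options are placeholders):
#
#   from penne.lib.scanner import scan
#   scan("/home/user/Downloads", threads=8, move_detected=True,
#        ignored_dirs=[".git"], ignored_files=["known_good.bin"])
#
# scan() walks the tree, checks every file against the bundled and user-defined
# signatures, optionally queries the yara endpoint, and quarantines hits when
# move_detected is set.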
|
Bego.py
|
# -*- coding: utf-8 -*-
#baru
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
from PyDictionary import PyDictionary
from bs4 import BeautifulSoup
from mergedict import MergeDict
from mergedict import ConfigDict
from gtts import gTTS
from pyowm import OWM
from enum import Enum
#from django.http import HttpResponse
from flask import Flask, send_from_directory, redirect as redirect_flask, render_template
from random import randint
import time, random, sys, re, os, json
import subprocess, threading, string,codecs, requests, tweepy, ctypes, urllib, urllib2, wikipedia,cookielib,urllib3
import urllib3
import certifi
import ssl
import html5lib,shutil
import subprocess as cmd
import csv
import os
import errno
import imp
import StringIO
import traceback
import linecache
import stat
import cStringIO
import urlparse
import logging
import argparse
#import mimic
import xml
import base64
import ast
#tinkerbell
cl = LINETCR.LINE()
cl.login(token="EyM9FUGnUjCgzBD9g1h3.Pt9vsXYFErULrUPoXesFaW.bG0u1iOg2JkP1Q+/Atx9ulMqGS0ee3aSIOTlLxu95Uw=")
cl.loginResult()
#vidia
kt = LINETCR.LINE()
kt.login(token="EyaIEVzb63WUrxuhjLK9.Ccvp/OzfiBuhC1OEFfFE/q.zkPVC2b6nPuEJPQ1++4auyyX+7WfP56kRtjYSUKvYRY=")
kt.loginResult()
#rosetta
ks = LINETCR.LINE()
ks.login(token="EybY0P2XbUNoMSPpPfsf.x6PpBD6M9uGay7W2CBCh7W.Zzr9UNXomObGyu5UuXWuCs6HJAy32aanc3KePJm+uas=")
ks.loginResult()
#sirvelmist
ki = LINETCR.LINE()
ki.login(token="EyPXlpnJZquL4e86rvpd.IMkU6l6mWW7d6nCQwqXfVq.3d+v3tbpz/t2l2qJ3MqVz1m0OU18jzcDEcGU/kI0dG0=")
ki.loginResult()
#fawn
kk = LINETCR.LINE()
kk.login(token="EyGMJecppztx5ijiMYb9.8hTNx6bS2M+lkjmvPDblYq.jx7LgLZaqUc7EA4aS59vE3pV5EFpNeO/6AnqhepBCig=")
kk.loginResult()
#iridessa
kc = LINETCR.LINE()
kc.login(token="EyXWssYg3VJmDH7KT1y8.AItJ4tS8fBY7GmlNngXfga.HPH/Mwb8aF7VP36Aop5Z+DrjJeDLrRTjSZFvEIaTucU=")
kc.loginResult()
#kicker ghost
#kl = LINETCR.LINE()
#kl.login(token="")
#kl.loginResult()
print "❂☞𖤍آبّہوِ ؏ۤـہمٰ̲ہر𖤍☜❂"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage= """
╔═══════════════════
╠ ✍️MODIFIER✍️
╠❂͜͡➣ Bot1 rename:[text]
╠❂͜͡➣ Bot2 rename:[text]
╠❂͜͡➣ Bot3 rename:[text]
╠❂͜͡➣ Bot4 rename:[text]
╠❂͜͡➣ Bot5 rename:[text]
╠❂͜͡➣ Bot6 rename:[text]
╠❂͜͡➣ All rename:[text]
╠❂͜͡➣ Allbio:[text]
╠❂͜͡➣ Bot1 clone @[name]
╠❂͜͡➣ Bot2 clone @[name]
╠❂͜͡➣ Bot3 clone @[name]
╠❂͜͡➣ Bot4 clone @[name]
╠❂͜͡➣ Bot5 clone @[name]
╠❂͜͡➣ Bot6 clone @[name]
╠❂͜͡➣ Comment:[text]
╠❂͜͡➣ Message:[text]
╠❂͜͡➣ Bot1-6 backup run
╠❂͜͡➣ Bot1-6 backup
╠❂͜͡➣ Group name:[text]
╚═══════════════════
╔═══════════════════
╠ ✍️PROMOTE/DEMOTE✍️
╠❂͜͡➣ Admin on @[name]
╠❂͜͡➣ Expel on @[name]
╠❂͜͡➣ Expelall
╚═══════════════════
╔═══════════════════
╠ ✍️STEALING✍️
╠❂͜͡➣ Steal name @[name]
╠❂͜͡➣ Steal Bio @[name]
╠❂͜͡➣ Steal status @[name]
╠❂͜͡➣ Steal mid @[name]
╠❂͜͡➣ Steal contact @[name]
╠❂͜͡➣ Steal cover @[name]
╠❂͜͡➣ Steal pict @[name]
╠❂͜͡➣ Steal group pict
╠❂͜͡➣ Midpict:[mid]
╠❂͜͡➣ Copy @[name]
╠❂͜͡➣ Kembali ke asli
╚═══════════════════
╔═══════════════════
╠ ✍️GUARD MODE✍️
╠❂͜͡➣ Protect:low
╠❂͜͡➣ Protect:hight
╚═══════════════════
╔═══════════════════
╠ ✍️MARK TO LIST✍️
╠❂͜͡➣ Ban @[name]
╠❂͜͡➣ Unban @[name]
╠❂͜͡➣ Ban group:
╠❂͜͡➣ Del ban:
╠❂͜͡➣ List ban group
╠❂͜͡➣ Banned[send contact]
╠❂͜͡➣ Unbanned[send contact]
╠❂͜͡➣ Ban repeat @[name]
╠❂͜͡➣ Blacklist all
╠❂͜͡➣ Ban cek
╠❂͜͡➣ Clear banlist
╠❂͜͡➣ Mimic target @[name]
╠❂͜͡➣ Mimic untarget @[name]
╠❂͜͡➣ Add friend @[name]
╠❂͜͡➣ Target @[name]
╠❂͜͡➣ Del target @[name]
╠❂͜͡➣ Target list
╚═══════════════════
╔═══════════════════
╠ ✍️INVITATION✍️
╠❂͜͡➣ Invite:[mid]
╠❂͜͡➣ Invite user[contact]
╠❂͜͡➣ Invite me
╠❂͜͡➣ Join all
╠❂͜͡➣ Join group
╚═══════════════════
╔═══════════════════
╠ ✍️LEAVE GROUP✍️
╠❂͜͡➣ Bot2 @bye
╠❂͜͡➣ Bot3 @bye
╠❂͜͡➣ Bot4 @bye
╠❂͜͡➣ Bot5 @bye
╠❂͜͡➣ Bot6 @bye
╠❂͜͡➣ Bye all
╠❂͜͡➣ Center @bye
╠❂͜͡➣ Bye allgroups[own]
╠❂͜͡➣ Leave group:
╚═══════════════════
╔═══════════════════
╠ ✍️BOT AUTO SETTINGS✍️
╠❂͜͡➣ Auto join:on/off
╠❂͜͡➣ Auto leave:on/off
╠❂͜͡➣ Auto like:on/off
╠❂͜͡➣ Welcome message:on/off
╠❂͜͡➣ Auto notice:on/off
╠❂͜͡➣ Blockinvite:on/off
╠❂͜͡➣ Auto blockqr:on/off
╠❂͜͡➣ Namelock:on/off
╠❂͜͡➣ Mimic:on/off
╠❂͜͡➣ Auto add:on/off
╠❂͜͡➣ Check message
╠❂͜͡➣ Add message:[text]
╠❂͜͡➣ Comment:on/off
╠❂͜͡➣ Add comment:[text]
╠❂͜͡➣ Check comment
╠❂͜͡➣ Backup:on/off
╠❂͜͡➣ Gcancel:[number]
╠❂͜͡➣ Update welcome:[text]
╠❂͜͡➣ Check welcome message
╚═══════════════════
╔═══════════════════
╠ ✍️CANCEL MODE✍️
╠❂͜͡➣ Rejectall
╠❂͜͡➣ Clean invites
╠❂͜͡➣ Clear invites
╚═══════════════════
╔═══════════════════
╠ ✍️SURPRISE GIFT✍️
╠❂͜͡➣ gift1-15
╠❂͜͡➣ Spam gift
╚═══════════════════
╔═══════════════════
╠ ✍️NOTIFICATION LIST✍️
╠❂͜͡➣ Group list
╠❂͜͡➣ Banlist
╠❂͜͡➣ Admin list
╠❂͜͡➣ Settings
╠❂͜͡➣ Ginfo
╠❂͜͡➣ TL:[text]
╠❂͜͡➣ Mimic list
╠❂͜͡➣ Details grup:
╠❂͜͡➣ Crash
╠❂͜͡➣ Add all
╚═══════════════════
╔═══════════════════
╠★ KICKER MODE ★
╠❂͜͡➣ Cleanse
╠❂͜͡➣ Vkick @
╠❂͜͡➣ Nk [name]
╠❂͜͡➣ Kick:[mid]
╠❂͜͡➣ Purge
╠❂͜͡➣ Ulti
╠❂͜͡➣ Recover
╚═══════════════════
╔═══════════════════
╠ ✍️CHAT RELATED✍️
╠❂͜͡➣ Spamg[on/off][no][txt]
╠❂͜͡➣ Spam add:[text]
╠❂͜͡➣ Spam change:[text]
╠❂͜͡➣ Spam start:[number]
╠❂͜͡➣ Say [text]
╠❂͜͡➣ Me
╠❂͜͡➣ Speed
╠❂͜͡➣ Debug speed
╠❂͜͡➣ My mid
╠❂͜͡➣ Gcreator
╠❂͜͡➣ Halo
╠❂͜͡➣ Bot contact
╠❂͜͡➣ Bot mid
╠❂͜͡➣ Creator
╠❂͜͡➣ System
╠❂͜͡➣ Iconfig
╠❂͜͡➣ Kernel
╠❂͜͡➣ Cpu
╠❂͜͡➣ Responsename
╠❂͜͡➣ Help
╠❂͜͡➣ Mc:[mid]
╚═══════════════════
╔═══════════════════
╠ ✍️UTILITY✍️
╠❂͜͡➣ Lurking
╠❂͜͡➣ Lurking result
╠❂͜͡➣ Setlastpoint
╠❂͜͡➣ Viewlastseen
╠❂͜͡➣ Link open
╠❂͜͡➣ Link close
╠❂͜͡➣ Gurl
╠❂͜͡➣ Remove chat
╠❂͜͡➣ Bot restart
╚═══════════════════
╔═══════════════════
╠ ✍️CHAT RELATED✍️
╠❂͜͡➣ Lyric [][]
╠❂͜͡➣ Music [][]
╠❂͜͡➣ Wiki [text]
╠❂͜͡➣ Vidio [text]
╠❂͜͡➣ Youtube [text]
╠❂͜͡➣ Instagram [text]
╠❂͜͡➣ Translate-idn [text]
╠❂͜͡➣ Translate-eng [text]
╠❂͜͡➣ Translate-thai [text]
╠❂͜͡➣ Translate-japan [text]
╠❂͜͡➣ Emoji [expression]
╠❂͜͡➣ Info @[name]
╠❂͜͡➣ Ping
╠❂͜͡➣ Time
╠❂͜͡➣ apakah
╠❂͜͡➣ Sticker [expression]
╠❂͜͡➣ Mention all
╠❂͜͡➣ /say
╠❂͜͡➣ /say-en
╠❂͜͡➣ /say-jp
╠❂͜͡➣ Dosa @
╠❂͜͡➣ /
╠❂͜͡➣ Siapa
╚═══════════════════
╔═══════════════════
╠ ✍️BROADCASTING✍️
╠❂͜͡➣ Pm cast [text]
╠❂͜͡➣ Broadcast [text]
╠❂͜͡➣ Spam @[name]
╚═══════════════════
╔═══════════════════
╠ ✍️special command✍️
╠❂͜͡➣ Turn off bots
╚═══════════════════
╔═══════════════════╗
☬ ᎢᎻX FᎾᎡ ᎷY ᎢᎬᎪᎷ ☬
☬ ❂☞𖤍آبّہوِ ؏ۤـہمٰ̲ہر𖤍☜❂ ☬
╚═══════════════════╝
"""
KAC=[cl,ki,kk,kc,ks,kt]
mid = cl.getProfile().mid
Amid = ki.getProfile().mid
Bmid = kk.getProfile().mid
Cmid = kc.getProfile().mid
Dmid = ks.getProfile().mid
Emid = kt.getProfile().mid
#Fmid = kl.getProfile().mid
protectname = []
protecturl = []
protection = []
autocancel = {}
autoinvite = []
autoleaveroom = []
targets = []
Bots=[mid,Amid,Bmid,Cmid,Dmid,Emid]
admin = ["u70254ffa864521b507735a1e7c57def9"]
owner = ["u70254ffa864521b507735a1e7c57def9"]
wait = {
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True, "members":1},
'leaveRoom':False,
'timeline':True,
'autoAdd':False,
'message':"شكرا لاضافة 😍 الزعيم 😎 ❂☞𖤍آبّہوِ ؏ۤـہمٰ̲ہر𖤍☜❂",
"lang":"JP",
"comment":"AutoLike by :❂☞𖤍آبّہوِ ؏ۤـہمٰ̲ہر𖤍☜❂:https://line.me/ti/p/Yx3w20LEwb ",
"welmsg":"هلا والله 🌷 حيالله💙 ايه والله👈😍👉 لعيونك يا كبير😎 هالرقبة سداده😏 مع تحيات الزعيم ❂☞𖤍آبّہوِ ؏ۤـہمٰ̲ہر𖤍☜❂",
"commentOn":True,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"status":False,
"likeOn":False,
"pname":False,
"blacklist":{},
"whitelist":{},
"wblacklist":False,
"dblacklist":False,
"qr":False,
"welcomemsg":False,
"Backup":False,
"protectionOn":False,
"winvite":False,
"pnharfbot":{},
"pname":{},
"pro_name":{},
}
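# Runtime toggles used throughout bot() below: autoJoin/autoCancel decide how group
# invitations are handled, protectionOn and qr drive the kick / ticket-lock guards,
# welcomemsg enables the join greeting, and blacklist/whitelist hold per-mid rules.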
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
wait3 = {
"copy":False,
"copy2":"target",
"target":{}
}
res = {
'num':{},
'us':{},
'au':{},
}
setTime = {}
setTime = wait2['setTime']
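# Each block below copies a profile into the same `backup` variable, so only the
# last snapshot (kt's profile) is actually kept for any later restore.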
contact = cl.getProfile()
backup = cl.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = ki.getProfile()
backup = ki.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = kk.getProfile()
backup = kk.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = kc.getProfile()
backup = kc.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = ks.getProfile()
backup = ks.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = kt.getProfile()
backup = kt.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
def upload_tempimage(client):
'''
Upload a picture of a kitten. We don't ship one, so get creative!
'''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def yt(query):
with requests.session() as s:
isi = []
if query == "":
query = "S1B tanysyz"
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
if 'watch?v' in a['href']:
b = a['href'].replace('watch?v=', '')
isi += ['youtu.be' + b]
return isi
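# Illustrative: yt("some search term") returns a list such as ["youtu.be/<video id>", ...];
# the '.yt-lockup-title' selector assumes YouTube's old results markup.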
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def mention(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag All"
try:
cl.sendMessage(msg)
except Exception as error:
print error
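# The helpers below (sendMessage/sendText/sendImage/sendImageWithURL/post_content)
# are written as instance methods: they expect a LINE client object as `self` and
# are not usable as plain module-level functions in this form.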
def sendMessage(self, messageObject):
return self.Talk.client.sendMessage(0,messageObject)
def sendText(self, Tomid, text):
msg = Message()
msg.to = Tomid
msg.text = text
return self.Talk.client.sendMessage(0, msg)
def sendImage(self, to_, path):
M = Message(to=to_,contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M_id = self._client.sendMessage(M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self._client.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
#r.content
return True
def sendImageWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'w') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def post_content(self, urls, data=None, files=None):
return self._session.post(urls, headers=self._headers, data=data, files=files)
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def NOTIFIED_READ_MESSAGE(op):
print op
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name + datetime.now().strftime(' [%d - %H:%M:%S]')
wait2['ROM'][op.param1][op.param2] = "・" + Name + " ツ"
else:
pass
except:
pass
def RECEIVE_MESSAGE(op):
msg = op.message
try:
if msg.contentType == 0:
try:
if msg.to in wait2['readPoint']:
if msg.from_ in wait2["ROM"][msg.to]:
del wait2["ROM"][msg.to][msg.from_]
else:
pass
except:
pass
else:
pass
except KeyboardInterrupt:
sys.exit(0)
except Exception as error:
print error
print ("\n\nRECEIVE_MESSAGE\n\n")
return
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
#-------------------------------------------
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・ " + Name + datetime.today().strftime(' [%d - %H:%M:%S]')
wait2['ROM'][op.param1][op.param2] = "・ " + Name
                        wait2['setTime'][op.param1] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
else:
pass
except:
pass
#-------------------NOTIFIED_READ_MESSAGE----------------
if op.type == 55:
try:
group_id = op.param1
user_id=op.param2
subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, )
except Exception as e:
print e
#------------------------------------------
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
try:
G = cl.getGroup(op.param1)
except:
try:
G = ki.getGroup(op.param1)
except:
try:
G = kk.getGroup(op.param1)
except:
try:
G = kc.getGroup(op.param1)
except:
try:
G = ks.getGroup(op.param1)
except:
try:
G = kt.getGroup(op.param1)
except:
pass
G.name = wait['pro_name'][op.param1]
try:
cl.updateGroup(G)
except:
try:
ki.updateGroup(G)
except:
try:
kk.updateGroup(G)
except:
try:
kc.updateGroup(G)
except:
try:
ks.updateGroup(G)
except:
try:
kt.updateGroup(G)
except:
pass
                    # assumption: the bot accounts themselves are exempt from the name-lock kick
                    if op.param2 in Bots:
pass
else:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ks.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kt.kickoutFromGroup(op.param1,[op.param2])
except:
pass
kk.sendText(op.param1,"please do not change group name-_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.type == 13:
if op.param3 in mid:
if op.param2 in Amid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param3 in Amid:
if op.param2 in Bmid:
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
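            # NOTE: the next three branches refer to clients kd, ke and kf, which are never
            # instantiated in this file (Dmid, Emid and mid belong to ks, kt and cl), so these
            # paths raise NameError if they are ever reached.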
if op.param3 in Cmid:
if op.param2 in Dmid:
X = kd.getGroup(op.param1)
X.preventJoinByTicket = False
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
if op.param3 in Dmid:
if op.param2 in Emid:
X = ke.getGroup(op.param1)
X.preventJoinByTicket = False
ke.updateGroup(X)
Ti = ke.reissueGroupTicket(op.param1)
kd.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ke.updateGroup(X)
Ti = ke.reissueGroupTicket(op.param1)
if op.param3 in Emid:
if op.param2 in mid:
X = kf.getGroup(op.param1)
X.preventJoinByTicket = False
kf.updateGroup(X)
Ti = kf.reissueGroupTicket(op.param1)
ke.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
kf.updateGroup(X)
Ti = kf.reissueGroupTicket(op.param1)
#=====================================================================================
if op.param3 in mid:
if op.param2 in Bmid:
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Cmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Dmid:
X = ks.getGroup(op.param1)
X.preventJoinByTicket = False
ks.updateGroup(X)
Ti = ks.reissueGroupTicket(op.param1)
                    cl.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ks.updateGroup(X)
Ti = ks.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Emid:
X = kt.getGroup(op.param1)
X.preventJoinByTicket = False
kt.updateGroup(X)
Ti = kt.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
kt.updateGroup(X)
Ti = kt.reissueGroupTicket(op.param1)
#======================================================
if op.param3 in Bmid:
if op.param2 in mid:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
G = kc.getGroup(op.param1)
G.preventJoinByTicket = False
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Dmid:
G = ks.getGroup(op.param1)
G.preventJoinByTicket = False
ks.updateGroup(G)
Ticket = ks.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ks.updateGroup(G)
Ticket = ks.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Emid:
G = kt.getGroup(op.param1)
G.preventJoinByTicket = False
kt.updateGroup(G)
Ticket = kt.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
kt.updateGroup(G)
Ticket = kt.reissueGroupTicket(op.param1)
#=========================================================================
#===========================================
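        # The guard checks below use the pattern `not op.param2 in Bots and admin`; since
        # `admin` is a non-empty list it is always truthy, so the condition effectively
        # only tests that the actor is not one of the bot accounts.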
if op.type == 32:
if not op.param2 in Bots and admin:
if wait["protectionOn"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if Amid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki.rejectGroupInvitation(op.param1)
else:
ki.acceptGroupInvitation(op.param1)
else:
ki.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
ki.cancelGroupInvitation(op.param1, matched_list)
if Bmid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kk.rejectGroupInvitation(op.param1)
else:
kk.acceptGroupInvitation(op.param1)
else:
kk.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kk.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
kk.cancelGroupInvitation(op.param1, matched_list)
if Cmid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kc.rejectGroupInvitation(op.param1)
else:
kc.acceptGroupInvitation(op.param1)
else:
kc.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kc.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("^^",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
kc.cancelGroupInvitation(op.param1, matched_list)
if op.type == 17:
if op.param3 in wait["blacklist"]:
if not op.param2 in Bots and admin:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param3])
cl.sendText(op.param1,"blacklist users are not allowed to sign in -_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param3}
cl.sendMessage(c)
if op.type == 17:
if wait["welcomemsg"] == True:
if op.param2 not in Bots:
ginfo = cl.getGroup(op.param1)
cl.sendText(op.param1,cl.getContact(op.param2).displayName + wait["welmsg"]+ str(ginfo.name))
if op.type == 11:
if not op.param2 in Bots:
if wait["qr"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = True
kicker.updateGroup(G)
except Exception, e:
print e
if op.type == 11:
if not op.param2 in Bots and admin:
if wait["protectionOn"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = True
kicker.updateGroup(G)
kicker.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = True
kicker.updateGroup(G)
cl.sendText(op.param1,"please do not open link group-_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
except Exception, e:
print e
if op.type == 13:
G = cl.getGroup(op.param1)
I = G.creator
if not op.param2 in Bots and admin:
if wait["protectionOn"] == True:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
if G is not None:
gInviMids = [contact.mid for contact in G.invitee]
kicker.cancelGroupInvitation(op.param1, gInviMids)
kicker.kickoutFromGroup(op.param1,[op.param2])
cl.sendText(op.param1,"you are prohibited from inviting-_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.type == 15:
if op.param2 in admin:
random.choice(KAC).inviteIntoGroup(op.param1,[op.param2])
if op.type == 19:
if op.param2 in Bots:
if op.param3 in admin:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
if op.type == 19:
if not op.param2 in Bots:
if op.param3 in admin:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.type == 19:
if not op.param2 in Bots:
try:
gs = ki.getGroup(op.param1)
gs = kk.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception, e:
print e
if not op.param2 in Bots and admin:
if wait["Backup"] == True:
try:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
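            # The recovery path below relies on the "kicker ghost" client kl, whose login is
            # commented out near the top of the file, so it fails with NameError until that
            # account is enabled.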
if not op.param2 in Bots and admin:
if wait["protectionOn"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = False
kicker.updateGroup(G)
invsend = 0
Ticket = kicker.reissueGroupTicket(op.param1)
kl.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.2)
X = kicker.getGroup(op.param1)
X.preventJoinByTicket = True
kl.kickoutFromGroup(op.param1,[op.param2])
kicker.kickoutFromGroup(op.param1,[op.param2])
kl.leaveGroup(op.param1)
kicker.updateGroup(X)
except Exception, e:
print e
if not op.param2 in Bots and admin:
try:
gs = ki.getGroup(op.param1)
gs = kk.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception, e:
print e
if not op.param2 in Bots and admin:
if wait["Backup"] == True:
try:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki.getGroup(op.param1)
X.preventJoinByTicket = True
ki.updateGroup(X)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kk.getGroup(op.param1)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ticket = kk.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
kd.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kd.getGroup(op.param1)
X.preventJoinByTicket = False
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kc.getGroup(op.param1)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ticket = kc.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Dmid in op.param3:
if op.param2 in Bots:
pass
try:
ke.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ke.getGroup(op.param1)
X.preventJoinByTicket = False
ke.updateGroup(X)
Ti = ke.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kd.getGroup(op.param1)
X.preventJoinByTicket = True
kd.updateGroup(X)
Ticket = kd.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Emid in op.param3:
if op.param2 in Bots:
pass
try:
kf.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kf.getGroup(op.param1)
X.preventJoinByTicket = False
kf.updateGroup(X)
Ti = kf.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = ke.getGroup(op.param1)
X.preventJoinByTicket = True
ke.updateGroup(X)
Ticket = ke.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
#========================================================================
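            # The branches below reference mids (Fmid, Gmid, Hmid, Jmid, Nmid) and clients
            # (kg, kh, kj, kn, ko, kp, kq, kr, kd, ke, kf) that are not defined in this file;
            # they look like leftovers from a build with more bot accounts and are dead code here.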
if Fmid in op.param3:
if op.param2 in Bots and admin:
pass
try:
kg.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kg.getGroup(op.param1)
X.preventJoinByTicket = False
kg.updateGroup(X)
Ti = kg.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kf.getGroup(op.param1)
X.preventJoinByTicket = True
kf.updateGroup(X)
Ticket = kf.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Gmid in op.param3:
if op.param2 in Bots:
pass
try:
kh.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kh.getGroup(op.param1)
X.preventJoinByTicket = False
kh.updateGroup(X)
Ti = kh.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kg.getGroup(op.param1)
X.preventJoinByTicket = True
kg.updateGroup(X)
Ticket = kg.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Hmid in op.param3:
if op.param2 in Bots:
pass
try:
kj.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kj.getGroup(op.param1)
X.preventJoinByTicket = False
kj.updateGroup(X)
Ti = kj.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kh.getGroup(op.param1)
X.preventJoinByTicket = True
kh.updateGroup(X)
Ticket = kh.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Jmid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ti = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kj.getGroup(op.param1)
X.preventJoinByTicket = True
kj.updateGroup(X)
Ticket = kj.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Nmid in op.param3:
if op.param2 in Bots:
pass
try:
ko.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = ko.getGroup(op.param1)
G.preventJoinByTicket = False
ko.updateGroup(G)
Ti = ko.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kn.getGroup(op.param1)
X.preventJoinByTicket = True
kn.updateGroup(X)
Ti = kn.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
#============================================================================
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
                if msg.from_ in admin:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
X = cl.getGroup(list_[1])
X.preventJoinByTicket = True
cl.updateGroup(X)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
                url = msg.contentMetadata["postEndUrl"]  # e.g. line://home/post?userMid=<mid>&postId=<id>
cl.like(url[25:58], url[66:], likeType=1001)
ki.like(url[25:58], url[66:], likeType=1001)
kk.like(url[25:58], url[66:], likeType=1001)
kc.like(url[25:58], url[66:], likeType=1001)
kt.like(url[25:58], url[66:], likeType=1001)
ks.like(url[25:58], url[66:], likeType=1001)
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already in the blacklist")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"successfully load users into the blacklist")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"successfully removed from the blacklist")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"⎈ Profile Name :\n" + msg.contentMetadata["displayName"] + "\n\n⎈ Mid :\n" + msg.contentMetadata["mid"] + "\n\n⎈ Status Message :\n" + contact.statusMessage + "\n\n⎈ Pict Status :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\n⎈ Cover Status :\n" + str(cu) + "\n\n [☸]➦Powered By: メTamii々•┅─────")
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"⎈ Profile Name :\n" + contact.displayName + "\n\n⎈ Mid :\n" + msg.contentMetadata["mid"] + "\n\n⎈ Status Mesage:\n" + contact.statusMessage + "\n\n⎈ Pict Status :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\n⎈ Cover Status :\n" + str(cu) + "\n\n [☸]➦Powered By: メTamii々•┅─────")
elif msg.contentType == 16:
if wait["contact"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Help","help"]:
if msg.from_ in admin:
print "\nHelp pick up..."
if wait["lang"] == "JP":
cl.sendText(msg.to, helpMessage + datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,helpt)
elif ("Group name:" in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Group name:","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if wait["winvite"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
ki.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
ki.sendText(msg.to,"Call my owner to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Done Invite : \n➡" + _name)
wait["winvite"] = False
break
except:
try:
ki.findAndAddContactsByMid(invite)
ki.inviteIntoGroup(op.param1,[invite])
wait["winvite"] = False
except:
cl.sendText(msg.to,"Negative, Error detected")
wait["winvite"] = False
break
elif "Invite:" in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Invite:"," ")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif msg.text.lower() == 'contact bot':
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
ks.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Emid}
kt.sendMessage(msg)
#-----------------------------++++-----------------
#=======================================================
elif msg.text.lower() == "crash":
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': "c33b66e4b7709e54a6fe6eced6e57c157',"}
cl.sendMessage(msg)
#-----------------=============================
elif msg.text in ["Me"]:
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
cl.sendMessage(msg)
elif msg.text.lower() == 'gift1':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '1'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() == 'gift2':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '2'}
msg.text = None
ki.sendMessage(msg)
elif msg.text.lower() == 'gift3':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '3'}
msg.text = None
kk.sendMessage(msg)
elif msg.text.lower() == 'gift4':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '4'}
msg.text = None
kc.sendMessage(msg)
elif msg.text.lower() == 'gift5':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '5'}
msg.text = None
ks.sendMessage(msg)
elif msg.text.lower() == 'gift6':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}
msg.text = None
kt.sendMessage(msg)
elif msg.text.lower() == 'spam gift':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
ki.sendMessage(msg)
kk.sendMessage(msg)
kc.sendMessage(msg)
cl.sendMessage(msg)
ks.sendMessage(msg)
kt.sendMessage(msg)
#=================================================
#==================================================
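# Profile commands: "All rename:<name>" and "Bot<n> rename:<name>" update the bots' display names, "Allbio:<text>" their status messages; the length checks below presumably mirror LINE's 20-character name and 500-character status limits.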
elif "All rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("All rename:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ks.getProfile()
profile.displayName = string
ks.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = kt.getProfile()
profile.displayName = string
kt.updateProfile(profile)
cl.sendText(msg.to,"change name: "+string+"\nsucces")
elif msg.text.lower().startswith("allbio:"):
if msg.from_ in owner:
string = msg.text[len("Allbio:"):]
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kk.getProfile()
profile.statusMessage = string
kk.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kc.getProfile()
profile.statusMessage = string
kc.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ks.getProfile()
profile.statusMessage = string
ks.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kt.getProfile()
profile.statusMessage = string
kt.updateProfile(profile)
cl.sendText(msg.to,"successfully turn it into: " + string + "")
elif "Bot1 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot1 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot2 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot2 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot3 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot3 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
kc.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot4 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot4 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
kk.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot5 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot5 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = ks.getProfile()
profile.displayName = string
ks.updateProfile(profile)
ks.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot6 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot6 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = kt.getProfile()
profile.displayName = string
kt.updateProfile(profile)
kt.sendText(msg.to,"change name: "+string+"\nsucces")
#==================================================
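# Lookup commands: "lyric <title>" and "music <title>" query a third-party Joox lookup API, "wiki <query>" uses the wikipedia package with Indonesian ("id") as the result language.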
elif 'lyric ' in msg.text.lower():
if msg.from_ in admin:
try:
songname = msg.text.lower().replace('lyric ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
cl.sendText(msg.to, hasil)
except Exception as wak:
cl.sendText(msg.to, str(wak))
elif 'wiki ' in msg.text.lower():
if msg.from_ in admin:
try:
wiki = msg.text.lower().replace("wiki ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
elif msg.text.lower() == 'bot restart':
if msg.from_ in admin:
print "[Command]Like executed"
try:
cl.sendText(msg.to,"Restarting...")
restart_program()
except:
cl.sendText(msg.to,"Please wait")
restart_program()
pass
elif msg.text.lower() == 'ifconfig':
if msg.from_ in admin:
botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===")
elif msg.text.lower() == 'system':
if msg.from_ in admin:
botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===")
elif msg.text.lower() == 'kernel':
if msg.from_ in admin:
botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===")
elif msg.text.lower() == 'cpu':
if msg.from_ in admin:
botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===")
elif 'instagram ' in msg.text.lower():
if msg.from_ in admin:
try:
instagram = msg.text.lower().replace("instagram ","")
html = requests.get('https://www.instagram.com/' + instagram + '/?')
soup = BeautifulSoup(html.text, 'html5lib')
data = soup.find_all('meta', attrs={'property':'og:description'})
text = data[0].get('content').split()
data1 = soup.find_all('meta', attrs={'property':'og:image'})
text1 = data1[0].get('content').split()
user = "Name: " + text[-2] + "\n"
user1 = "Username: " + text[-1] + "\n"
followers = "Followers: " + text[0] + "\n"
following = "Following: " + text[2] + "\n"
post = "Post: " + text[4] + "\n"
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "======INSTAGRAM INFO USER======\n"
details = "\n======INSTAGRAM INFO USER======"
cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
cl.sendImageWithURL(msg.to, text1[0])
except Exception as njer:
cl.sendText(msg.to, str(njer))
elif 'music ' in msg.text.lower():
if msg.from_ in admin:
try:
songname = msg.text.lower().replace('music ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'This is Your Music\n'
hasil += 'Judul : ' + song[0]
hasil += '\nDurasi : ' + song[1]
hasil += '\nLink Download : ' + song[4]
cl.sendText(msg.to, hasil)
cl.sendText(msg.to, "Please Wait for audio...")
cl.sendAudioWithURL(msg.to, song[3])
except Exception as njer:
cl.sendText(msg.to, str(njer))
elif 'clean invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting。")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#================================================================================
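# Invite-link housekeeping: "clear invites" cancels every pending invitation, "link open"/"link close" toggle preventJoinByTicket so the group ticket (invite link) is usable or not.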
elif 'clear invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
random.choice(KAC).cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif 'link open' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
#===========================================================================
elif 'link close' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
#============================================================
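# "ginfo": summarises the current group (name, id, creator, member and pending-invite counts) and sends the creator's contact card.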
elif msg.text.lower() == 'ginfo':
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
msg.contentType = 13
msg.contentMetadata = {'mid': ginfo.creator.mid}
cl.sendText(msg.to,"[display name]\n" + str(ginfo.name) + "\n[Group Id]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\nmembers:" + str(len(ginfo.members)) + "\nInvitation:" + sinvitee + "")
cl.sendMessage(msg)
#------------------------_--------------------------------------
#===============================================================
elif 'group list' in msg.text.lower():
if msg.from_ in admin:
gs = cl.getGroupIdsJoined()
L = "『 Groups List 』\n"
for i in gs:
L += "[≫] %s \n" % (cl.getGroup(i).name + " | [ " + str(len (cl.getGroup(i).members)) + " ]")
cl.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif "Invite me" in msg.text:
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
for i in gid:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(i,[msg.from_])
cl.sendText(msg.to, "successfully invited you to all groups")
elif "Steal group pict" in msg.text:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(msg.to,path)
elif "Turn off bots" in msg.text:
if msg.from_ in owner:
try:
import sys
sys.exit()
except:
pass
#==================================================================
elif "Steal bio" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,contact.statusMessage)
except:
cl.sendText(msg.to,contact.statusMessage)
elif msg.text in ["Creator"]:
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': 'ub76a0153a283da9a1443dfb043181335'}
cl.sendMessage(msg)
cl.sendText(msg.to,"Itu Creator Saya ")
elif "Admin on @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff add executing"
_name = msg.text.replace("Admin on @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"succes add to adminlist")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"owner permission required.")
elif msg.text.lower() == 'admin list':
if msg.from_ in admin:
if admin == []:
cl.sendText(msg.to,"The adminlist is empty")
else:
cl.sendText(msg.to,"loading...")
mc = ""
gh = ""
for mi_d in owner:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
for mi_d in admin:
gh += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,"=======OWNER=======\n\n" + mc + "\n=======ADMIN=======\n\n" + gh +"\n=====================\n")
print "[Command]Stafflist executed"
elif "Expel on @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff remove executing"
_name = msg.text.replace("Expel on @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Succes remove admin from adminlist")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"owner permission required.")
#==========================================================
elif 'bot mid' in msg.text.lower():
if msg.from_ in admin:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
kk.sendText(msg.to,Bmid)
kc.sendText(msg.to,Cmid)
ks.sendText(msg.to,Dmid)
kt.sendText(msg.to,Emid)
#=======================================================
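# Translation commands use goslate (Google Translate); the suffix after "Translate-" picks the target language code.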
elif "Translate-eng " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-eng ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'en')
cl.sendText(msg.to,trs)
print '[Command] Translate EN'
except Exception as error:
cl.sendText(msg.to,(error))
elif "Translate-jp" in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-jp ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'ja')
cl.sendText(msg.to,trs)
print '[Command] Translate jp'
except Exception as error:
cl.sendText(msg.to,(error))
elif "Translate-th " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-th ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'th')
cl.sendText(msg.to,trs)
print '[Command] Translate th'
except Exception as error:
cl.sendText(msg.to,(error))
elif "Translate-idn " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-id ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'id')
cl.sendText(msg.to,trs)
print '[Command] Translate ID'
except Exception as error:
cl.sendText(msg.to,(error))
elif "Say " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Say ","")
cl.sendText(msg.to,(bctxt))
kk.sendText(msg.to,(bctxt))
kc.sendText(msg.to,(bctxt))
ki.sendText(msg.to,(bctxt))
ks.sendText(msg.to,(bctxt))
kt.sendText(msg.to,(bctxt))
#======================================
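# "TL:<text>": posts <text> to the bot's own timeline and replies with a link to the new post.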
elif "TL:" in msg.text:
if msg.from_ in admin:
tl_text = msg.text.replace("TL:","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
#=================================================================
elif msg.text in ["Protect:hight","protect:hight"]:
if msg.from_ in admin:
if wait["protectionOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Auto blockqr:off","auto blockqr:off"]:
if msg.from_ in admin:
if wait["qr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Welcome message:on"]:
if msg.from_ in admin:
if wait["welcomemsg"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["welcomemsg"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message on")
elif msg.text in ["Auto blockqr:on","auto blockqr:on"]:
if msg.from_ in admin:
if wait["qr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Welcome message:off"]:
if msg.from_ in admin:
if wait["welcomemsg"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["welcomemsg"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Protect:low","Protect:low"]:
if msg.from_ in admin:
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif "Namelock:on" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƝ")
else:
cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƝ.")
wait['pname'][msg.to] = True
wait['pro_name'][msg.to] = cl.getGroup(msg.to).name
elif "Namelock:off" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝ ƠƑƑ.")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƑƑ")
elif "Blockinvite:on" == msg.text:
if msg.from_ in admin:
gid = msg.to
autocancel[gid] = "poni"
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƝ")
elif "Blockinvite:off" == msg.text:
if msg.from_ in admin:
try:
del autocancel[msg.to]
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƑƑ")
except:
pass
#================================================================
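# "Invite user": arms the winvite flag; the next contact card received (contentType 13, handled further up) is invited into the group.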
elif msg.text in ["Invite user"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact")
#============================================================
elif "Steal mid" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
cl.sendText(msg.to,"Mc: " + key1)
elif "Steal contact" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)
elif "Mc:" in msg.text:
if msg.from_ in admin:
mmid = msg.text.replace("Mc:","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
#=======================================================
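# The toggles below flip boolean flags in the wait dict; most reply with one of two strings depending on wait["lang"].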
elif msg.text in ["Auto notice:on"]:
if msg.from_ in admin:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already activated")
else:
cl.sendText(msg.to,"enable notifications")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already activated")
else:
cl.sendText(msg.to,"enable notifications")
#=========================================================================
elif msg.text in ["Auto notice:off"]:
if msg.from_ in admin:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"disable notifications")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"disable notifications")
elif msg.text in ["Auto join:on"]:
if msg.from_ in admin:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"")
else:
cl.sendText(msg.to,"already activated")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"enable auto koin")
else:
cl.sendText(msg.to,"")
elif msg.text in ["Auto join:off"]:
if msg.from_ in admin:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"desable auto join")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"desable auto join")
elif "Gcancel:" in msg.text:
if msg.from_ in admin:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒绝。要时开请指定人数发送")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + " The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的小组用自动邀请拒绝")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["Auto leave:on"]:
if msg.from_ in admin:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"要了开。")
elif msg.text in ["Auto leave:off"]:
if msg.from_ in admin:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
#===============================================================
elif msg.text in ["Auto like:on"]:
if msg.from_ in admin:
if wait["likeOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["likeOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already。")
elif msg.text in ["Auto like:off"]:
if msg.from_ in admin:
if wait["likeOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["likeOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already。")
#==========================================================
elif msg.text in ["Settings","Set"]:
if msg.from_ in admin:
print "Setting pick up..."
md="list of bot settings\n\n"
if wait["likeOn"] == True: md+="Auto like : on\n"
else:md+="Auto like : off\n"
if wait["winvite"] == True: md+="Invite : on\n"
else:md+="Invite : off\n"
if wait["pname"] == True: md+="Namelock : on\n"
else:md+="Namelock : off\n"
if wait["contact"] == True: md+="Notice : on\n"
else: md+="Notice : off\n"
if wait["autoJoin"] == True: md+="Auto join : on\n"
else: md +="Auto join : off\n"
if wait["autoCancel"]["on"] == True:md+="Group cancel :" + str(wait["autoCancel"]["members"]) + "\n"
else: md+= "Group cancel : off\n"
if wait["leaveRoom"] == True: md+="Auto leave : on\n"
else: md+="Auto leave : off\n"
if wait["clock"] == True: md+="Clock Name : on\n"
else:md+="Clock Name : off\n"
if wait["autoAdd"] == True: md+="Auto add : on\n"
else:md+="Auto add : off\n"
if wait["commentOn"] == True: md+="Comment : on\n"
else:md+="Comment : off\n"
if wait["Backup"] == True: md+="Backup : on\n"
else:md+="Backup : off\n"
if wait["qr"] == True: md+="Protect QR : on\n"
else:md+="Protect QR : off\n"
if wait["welcomemsg"] == True: md+="welcome message : on\n"
else:md+="welcome message : off\n"
if wait["protectionOn"] == True: md+="Protection : hight\n\n"+ datetime.today().strftime('%H:%M:%S')
else:md+="Protection : low\n\n"+ datetime.today().strftime('%H:%M:%S')
cl.sendText(msg.to,md)
#========================================
#------------------------------------------------
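# Diagnostics: "Time" prints the server time, "Ping"/"Speed" report a rough response time, "Info @<name>" dumps a member's profile details.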
elif "Time" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["PING","Ping","ping"]:
if msg.from_ in admin:
ki.sendText(msg.to,"PONG double thumbs up Har Har")
kk.sendText(msg.to,"PONG double thumbs up Har Har")
kc.sendText(msg.to,"PONG double thumbs up Har Har")
ks.sendText(msg.to,"PONG double thumbs up Har Har")
kt.sendText(msg.to,"PONG double thumbs up Har Har")
cl.sendText(msg.to,"PONG double thumbs up Har Har")
elif "Info @" in msg.text:
if msg.from_ in admin:
nama = msg.text.replace("Info @","")
target = nama.rstrip(' ')
tob = cl.getGroup(msg.to)
for g in tob.members:
if target == g.displayName:
gjh= cl.getContact(g.mid)
try:
cover = cl.channel.getCover(g.mid)
except:
cover = ""
cl.sendText(msg.to,"[Display Name]:\n" + gjh.displayName + "\n[Mid]:\n" + gjh.mid + "\n[BIO]:\n" + gjh.statusMessage + "\n[pict profile]:\nhttp://dl.profile.line-cdn.net/" + gjh.pictureStatus + "\n[Cover]:\n" + str(cover))
else:
pass
#-----------------------------------------------
elif msg.text in ["Backup:on"]:
if msg.from_ in admin:
if wait["Backup"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Backup:off"]:
if msg.from_ in admin:
if wait["Backup"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Rejectall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Invites has been Rejected")
else:
cl.sendText(msg.to,"拒绝了全部的邀请。")
elif msg.text in ["Auto add:on"]:
if msg.from_ in admin:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"success activated")
else:
cl.sendText(msg.to,"success activated")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"success activated")
else:
cl.sendText(msg.to,"success activated")
elif msg.text in ["Auto add:off"]:
if msg.from_ in admin:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"success unactivated")
else:
cl.sendText(msg.to,"success unactivated")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"success unactivated")
else:
cl.sendText(msg.to,"success unactivated")
#========================================
#========================================
elif "Update welcome:" in msg.text:
if msg.from_ in admin:
wait["welmsg"] = msg.text.replace("Update welcome:","")
cl.sendText(msg.to,"update welcome message succes"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Check welcome message"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,"yor bot message\n\n" + wait["welmsg"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["welmsg"])
elif "Message:" in msg.text:
if msg.from_ in admin:
wait["message"] = msg.text.replace("Message:","")
cl.sendText(msg.to,"bot message\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif "Add message:" in msg.text:
if msg.from_ in admin:
wait["message"] = msg.text.replace("Add message:","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"done。\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Check message"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,"yor bot message\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Comment:" in msg.text:
if msg.from_ in admin:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment:" in msg.text:
if msg.from_ in admin:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif msg.text in ["Comment:on"]:
if msg.from_ in admin:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Comment:off"]:
if msg.from_ in admin:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Check comment"]:
if msg.from_ in admin:
cl.sendText(msg.to,"message comment\n\n" + str(wait["comment"]))
elif msg.text in ["Gurl"]:
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
uye.updateGroup(x)
gurl = uye.reissueGroupTicket(msg.to)
uye.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
#-------------------------------------------------------
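# "Gift @<name>": sends a theme gift message to the mentioned member; the PRDID below appears to be a stock LINE theme product id.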
elif "Gift @" in msg.text:
_name = msg.text.replace("Gift @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentType = 9
msg.contentMetadata={'PRDID': '89131c1a-e549-4bd5-9e60-e24de0d2e252',
'PRDTYPE': 'THEME',
'MSGTPL': '10'}
msg.text = None
cl.sendMessage(msg)
#===========================================
elif msg.text.lower() == 'responsename':
if msg.from_ in admin:
profile = cl.getProfile()
text = profile.displayName + " "
cl.sendText(msg.to, text)
profile = ki.getProfile()
text = profile.displayName + " "
ki.sendText(msg.to, text)
profile = kk.getProfile()
text = profile.displayName + " "
kk.sendText(msg.to, text)
profile = kc.getProfile()
text = profile.displayName + " "
kc.sendText(msg.to, text)
profile = ks.getProfile()
text = profile.displayName + " "
ks.sendText(msg.to, text)
profile = kt.getProfile()
text = profile.displayName + ""
kt.sendText(msg.to, text)
#========================================
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist s")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text in ["Clock:on","Clock on","Jam on","Jam:on"]:
if wait["clock"] == True:
cl.sendText(msg.to,"already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"[%H:%M]")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"done")
elif msg.text in ["Clock:off","Clock off","Jam off","Jam:off"]:
if wait["clock"] == False:
cl.sendText(msg.to,"already off")
else:
wait["clock"] = False
cl.sendText(msg.to,"done")
elif "Cc: " in msg.text:
n = msg.text.replace("Cc: ","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"Changed to:\n\n" + n)
elif msg.text in ["Up"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"[%H:%M]")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Refresh to update")
else:
cl.sendText(msg.to,"Please turn on the name clock")
#========================================
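# Profile scraping: "Steal cover @", "Midpict:<mid>" and "Steal pict <name>" fetch a member's cover or avatar from dl.profile.line-cdn.net and resend it as an image.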
elif "Steal cover @" in msg.text:
if msg.from_ in admin:
print "[Command]dp executing"
_name = msg.text.replace("Steal cover @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Midpict:" in msg.text:
if msg.from_ in admin:
umid = msg.text.replace("Midpict:","")
contact = cl.getContact(umid)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
elif "Steal pict " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
msg.contentType = 0
steal0 = msg.text.replace("Steal pict ","")
steal1 = steal0.lstrip()
steal2 = steal1.replace("@","")
steal3 = steal2.rstrip()
_name = steal3
group = cl.getGroup(msg.to)
targets = []
for g in group.members:
if _name == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
except:
cl.sendText(msg.to,"Error!")
break
else:
cl.sendText(msg.to,"Tidak bisa dilakukan di luar grup")
elif "copy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
cl.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif "copy1 @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("copy1 @","")
_nametarget = _name.rstrip(' ')
gs = kk.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kk.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
kk.CloneContactProfile(target)
kk.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif "copy2 @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("copy2 @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki.CloneContactProfile(target)
ki.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif "copy3 @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("copy3 @","")
_nametarget = _name.rstrip(' ')
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kc.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
kc.CloneContactProfile(target)
kc.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif "copy4 @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("copy4 @","")
_nametarget = _name.rstrip(' ')
gs = ks.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ks.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ks.CloneContactProfile(target)
ks.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif "copy5 @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("copy5 @","")
_nametarget = _name.rstrip(' ')
gs = kt.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kt.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
kt.CloneContactProfile(target)
kt.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif msg.text in ["Backup","backup"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
cl.sendText(msg.to, "Refreshed.")
except Exception as e:
cl.sendText(msg.to, str(e))
elif msg.text in ["Gcreator:inv"]:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
try:
cl.findAndAddContactsByMid(gCreator)
cl.inviteIntoGroup(msg.to,[gCreator])
print "success inv gCreator"
except:
pass
elif "Copy @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
ki.CloneContactProfile(target)
kk.CloneContactProfile(target)
kc.CloneContactProfile(target)
ks.CloneContactProfile(target)
kt.CloneContactProfile(target)
cl.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif msg.text in ["Kembali ke asli"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
ki.updateDisplayPicture(backup.pictureStatus)
ki.updateProfile(backup)
kk.updateDisplayPicture(backup.pictureStatus)
kk.updateProfile(backup)
kc.updateDisplayPicture(backup.pictureStatus)
kc.updateProfile(backup)
ks.updateDisplayPicture(backup.pictureStatus)
ks.updateProfile(backup)
kt.updateDisplayPicture(backup.pictureStatus)
kt.updateProfile(backup)
cl.sendText(msg.to, "Backup Astro Sukses")
except Exception as e:
cl.sendText(msg.to, str (e))
#===============================================
elif msg.text in ["debug speed","Debug speed"]:
if msg.from_ in admin:
cl.sendText(msg.to, "Measuring...")
start = time.time()
time.sleep(0.0001)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Blacklist all" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Blacklist all","")
gs = cl.getGroup(msg.to)
cl.sendText(msg.to,"Semua Telah Di Hapus")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Maaf")
else:
for target in targets:
if not target in Bots:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Success Boss")
except:
cl.sentText(msg.to,"Berhasil Dihapus")
elif msg.text in ["Ban cek","Cekban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = "[⎈]Mid Blacklist [⎈]"
for mm in matched_list:
cocoa += "\n" + mm + "\n"
cl.sendText(msg.to,cocoa + "")
elif "Details grup: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("/DetailsGroup: ","")
if gid in [""," "]:
cl.sendText(msg.to,"Grup id tidak valid")
else:
try:
groups = cl.getGroup(gid)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
h = "[" + groups.name + "]\n -+GroupID : " + gid + "\n -+Members : " + members + "\n -+MembersPending : " + pendings + "\n -+Creator : " + groups.creator.displayName + "\n -+GroupPicture : http://dl.profile.line.naver.jp/" + groups.pictureStatus
cl.sendText(msg.to,h)
except Exception as error:
cl.sendText(msg.to,(error))#-------------------------------------------------------
#--------------------------------------------------------
elif "Ban group: " in msg.text:
grp = msg.text.replace("Ban group: ","")
gid = cl.getGroupIdsJoined()
if msg.from_ in Creator:
for i in gid:
h = cl.getGroup(i).name
if h == grp:
wait["BlGroup"][i]=True
cl.sendText(msg.to, "Success Ban Group : "+grp)
else:
pass
else:
cl.sendText(msg.to, "Khusus Creator")
#--------------------------------------------------------
elif msg.text in ["List ban","List ban group"]:
if msg.from_ in admin:
if wait["BlGroup"] == {}:
ki.sendText(msg.to,"nothing")
kk.sendText(msg.to,"nothing")
kc.sendText(msg.to,"nothing")
else:
mc = ""
for gid in wait["BlGroup"]:
mc += "-> " +cl.getGroup(gid).name + "\n"
ki.sendText(msg.to,"===[Ban Group]===\n"+mc)
else:
cl.sendText(msg.to, "Khusus Admin")
#--------------------------------------------------------
elif msg.text in ["Del ban: "]:
if msg.from_ in admin:
ng = msg.text.replace("Del ban: ","")
for gid in wait["BlGroup"]:
if cl.getGroup(gid).name == ng:
del wait["BlGroup"][gid]
cl.sendText(msg.to, "Success del ban "+ng)
else:
pass
else:
cl.sendText(msg.to, "Khusus Admin")
#--------------------------------------------------------
elif "Join group: " in msg.text:
ng = msg.text.replace("Join group: ","")
gid = cl.getGroupIdsJoined()
try:
if msg.from_ in Creator:
for i in gid:
h = cl.getGroup(i).name
if h == ng:
cl.inviteIntoGroup(i,[Creator])
cl.sendText(msg.to,"Success join to ["+ h +"] group")
else:
pass
else:
cl.sendText(msg.to,"Khusus Creator")
except Exception as e:
cl.sendMessage(msg.to, str(e))
#--------------------------------------------------------
elif "Leave group: " in msg.text:
ng = msg.text.replace("Leave group: ","")
gid = cl.getGroupIdsJoined()
if msg.from_ in Creator:
for i in gid:
h = cl.getGroup(i).name
if h == ng:
cl.sendText(i,"Bot di paksa keluar oleh owner!")
cl.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
cl.sendText(msg.to,"Success left ["+ h +"] group")
else:
pass
else:
cl.sendText(msg.to,"Khusus Creator")
elif "Set member: " in msg.text:
if msg.from_ in admin:
jml = msg.text.replace("Set member: ","")
wait["Members"] = int(jml)
cl.sendText(msg.to, "Jumlah minimal member telah di set : "+jml)
else:
cl.sendText(msg.to, "Khusus Admin")
#--------------------------------------------------------
elif "Add all" in msg.text:
if msg.from_ in admin:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.findAndAddContactsByMids(mi_d)
cl.sendText(msg.to,"Success Add all")
else:
cl.sendText(msg.to, "Khusus Admin")
#--------------------------------------------------------
elif "Recover" in msg.text:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.createGroup("Recover", mi_d)
cl.sendText(msg.to,"Success recover")
elif "Ulti " in msg.text:
if msg.from_ in admin:
ulti0 = msg.text.replace("Ulti ","")
ulti1 = ulti0.rstrip()
ulti2 = ulti1.replace("@","")
ulti3 = ulti2.rstrip()
_name = ulti3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
nl.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets ==[]:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nl.kickoutFromGroup(msg.to,[target])
nl.leaveGroup(msg.to)
print (msg.to,[g.mid])
except:
nl.sendText(msg.t,"Ter ELIMINASI....")
nl.sendText(msg.to,"WOLES brooo....!!!")
nl.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
elif msg.text in ["Speed","Sp"]:
if msg.from_ in admin:
start = time.time()
cl.sendText(msg.to, "loading.....")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
ki.sendText(msg.to, "%sseconds" % (elapsed_time))
kk.sendText(msg.to, "%sseconds" % (elapsed_time))
kc.sendText(msg.to, "%sseconds" % (elapsed_time))
ks.sendText(msg.to, "%sseconds" % (elapsed_time))
kt.sendText(msg.to, "%sseconds" % (elapsed_time))
#========================================
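# "Bot<n> backup run": snapshots the bot's display name, status message and picture id into plain text files for later restore.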
elif msg.text in ["Bot1 backup run"]:
if msg.from_ in admin:
wek = cl.getContact(mid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mydn.txt',"w")
s.write(r)
s.close()
t = open('mysm.txt',"w")
t.write(i)
t.close()
u = open('myps.txt',"w")
u.write(a)
u.close()
cl.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot2 backup run"]:
if msg.from_ in admin:
wek = ki.getContact(Amid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mgydn.txt',"w")
s.write(r)
s.close()
t = open('myesm.txt',"w")
t.write(i)
t.close()
u = open('mypfs.txt',"w")
u.write(a)
u.close()
ki.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot3 backup run"]:
if msg.from_ in admin:
wek = kk.getContact(Bmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('msgydn.txt',"w")
s.write(r)
s.close()
t = open('mysfdgm.txt',"w")
t.write(i)
t.close()
u = open('gymyps.txt',"w")
u.write(a)
u.close()
kk.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot4 backup run"]:
if msg.from_ in admin:
wek = kc.getContact(Cmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('jhmydn.txt',"w")
s.write(r)
s.close()
t = open('myhfsm.txt',"w")
t.write(i)
t.close()
u = open('mypfhs.txt',"w")
u.write(a)
u.close()
kc.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot5 backup run"]:
if msg.from_ in admin:
wek = ks.getContact(Dmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('madydn.txt',"w")
s.write(r)
s.close()
t = open('mysgjm.txt',"w")
t.write(i)
t.close()
u = open('myrdps.txt',"w")
u.write(a)
u.close()
ks.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot6 backup run"]:
if msg.from_ in admin:
wek = kt.getContact(Emid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mydnsgv.txt',"w")
s.write(r)
s.close()
t = open('jhmysm.txt',"w")
t.write(i)
t.close()
u = open('myiyps.txt',"w")
u.write(a)
u.close()
kt.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
#----------------------------------------------
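# "Bot<n> clone @mention": copies the mentioned user's display name, status message and profile picture onto that bot account.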
elif "Bot1 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = cl.getContact(target)
X = contact.displayName
profile = cl.getProfile()
profile.displayName = X
cl.updateProfile(profile)
cl.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = cl.getProfile()
lol.statusMessage = Y
cl.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
cl.updateProfilePicture(P)
except Exception as e:
cl.sendText(msg.to, "Failed!")
print e
elif "Bot2 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ki.getContact(target)
X = contact.displayName
profile = ki.getProfile()
profile.displayName = X
ki.updateProfile(profile)
ki.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ki.getProfile()
lol.statusMessage = Y
ki.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ki.updateProfilePicture(P)
except Exception as e:
ki.sendText(msg.to, "Failed!")
print e
elif "Bot3 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kk.getContact(target)
X = contact.displayName
profile = kk.getProfile()
profile.displayName = X
kk.updateProfile(profile)
kk.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kk.getProfile()
lol.statusMessage = Y
kk.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kk.updateProfilePicture(P)
except Exception as e:
kk.sendText(msg.to, "Failed!")
print e
elif "Bot4 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kc.getContact(target)
X = contact.displayName
profile = kc.getProfile()
profile.displayName = X
kc.updateProfile(profile)
kc.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kc.getProfile()
lol.statusMessage = Y
kc.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kc.updateProfilePicture(P)
except Exception as e:
kc.sendText(msg.to, "Failed!")
print e
elif "Bot5 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ks.getContact(target)
X = contact.displayName
profile = ks.getProfile()
profile.displayName = X
ks.updateProfile(profile)
ks.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ks.getProfile()
lol.statusMessage = Y
ks.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ks.updateProfilePicture(P)
except Exception as e:
ks.sendText(msg.to, "Failed!")
print e
elif "Bot6 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kt.getContact(target)
X = contact.displayName
profile = kt.getProfile()
profile.displayName = X
kt.updateProfile(profile)
kt.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kt.getProfile()
lol.statusMessage = Y
kt.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kt.updateProfilePicture(P)
except Exception as e:
kt.sendText(msg.to, "Failed!")
print e
#=================================================
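# "Bot<n> backup": restores the profile previously saved by "Bot<n> backup run" from the matching text files.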
elif "Bot1 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mydn.txt',"r")
name = h.read()
h.close()
x = name
profile = cl.getProfile()
profile.displayName = x
cl.updateProfile(profile)
i = open('mysm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = cl.getProfile()
cak.statusMessage = y
cl.updateProfile(cak)
j = open('myps.txt',"r")
ps = j.read()
j.close()
p = ps
cl.updateProfilePicture(p)
cl.sendText(msg.to, "Succes")
except Exception as e:
cl.sendText(msg.to,"Gagagl!")
print e
elif "Bot2 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ki.getProfile()
profile.displayName = x
ki.updateProfile(profile)
i = open('myesm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ki.getProfile()
cak.statusMessage = y
ki.updateProfile(cak)
j = open('mypfs.txt',"r")
ps = j.read()
j.close()
p = ps
ki.updateProfilePicture(p)
ki.sendText(msg.to, "Succes")
except Exception as e:
ki.sendText(msg.to,"Gagagl!")
print e
elif "Bot3 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('msgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kk.getProfile()
profile.displayName = x
kk.updateProfile(profile)
i = open('mysfdgm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kk.getProfile()
cak.statusMessage = y
kk.updateProfile(cak)
j = open('gymyps.txt',"r")
ps = j.read()
j.close()
p = ps
kk.updateProfilePicture(p)
kk.sendText(msg.to, "Succes")
except Exception as e:
kk.sendText(msg.to,"Gagagl!")
print e
elif "Bot4 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('jhmydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kc.getProfile()
profile.displayName = x
kc.updateProfile(profile)
i = open('myhfsm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kc.getProfile()
cak.statusMessage = y
kc.updateProfile(cak)
j = open('mypfhs.txt',"r")
ps = j.read()
j.close()
p = ps
kc.updateProfilePicture(p)
kc.sendText(msg.to, "Succes")
except Exception as e:
kc.sendText(msg.to,"Gagagl!")
print e
elif "Bot5 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('madydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ks.getProfile()
profile.displayName = x
ks.updateProfile(profile)
i = open('mysgjm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ks.getProfile()
cak.statusMessage = y
ks.updateProfile(cak)
j = open('myrdps.txt',"r")
ps = j.read()
j.close()
p = ps
ks.updateProfilePicture(p)
ks.sendText(msg.to, "Succes")
except Exception as e:
ks.sendText(msg.to,"Gagagl!")
print e
elif "Bot6 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mydnsgv.txt',"r")
name = h.read()
h.close()
x = name
profile = kt.getProfile()
profile.displayName = x
kt.updateProfile(profile)
i = open('jhmysm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kt.getProfile()
cak.statusMessage = y
kt.updateProfile(cak)
j = open('myiyps.txt',"r")
ps = j.read()
j.close()
p = ps
kt.updateProfilePicture(p)
kt.sendText(msg.to, "Succes")
except Exception as e:
kt.sendText(msg.to,"Gagagl!")
print e
#=================================================
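# Read-receipt lurking: "Lurking" stores the current message id as a read point; "Lurking result" lists everyone recorded as having read past it.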
elif msg.text == "Lurking":
if msg.from_ in admin:
cl.sendText(msg.to, "Set point.")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%Y-%m-%d %H:%M')
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text == "Lurking result":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "╔═══════════════%s\n╠════════════════\n%s╠═══════════════\n║Readig point creation:\n║ [%s]\n╚════════════════" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "anda slah ketik-_-")
#========================================
#---------------Purge the group without kicking fellow bots/admins----------#
elif "Cleanse" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok cleanse"
_name = msg.text.replace("Cleanse","")
gs = kc.getGroup(msg.to)
cl.sendText(msg.to,"Just some casual cleansing ")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"you are not admin")
else:
for target in targets:
if not target in Bots:
if not target in admin:
try:
klist=[ki,kk,kc,ks,kt]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendText(msg.to,"Group cleanse")
#================================================
#========================================
elif msg.text.lower() == 'welcome':
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
cl.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
#=========================================
elif msg.text in ["Mimic on","mimic on"]:
if wait3["copy"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic On")
else:
wait3["copy"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic On")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Mimic off","mimic:off"]:
if wait3["copy"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic Off")
else:
wait3["copy"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic Off")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Target list"]:
if wait3["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in wait3["target"]:
mc += "✔️ "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if wait3["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
wait3["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
wait3["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
elif "Target @" in msg.text:
target = msg.text.replace("Target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
wait3["target"][t] = True
cl.sendText(msg.to,"Target added")
elif "Del target @" in msg.text:
target = msg.text.replace("Del target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
del wait3["target"][t]
cl.sendText(msg.to,"Target deleted")
#=======================================
#-------------------Spam function start--------------------------
elif "Spam change:" in msg.text:
if msg.from_ in admin:
wait["spam"] = msg.text.replace("Spam change:","")
cl.sendText(msg.to,"spam changed")
elif "Spam add:" in msg.text:
if msg.from_ in admin:
wait["spam"] = msg.text.replace("Spam add:","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"spam changed")
else:
cl.sendText(msg.to,"Done")
elif "Spam:" in msg.text:
if msg.from_ in admin:
strnum = msg.text.replace("Spam:","")
num = int(strnum)
for var in range(0,num):
cl.sendText(msg.to, wait["spam"])
#-------------------Spam function end----------------------------
#-----------------------------------------------
#-----------------------------------------------
elif 'apakah' in msg.text.lower():
if msg.from_ in admin:
tanya = msg.text.lower().replace("apakah","")
jawab = ("Ya","Tidak","Mungkin","Bisa jadi")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
#================================================
#===============================================
#=================================================
elif "Spamg " in msg.text:
if msg.from_ in admin:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spamg "+str(txt[1])+" "+str(jmlh)+ " ","")
tulisan = jmlh * (teks+"\n")
#Keke cantik <3
if txt[1] == "on":
if jmlh <= 10000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of range! ")
elif txt[1] == "off":
if jmlh <= 10000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out of range! ")
#-----------------------------------------------
elif "Steal mid @" in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Steal mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
#-------------------------------------------------
elif "Pm cast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Pm cast ", "")
t = cl.getAllContactIds()
for manusia in t:
cl.sendText(manusia,(bctxt))
elif "Broadcast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Broadcast ", "")
n = cl.getGroupIdsJoined()
for manusia in n:
cl.sendText(manusia,(bctxt +"\n\n\nbroadcasted by:" + cl.getContact(msg.from_).displayName))
#========================================
elif msg.text in ["Join all"]:
if msg.from_ in admin:
G = cl.getGroup(msg.to)
info = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
ks.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
kt.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
print "All_Kickers_Ok!"
#=====================================================================================
elif msg.text in ["Bye allgroups"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
for i in gid:
cl.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
ks.leaveGroup(i)
kt.leaveGroup(i)
if wait["lang"] == "JP":
ki.sendText(msg.to,"bye-bye")
else:
ki.sendText(msg.to,"He declined all invitations")
elif msg.text in ["Bye all"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
ks.leaveGroup(msg.to)
kt.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Center @bye"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
try:
cl.sendMessage(msg.to,"bye-bye")
cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Nk "]:
if msg.from_ in admin:
mk0 = msg.text.replace("Nk ","")
mk1 = mk0.lstrip()
mk2 = mk1.replace("@","")
mk3 = mk2.rstrip()
_name = mk3
gs = ki.getGroup(msg.to)
targets = []
for h in gs.members:
if _name in h.displayName:
targets.append(h.mid)
if targets == []:
ki.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
if target != msg.from_:
ki.kickoutFromGroup(msg.to,[target])
except:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
#==========================================
elif "youtube " in msg.text.lower():
if msg.from_ in admin:
query = msg.text.split(" ")
try:
if len(query) == 3:
isi = yt(query[2])
hasil = isi[int(query[1])-1]
cl.sendText(msg.to, hasil)
else:
isi = yt(query[1])
cl.sendText(msg.to, isi[0])
except Exception as e:
cl.sendText(msg.to, str(e))
elif 'Vidio ' in msg.text:
if msg.from_ in admin:
try:
textToSearch = (msg.text).replace('Vidio ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
ght=('https://www.youtube.com' + results['href'])
cl.sendVideoWithURL(msg.to,ght)
except:
cl.sendText(msg.to,"Could not find it")
#-------------------------------------------------
elif "/say-jp " in msg.text:
say = msg.text.replace("/say-jp ","")
lang = 'ja'  # gTTS uses 'ja' for Japanese
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
#------------------------------------------------
elif "/say-en " in msg.text:
say = msg.text.replace("/say-en ","")
lang = 'en'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
#-----------------------------------------------
elif "/say " in msg.text:
psn = msg.text.replace("/say ","")
tts = gTTS(psn, lang='id', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
#-----------------------------------------------
elif "Siapa " in msg.text:
tanya = msg.text.replace("Siapa ","")
jawab = ("Dia yg kebanyakan micin"," Dia gila")
jawaban = random.choice(jawab)
tts = gTTS(text=jawaban, lang='en')
tts.save('tts.mp3')
cl.sendAudio(msg.to,'tts.mp3')
#==========================================
elif "Dosa @" in msg.text:
tanya = msg.text.replace("Dosa @","")
jawab = ("60%","70%","80%","90%","100%","Tak terhingga")
jawaban = random.choice(jawab)
tts = gTTS(text=jawaban, lang='en')
tts.save('tts.mp3')
cl.sendText(msg.to,"Dosanya adalah cek voie ini")
cl.sendAudio(msg.to,'tts.mp3')
#==========================================
#==========================================
elif "/ " in msg.text.lower():
txt = msg.text.replace("kedapkedip ", "")
t1 = "\xf4\x80\xb0\x82\xf4\x80\xb0\x82\xf4\x80\xb0\x82\xf4\x80\xb0\x82\xf4\x80\xa0\x81\xf4\x80\xa0\x81\xf4\x80\xa0\x81"
t2 = "\xf4\x80\x82\xb3\xf4\x8f\xbf\xbf"
cl.sendText(msg.to, t1 + txt + t2)
#-------Lurker check (Siri-style)-----------------------------
elif "Setlastpoint" in msg.text:
subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
#cl.sendText(msg.to, "Checkpoint checked!")
cl.sendText(msg.to, "Set the lastseens' point(`・ω・´)\n\n" + datetime.now().strftime('%H:%M:%S'))
print "Setlastpoint"
#--------------------------------------------
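# Viewlastseen reads dataSeen/<group id>.txt, where each line is expected to be
# "<mid>|<epoch ms>" (appended elsewhere in this script when read receipts come
# in); it de-duplicates the mids, resolves them to display names and reports the
# last-seen time of each user since the Setlastpoint checkpoint.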
elif "Viewlastseen" in msg.text:
lurkGroup = ""
dataResult, timeSeen, contacts, userList, timelist, recheckData = [], [], [], [], [], []
with open('dataSeen/'+msg.to+'.txt','r') as rr:
contactArr = rr.readlines()
for v in xrange(len(contactArr) -1,0,-1):
num = re.sub(r'\n', "", contactArr[v])
contacts.append(num)
pass
contacts = list(set(contacts))
for z in range(len(contacts)):
arg = contacts[z].split('|')
userList.append(arg[0])
timelist.append(arg[1])
uL = list(set(userList))
for ll in range(len(uL)):
try:
getIndexUser = userList.index(uL[ll])
timeSeen.append(time.strftime("%d日 %H:%M:%S", time.localtime(int(timelist[getIndexUser]) / 1000)))
recheckData.append(userList[getIndexUser])
except IndexError:
    pass
contactId = cl.getContacts(recheckData)
for v in range(len(recheckData)):
dataResult.append(contactId[v].displayName + ' ('+timeSeen[v]+')')
pass
if len(dataResult) > 0:
grp = '\n• '.join(str(f) for f in dataResult)
total = '\nThese %i users have seen at the lastseen\npoint(`・ω・´)\n\n%s' % (len(dataResult), datetime.now().strftime('%H:%M:%S') )
cl.sendText(msg.to, "• %s %s" % (grp, total))
else:
cl.sendText(msg.to, "Sider ga bisa di read cek setpoint dulu bego tinggal ketik\nSetlastpoint\nkalo mau liat sider ketik\nViewlastseen")
print "Viewlastseen"
#==========================================
elif msg.text in ["Purge"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"group purge")
return
for jj in matched_list:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif ("Vkick" in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
#-----------------------------------------------------------
elif "Ban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[BL]ok"
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Success Masuk daftar orang bejat Boss")
except:
cl.sendText(msg.to,"Error")
elif "Unban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[WL]ok"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Sudah di keluarkan dari daftar bejat Boss")
except:
cl.sendText(msg.to,"There was no blacklist user")
elif msg.text in ["Clear banlist"]:
if msg.from_ in admin:
wait["blacklist"] = {}
cl.sendText(msg.to,"succes clear all banlist")
elif msg.text in ["Banned"]:
if msg.from_ in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Unbanned"]:
if msg.from_ in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Banlist"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"nothing")
else:
cl.sendText(msg.to,"blacklist user list")
mc = "[⎈]Blacklist User[⎈]\n"
for mi_d in wait["blacklist"]:
mc += "[✗] " + cl.getContact(mi_d).displayName + " \n"
cl.sendText(msg.to, mc + "")
#=============================================
# ----------------- BAN MEMBERS BY TAG (2 OR UP TO 10 MENTIONS AT ONCE)
elif ("Ban repeat " in msg.text):
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Banned ")
except:
pass
#============================================
#elif msg.text in ["Clear"]:
#if msg.toType == 2:
#group = cl.getGroup(msg.to)
#gMembMids = [contact.mid for contact in group.invitee]
#for _mid in gMembMids:
#random.choice(KAC).cancelGroupInvitation(msg.to,[_mid])
#cl.sendText(msg.to,"Clear boss!!!")
elif msg.text.lower() in ["Ats","Tag","mention all"]:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
jml = len(nama)
if jml <= 500:
    # send mentions in chunks of 100 members per message so no index is skipped
    for i in range(0, jml, 100):
        mention(msg.to, nama[i:i + 100])
else:
    cl.sendText(msg.to,'Member count exceeds the limit.')
cnt = Message()
cnt.text = "Done : " + str(jml) + " Members"
cnt.to = msg.to
cl.sendMessage(cnt)
#===========================================
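# Group-name protection: when a group-update event flags a name change
# (op.param3 == "1") for a group listed in protectname, restore the saved name
# from wait["pro_name"], blacklist whoever changed it (op.param2) and persist
# the blacklist to st2__b.json.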
if op.param3 == "1":
if op.param1 in protectname:
group = cl.getGroup(op.param1)
try:
group.name = wait["pro_name"][op.param1]
cl.updateGroup(group)
cl.sendText(op.param1, "Groupname protect now")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except Exception as e:
print e
pass
#------------------------------------------------------------------------------------
if op.type == 32:
OWN = ""
if op.param2 in Bots or op.param2 in admin:
pass
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
contact = cl.getContact(op.param2)
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
ks.kickoutFromGroup(op.param1,[op.param2])
kt.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
#===========================================
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n╠" + Name
wait2['ROM'][op.param1][op.param2] = "╠" + Name
else:
pass
except:
pass
#------------------------
if op.type == 59:
print op
except Exception as error:
print error
def autoSta():
count = 1
while True:
try:
for posts in cl.activity(1)["result"]["posts"]:
if posts["postInfo"]["liked"] is False:
if wait["likeOn"] == True:
cl.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
ki.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kk.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kc.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
ks.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kt.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
if wait["commentOn"] == True:
if posts["userInfo"]["writerMid"] in wait["commentBlack"]:
pass
else:
cl.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
ki.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
kk.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
kc.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
ks.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
kt.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
except:
count += 1
if(count == 50):
sys.exit(0)
else:
pass
thread1 = threading.Thread(target=autoSta)
thread1.daemon = True
thread1.start()
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT in ["10","20","30","40","50","00"]:
return False
else:
return True
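# a2() is a small throttle helper: it returns False only when the current minute
# ends in 0 (every ten minutes) and True otherwise; nameUpdate() keeps its use
# commented out and simply refreshes the clock-in-name every 600 seconds instead.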
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
test_tune_restore.py
|
# coding: utf-8
import signal
from collections import Counter
import os
import shutil
import tempfile
import time
from typing import List
import unittest
import skopt
import numpy as np
from hyperopt import hp
from nevergrad.optimization import optimizerlib
from zoopt import ValueType
from hebo.design_space.design_space import DesignSpace as HEBODesignSpace
import ray
from ray import tune
from ray._private.test_utils import recursive_fnmatch
from ray.rllib import _register_all
from ray.tune.callback import Callback
from ray.tune.suggest.basic_variant import BasicVariantGenerator
from ray.tune.suggest import ConcurrencyLimiter, Searcher
from ray.tune.suggest.hyperopt import HyperOptSearch
from ray.tune.suggest.dragonfly import DragonflySearch
from ray.tune.suggest.bayesopt import BayesOptSearch
from ray.tune.suggest.flaml import CFO
from ray.tune.suggest.skopt import SkOptSearch
from ray.tune.suggest.nevergrad import NevergradSearch
from ray.tune.suggest.optuna import OptunaSearch, param as ot_param
from ray.tune.suggest.sigopt import SigOptSearch
from ray.tune.suggest.zoopt import ZOOptSearch
from ray.tune.suggest.hebo import HEBOSearch
from ray.tune.trial import Trial
from ray.tune.utils import validate_save_restore
from ray.tune.utils._mock_trainable import MyTrainableClass
class TuneRestoreTest(unittest.TestCase):
def setUp(self):
ray.init(num_cpus=1, num_gpus=0, local_mode=True)
tmpdir = tempfile.mkdtemp()
test_name = "TuneRestoreTest"
tune.run(
"PG",
name=test_name,
stop={"training_iteration": 1},
checkpoint_freq=1,
local_dir=tmpdir,
config={
"env": "CartPole-v0",
"framework": "tf",
},
)
logdir = os.path.expanduser(os.path.join(tmpdir, test_name))
self.logdir = logdir
self.checkpoint_path = recursive_fnmatch(logdir, "checkpoint-1")[0]
def tearDown(self):
shutil.rmtree(self.logdir)
ray.shutdown()
_register_all()
def testTuneRestore(self):
self.assertTrue(os.path.isfile(self.checkpoint_path))
tune.run(
"PG",
name="TuneRestoreTest",
stop={"training_iteration": 2}, # train one more iteration.
checkpoint_freq=1,
restore=self.checkpoint_path, # Restore the checkpoint
config={
"env": "CartPole-v0",
"framework": "tf",
},
)
def testPostRestoreCheckpointExistence(self):
"""Tests that checkpoint restored from is not deleted post-restore."""
self.assertTrue(os.path.isfile(self.checkpoint_path))
tune.run(
"PG",
name="TuneRestoreTest",
stop={"training_iteration": 2},
checkpoint_freq=1,
keep_checkpoints_num=1,
restore=self.checkpoint_path,
config={
"env": "CartPole-v0",
"framework": "tf",
},
)
self.assertTrue(os.path.isfile(self.checkpoint_path))
class TuneInterruptionTest(unittest.TestCase):
def setUp(self) -> None:
# Wait up to five seconds for placement groups when starting a trial
os.environ["TUNE_PLACEMENT_GROUP_WAIT_S"] = "5"
# Block for results even when placement groups are pending
os.environ["TUNE_TRIAL_STARTUP_GRACE_PERIOD"] = "0"
def testExperimentInterrupted(self):
import multiprocessing
trainer_semaphore = multiprocessing.Semaphore()
driver_semaphore = multiprocessing.Semaphore()
class SteppingCallback(Callback):
def on_step_end(self, iteration, trials, **info):
driver_semaphore.release() # Driver should continue
trainer_semaphore.acquire() # Wait until released
def _run(local_dir):
def _train(config):
for i in range(7):
tune.report(val=i)
tune.run(
_train,
local_dir=local_dir,
name="interrupt",
callbacks=[SteppingCallback()])
local_dir = tempfile.mkdtemp()
process = multiprocessing.Process(target=_run, args=(local_dir, ))
process.daemon = False
process.start()
exp_dir = os.path.join(local_dir, "interrupt")
# Skip first five steps
for i in range(5):
driver_semaphore.acquire() # Wait for callback
trainer_semaphore.release() # Continue training
driver_semaphore.acquire()
experiment_state_file = None
for file in os.listdir(exp_dir):
if file.startswith("experiment_state"):
experiment_state_file = os.path.join(exp_dir, file)
break
self.assertTrue(experiment_state_file)
last_mtime = os.path.getmtime(experiment_state_file)
# Now send kill signal
os.kill(process.pid, signal.SIGINT)
# Release trainer. It should handle the signal and try to
# checkpoint the experiment
trainer_semaphore.release()
time.sleep(2) # Wait for checkpoint
new_mtime = os.path.getmtime(experiment_state_file)
self.assertNotEqual(last_mtime, new_mtime)
shutil.rmtree(local_dir)
class TuneFailResumeGridTest(unittest.TestCase):
class FailureInjectorCallback(Callback):
"""Adds random failure injection to the TrialExecutor."""
def __init__(self, steps=20):
self._step = 0
self.steps = steps
def on_trial_start(self, trials, **info):
self._step += 1
if self._step >= self.steps:
print(f"Failing after step {self._step} with "
f"{len(trials)} trials")
raise RuntimeError
class CheckStateCallback(Callback):
"""Checks state for the experiment initialization."""
def __init__(self, expected_trials=20):
self.expected_trials = expected_trials
self._checked = False
def on_step_begin(self, iteration, trials, **kwargs):
if not self._checked:
assert len(trials) == self.expected_trials
self._checked = True
class CheckTrialResourcesCallback(Callback):
"""Checks if pending trials are requesting the right amount of
resources.
The check happens exactly once after `check_after` number of calls
to on_step_begin(). Note, we deliberately delay the check to after
`check_after` number of steps. This is because when we start a
tuning job from fresh (rather than restored), trial list is still
empty - any check now would be trivial and thus wasted.
"""
def __init__(self, expected_cpu: int, check_after: int = 1):
self._expected_cpu = expected_cpu
self._checked = False
self._check_after = check_after
def on_step_begin(self, iteration: int, trials: List["Trial"], **info):
if not self._checked and iteration >= self._check_after:
for trial in trials:
if trial.status == Trial.PENDING:
assert trial.resources.cpu == self._expected_cpu
self._checked = True
def setUp(self):
self.logdir = tempfile.mkdtemp()
os.environ["TUNE_GLOBAL_CHECKPOINT_S"] = "0"
# Wait up to 1.5 seconds for placement groups when starting a trial
os.environ["TUNE_PLACEMENT_GROUP_WAIT_S"] = "1.5"
# Block for results even when placement groups are pending
os.environ["TUNE_TRIAL_STARTUP_GRACE_PERIOD"] = "0"
# Change back to local_mode=True after this is resolved:
# https://github.com/ray-project/ray/issues/13932
ray.init(local_mode=False, num_cpus=2)
from ray.tune import register_trainable
register_trainable("trainable", MyTrainableClass)
def tearDown(self):
os.environ.pop("TUNE_GLOBAL_CHECKPOINT_S")
shutil.rmtree(self.logdir)
ray.shutdown()
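    # Shared pattern for the tests below: the first tune.run() is aborted part-way
    # by FailureInjectorCallback, then a second tune.run(resume=True) restores the
    # experiment; CheckStateCallback verifies how many trials were recovered, and
    # the final trial counts (e.g. 27 = 3 num_samples x 9 grid combinations)
    # confirm that no trials were lost.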
def testFailResumeGridSearch(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
config = dict(
num_samples=3,
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[self.FailureInjectorCallback()],
**config)
analysis = tune.run(
"trainable",
resume=True,
callbacks=[self.CheckStateCallback()],
**config)
assert len(analysis.trials) == 27
test_counter = Counter([t.config["test"] for t in analysis.trials])
assert all(v == 9 for v in test_counter.values())
test2_counter = Counter([t.config["test2"] for t in analysis.trials])
assert all(v == 9 for v in test2_counter.values())
# Unfinished trials' resources should be updated.
def testResourceUpdateInResume(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
config = dict(
num_samples=3,
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[
self.FailureInjectorCallback(),
self.CheckTrialResourcesCallback(1)
],
**config)
analysis = tune.run(
"trainable",
resume=True,
resources_per_trial={"cpu": 2},
callbacks=[self.CheckTrialResourcesCallback(2)],
**config)
assert len(analysis.trials) == 27
def testFailResumeWithPreset(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
search_alg = BasicVariantGenerator(points_to_evaluate=[{
"test": -1,
"test2": -1
}, {
"test": -1
}, {
"test2": -1
}])
config = dict(
num_samples=3 + 3, # 3 preset, 3 samples
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[self.FailureInjectorCallback(5)],
search_alg=search_alg,
**config)
analysis = tune.run(
"trainable",
resume=True,
callbacks=[self.CheckStateCallback(expected_trials=5)],
search_alg=search_alg,
**config)
assert len(analysis.trials) == 34
test_counter = Counter([t.config["test"] for t in analysis.trials])
assert test_counter.pop(-1) == 4
assert all(v == 10 for v in test_counter.values())
test2_counter = Counter([t.config["test2"] for t in analysis.trials])
assert test2_counter.pop(-1) == 4
assert all(v == 10 for v in test2_counter.values())
def testFailResumeAfterPreset(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
search_alg = BasicVariantGenerator(points_to_evaluate=[{
"test": -1,
"test2": -1
}, {
"test": -1
}, {
"test2": -1
}])
config = dict(
num_samples=3 + 3, # 3 preset, 3 samples
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[self.FailureInjectorCallback(15)],
search_alg=search_alg,
**config)
analysis = tune.run(
"trainable",
resume=True,
callbacks=[self.CheckStateCallback(expected_trials=15)],
search_alg=search_alg,
**config)
assert len(analysis.trials) == 34
test_counter = Counter([t.config["test"] for t in analysis.trials])
assert test_counter.pop(-1) == 4
assert all(v == 10 for v in test_counter.values())
test2_counter = Counter([t.config["test2"] for t in analysis.trials])
assert test2_counter.pop(-1) == 4
assert all(v == 10 for v in test2_counter.values())
def testMultiExperimentFail(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
experiments = []
for i in range(3):
experiments.append(
tune.Experiment(
run=MyTrainableClass,
name="trainable",
num_samples=2,
config={
"test": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 1},
local_dir=self.logdir))
with self.assertRaises(RuntimeError):
tune.run(
experiments,
callbacks=[self.FailureInjectorCallback(10)],
fail_fast=True)
analysis = tune.run(
experiments,
resume=True,
callbacks=[self.CheckStateCallback(expected_trials=10)],
fail_fast=True)
assert len(analysis.trials) == 18
def testWarningLargeGrid(self):
config = dict(
num_samples=3,
fail_fast=True,
config={
"test": tune.grid_search(list(range(20))),
"test2": tune.grid_search(list(range(20))),
"test3": tune.grid_search(list(range(20))),
"test4": tune.grid_search(list(range(20))),
"test5": tune.grid_search(list(range(20))),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1)
with self.assertWarnsRegex(UserWarning,
"exceeds the serialization threshold"):
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[self.FailureInjectorCallback(10)],
**config)
class TuneExampleTest(unittest.TestCase):
def setUp(self):
ray.init(num_cpus=2)
def tearDown(self):
ray.shutdown()
_register_all()
def testPBTKeras(self):
from ray.tune.examples.pbt_tune_cifar10_with_keras import Cifar10Model
from tensorflow.python.keras.datasets import cifar10
cifar10.load_data()
validate_save_restore(Cifar10Model)
validate_save_restore(Cifar10Model, use_object_store=True)
def testPyTorchMNIST(self):
from ray.tune.examples.mnist_pytorch_trainable import TrainMNIST
from torchvision import datasets
datasets.MNIST("~/data", train=True, download=True)
validate_save_restore(TrainMNIST)
validate_save_restore(TrainMNIST, use_object_store=True)
def testHyperbandExample(self):
from ray.tune.examples.hyperband_example import MyTrainableClass
validate_save_restore(MyTrainableClass)
validate_save_restore(MyTrainableClass, use_object_store=True)
def testAsyncHyperbandExample(self):
from ray.tune.utils.mock import MyTrainableClass
validate_save_restore(MyTrainableClass)
validate_save_restore(MyTrainableClass, use_object_store=True)
class AutoInitTest(unittest.TestCase):
def testTuneRestore(self):
self.assertFalse(ray.is_initialized())
tune.run("__fake", name="TestAutoInit", stop={"training_iteration": 1})
self.assertTrue(ray.is_initialized())
def tearDown(self):
ray.shutdown()
_register_all()
class AbstractWarmStartTest:
def setUp(self):
ray.init(num_cpus=1, local_mode=True)
self.tmpdir = tempfile.mkdtemp()
self.experiment_name = "results"
def tearDown(self):
shutil.rmtree(self.tmpdir)
ray.shutdown()
_register_all()
def set_basic_conf(self):
raise NotImplementedError()
def run_part_from_scratch(self):
np.random.seed(162)
search_alg, cost = self.set_basic_conf()
search_alg = ConcurrencyLimiter(search_alg, 1)
results_exp_1 = tune.run(
cost,
num_samples=5,
search_alg=search_alg,
verbose=0,
name=self.experiment_name,
local_dir=self.tmpdir)
checkpoint_path = os.path.join(self.tmpdir, "warmStartTest.pkl")
search_alg.save(checkpoint_path)
return results_exp_1, np.random.get_state(), checkpoint_path
def run_from_experiment_restore(self, random_state):
search_alg, cost = self.set_basic_conf()
search_alg = ConcurrencyLimiter(search_alg, 1)
search_alg.restore_from_dir(
os.path.join(self.tmpdir, self.experiment_name))
results = tune.run(
cost,
num_samples=5,
search_alg=search_alg,
verbose=0,
name=self.experiment_name,
local_dir=self.tmpdir)
return results
def run_explicit_restore(self, random_state, checkpoint_path):
np.random.set_state(random_state)
search_alg2, cost = self.set_basic_conf()
search_alg2 = ConcurrencyLimiter(search_alg2, 1)
search_alg2.restore(checkpoint_path)
return tune.run(cost, num_samples=5, search_alg=search_alg2, verbose=0)
def run_full(self):
np.random.seed(162)
search_alg3, cost = self.set_basic_conf()
search_alg3 = ConcurrencyLimiter(search_alg3, 1)
return tune.run(
cost, num_samples=10, search_alg=search_alg3, verbose=0)
def testWarmStart(self):
results_exp_1, r_state, checkpoint_path = self.run_part_from_scratch()
results_exp_2 = self.run_explicit_restore(r_state, checkpoint_path)
results_exp_3 = self.run_full()
trials_1_config = [trial.config for trial in results_exp_1.trials]
trials_2_config = [trial.config for trial in results_exp_2.trials]
trials_3_config = [trial.config for trial in results_exp_3.trials]
self.assertEqual(trials_1_config + trials_2_config, trials_3_config)
def testRestore(self):
results_exp_1, r_state, checkpoint_path = self.run_part_from_scratch()
results_exp_2 = self.run_from_experiment_restore(r_state)
results_exp_3 = self.run_full()
trials_1_config = [trial.config for trial in results_exp_1.trials]
trials_2_config = [trial.config for trial in results_exp_2.trials]
trials_3_config = [trial.config for trial in results_exp_3.trials]
self.assertEqual(trials_1_config + trials_2_config, trials_3_config)
class HyperoptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
space = {
"x": hp.uniform("x", 0, 10),
"y": hp.uniform("y", -10, 10),
"z": hp.uniform("z", -10, 0)
}
def cost(space, reporter):
loss = space["x"]**2 + space["y"]**2 + space["z"]**2
reporter(loss=loss)
search_alg = HyperOptSearch(
space,
metric="loss",
mode="min",
random_state_seed=5,
n_initial_points=1,
max_concurrent=1000 # Here to avoid breaking back-compat.
)
return search_alg, cost
class BayesoptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self, analysis=None):
space = {"width": (0, 20), "height": (-100, 100)}
def cost(space, reporter):
reporter(loss=(space["height"] - 14)**2 - abs(space["width"] - 3))
search_alg = BayesOptSearch(
space, metric="loss", mode="min", analysis=analysis)
return search_alg, cost
def testBootStrapAnalysis(self):
analysis = self.run_full()
search_alg3, cost = self.set_basic_conf(analysis)
search_alg3 = ConcurrencyLimiter(search_alg3, 1)
tune.run(cost, num_samples=10, search_alg=search_alg3, verbose=0)
class CFOWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
space = {
"height": tune.uniform(-100, 100),
"width": tune.randint(0, 100),
}
def cost(param, reporter):
reporter(loss=(param["height"] - 14)**2 - abs(param["width"] - 3))
search_alg = CFO(
space=space,
metric="loss",
mode="min",
seed=20,
)
return search_alg, cost
class SkoptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
optimizer = skopt.Optimizer([(0, 20), (-100, 100)])
previously_run_params = [[10, 0], [15, -20]]
known_rewards = [-189, -1144]
def cost(space, reporter):
reporter(loss=(space["height"]**2 + space["width"]**2))
search_alg = SkOptSearch(
optimizer,
["width", "height"],
metric="loss",
mode="min",
max_concurrent=1000, # Here to avoid breaking back-compat.
points_to_evaluate=previously_run_params,
evaluated_rewards=known_rewards)
return search_alg, cost
class NevergradWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
instrumentation = 2
parameter_names = ["height", "width"]
optimizer = optimizerlib.OnePlusOne(instrumentation)
def cost(space, reporter):
reporter(loss=(space["height"] - 14)**2 - abs(space["width"] - 3))
search_alg = NevergradSearch(
optimizer,
parameter_names,
metric="loss",
mode="min",
max_concurrent=1000, # Here to avoid breaking back-compat.
)
return search_alg, cost
class OptunaWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
from optuna.samplers import TPESampler
space = [
ot_param.suggest_uniform("width", 0, 20),
ot_param.suggest_uniform("height", -100, 100)
]
def cost(space, reporter):
reporter(loss=(space["height"] - 14)**2 - abs(space["width"] - 3))
search_alg = OptunaSearch(
space, sampler=TPESampler(seed=10), metric="loss", mode="min")
return search_alg, cost
class DragonflyWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
from dragonfly.opt.gp_bandit import EuclideanGPBandit
from dragonfly.exd.experiment_caller import EuclideanFunctionCaller
from dragonfly import load_config
def cost(space, reporter):
height, width = space["point"]
reporter(loss=(height - 14)**2 - abs(width - 3))
domain_vars = [{
"name": "height",
"type": "float",
"min": -10,
"max": 10
}, {
"name": "width",
"type": "float",
"min": 0,
"max": 20
}]
domain_config = load_config({"domain": domain_vars})
func_caller = EuclideanFunctionCaller(
None, domain_config.domain.list_of_domains[0])
optimizer = EuclideanGPBandit(func_caller, ask_tell_mode=True)
search_alg = DragonflySearch(
optimizer,
metric="loss",
mode="min",
max_concurrent=1000, # Here to avoid breaking back-compat.
)
return search_alg, cost
@unittest.skip("Skip because this doesn't seem to work.")
def testWarmStart(self):
pass
@unittest.skip("Skip because this doesn't seem to work.")
def testRestore(self):
pass
class SigOptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
space = [
{
"name": "width",
"type": "int",
"bounds": {
"min": 0,
"max": 20
},
},
{
"name": "height",
"type": "int",
"bounds": {
"min": -100,
"max": 100
},
},
]
def cost(space, reporter):
reporter(loss=(space["height"] - 14)**2 - abs(space["width"] - 3))
# Unfortunately, SigOpt doesn't allow setting of random state. Thus,
# we always end up with different suggestions, which is unsuitable
# for the warm start test. Here we make do with points_to_evaluate,
# and ensure that state is preserved over checkpoints and restarts.
points = [
{
"width": 5,
"height": 20
},
{
"width": 10,
"height": -20
},
{
"width": 15,
"height": 30
},
{
"width": 5,
"height": -30
},
{
"width": 10,
"height": 40
},
{
"width": 15,
"height": -40
},
{
"width": 5,
"height": 50
},
{
"width": 10,
"height": -50
},
{
"width": 15,
"height": 60
},
{
"width": 12,
"height": -60
},
]
search_alg = SigOptSearch(
space,
name="SigOpt Example Experiment",
max_concurrent=1,
metric="loss",
mode="min",
points_to_evaluate=points)
return search_alg, cost
def testWarmStart(self):
if "SIGOPT_KEY" not in os.environ:
self.skipTest("No SigOpt API key found in environment.")
return
super().testWarmStart()
def testRestore(self):
if "SIGOPT_KEY" not in os.environ:
self.skipTest("No SigOpt API key found in environment.")
return
super().testRestore()
class ZOOptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
dim_dict = {
"height": (ValueType.CONTINUOUS, [-100, 100], 1e-2),
"width": (ValueType.DISCRETE, [0, 20], False)
}
def cost(param, reporter):
reporter(loss=(param["height"] - 14)**2 - abs(param["width"] - 3))
search_alg = ZOOptSearch(
algo="Asracos", # only support ASRacos currently
budget=200,
dim_dict=dim_dict,
metric="loss",
mode="min")
return search_alg, cost
class HEBOWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
space_config = [
{
"name": "width",
"type": "num",
"lb": 0,
"ub": 20
},
{
"name": "height",
"type": "num",
"lb": -100,
"ub": 100
},
]
space = HEBODesignSpace().parse(space_config)
def cost(param, reporter):
reporter(loss=(param["height"] - 14)**2 - abs(param["width"] - 3))
search_alg = HEBOSearch(
space=space, metric="loss", mode="min", random_state_seed=5)
return search_alg, cost
class SearcherTest(unittest.TestCase):
class MockSearcher(Searcher):
def __init__(self, data):
self.data = data
def save(self, path):
with open(path, "w") as f:
f.write(self.data)
def restore(self, path):
with open(path, "r") as f:
self.data = f.read()
def testSaveRestoreDir(self):
tmpdir = tempfile.mkdtemp()
original_data = "hello-its-me"
searcher = self.MockSearcher(original_data)
searcher.save_to_dir(tmpdir)
searcher_2 = self.MockSearcher("no-its-not-me")
searcher_2.restore_from_dir(tmpdir)
assert searcher_2.data == original_data
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__] + sys.argv[1:]))
|
main_view.py
|
import os
import sys
import torch
from PyQt5 import QtWidgets
from PyQt5.QtGui import QPixmap, QMovie
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot
from torchvision.utils import save_image
from PIL import Image
from views.main_view_ui import Ui_MainWindow
class Stream(QObject):
newText = pyqtSignal(str)
def write(self, text):
self.newText.emit(str(text))
class MainView(QMainWindow):
def __init__(self, args, model, main_controller):
super().__init__()
self.args = args
self.export_dir = os.path.join("result", self.args.name)
# Set app color
self._dark_mode()
# Combine model, view, and controller
self._model = model
self._main_controller = main_controller
self._ui = Ui_MainWindow()
self._ui.setupUi(self, self.args)
self._create_actions()
self._make_shortcut()
self._make_connections()
self._ui.paint_scene.hide_pen_preview()
self.blending_path = []
# console panel output
# sys.stdout = Stream(newText=self.onUpdateText)
isExist = os.path.exists(self.export_dir)
if not isExist:
# Create a new directory because it does not exist
os.makedirs(self.export_dir)
def _create_actions(self):
# MainView actions
self.increase_size_action = QtWidgets.QAction('Increase Size', self)
self.addAction(self.increase_size_action)
self.decrease_size_action = QtWidgets.QAction('Decrease Size', self)
self.addAction(self.decrease_size_action)
def _make_shortcut(self):
# UI shortcuts
self._ui.export_action.setShortcut("Ctrl+S")
self._ui.clear_all_action.setShortcut("Ctrl+C")
self._ui.run_action.setShortcut("Ctrl+R")
# MainView shortcuts
self.increase_size_action.setShortcut(']')
self.decrease_size_action.setShortcut('[')
def _make_connections(self):
self._ui.export_action.triggered.connect(lambda: self.save_img("result", 5))
self._ui.clear_all_action.triggered.connect(lambda: self._ui.paint_scene.clear())
self._ui.preference_action.triggered.connect(lambda: self._ui.preference_view.show_event(self.geometry().center()))
self._ui.run_action.triggered.connect(lambda: self.run_model())
self._ui.run_btn.clicked.connect(lambda: self.run_model())
self._model.ddim_changed.connect(self.ddim_update)
self._model.image_blending_changed.connect(self.image_blending_update)
self._model.finished.connect(self.exit_model)
self._ui.brush_action.triggered.connect(lambda: self._ui.paint_scene.choose_brush())
self._ui.eraser_action.triggered.connect(lambda: self._ui.paint_scene.choose_eraser())
self._ui.paint_scene.brushChanged_connect(self._update_brush_ui)
self._ui.palette_action.triggered.connect(self.update_pen_color)
self.increase_size_action.triggered.connect(lambda: self._ui.paint_scene.increment_pen_size(10))
self.decrease_size_action.triggered.connect(lambda: self._ui.paint_scene.increment_pen_size(-10))
self._ui.size_slider.valueChanged.connect(lambda: self.set_pen_size(self._ui.size_slider.value()))
def save_img(self, name, step):
# check data is exist
if len(self.blending_path) <= 1 :
return
# open buffer images
imgs = []
for path in self.blending_path:
imgs.append(Image.open(path))
# concate images
result = self.get_concat_h(imgs[0], imgs[1])
for i in range(2, len(imgs)):
result = self.get_concat_h(result, imgs[i])
# save strip image
save_path = os.path.join(self.export_dir, name+".png")
result.save(save_path)
# form a circle with first image
for i in range(self.args.out_width):
result = self.get_concat_h(result, imgs[i%(self.args.canvas*2)])
width = imgs[0].width * self.args.out_width
height = imgs[0].height
# save gif
gif_buffer = []
for i in range(0, result.width-width, step):
buffer = Image.new('RGB', (width , height))
region = result.crop((i, 0, i+width, height))
buffer.paste(region, (0, 0))
gif_buffer.append(buffer)
gif_path = os.path.join(self.export_dir, name+".gif")
gif_buffer[0].save(fp=gif_path, format='GIF', append_images=gif_buffer[1:],
save_all=True, duration=self.args.duration, loop=0)
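    # demo_gif mirrors save_img: it stitches the blended frames into one long
    # horizontal strip and slides a fixed-width crop window across it (with a
    # coarser step and no PNG export) to build the preview GIF shown after a run.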
def demo_gif(self):
# check data is exist
if len(self.blending_path) <= 1 :
return
# open buffer images
imgs = []
for path in self.blending_path:
imgs.append(Image.open(path))
# concate images
result = self.get_concat_h(imgs[0], imgs[1])
for i in range(2, len(imgs)):
result = self.get_concat_h(result, imgs[i])
# form a circle with first image
for i in range(self.args.out_width):
result = self.get_concat_h(result, imgs[i%(self.args.canvas*2)])
width = imgs[0].width * self.args.out_width
height = imgs[0].height
# save gif
gif_buffer = []
for i in range(0, result.width-width, 20):
buffer = Image.new('RGB', (width , height))
region = result.crop((i, 0, i+width, height))
buffer.paste(region, (0, 0))
gif_buffer.append(buffer)
gif_path = os.path.join(self.export_dir, "tmp.gif")
gif_buffer[0].save(fp=gif_path, format='GIF', append_images=gif_buffer[1:],
save_all=True, duration=self.args.duration*4, loop=0)
def get_concat_h(self, im1, im2):
dst = Image.new('RGB', (im1.width + im2.width, im1.height))
dst.paste(im1, (0, 0))
dst.paste(im2, (im1.width, 0))
return dst
def run_model(self):
self.blending_path.clear()
strokes = self._ui.paint_scene.get_img()
for id, img in enumerate(strokes):
save_path = os.path.join(self.export_dir, "stroke_"+str(id)+".png")
img.save(save_path)
self._model.set_strokes(strokes)
self._ui.run_btn.setDisabled(True)
self._model.start()
self._ui.run_btn.setText("Infer ddim 1 ...")
def exit_model(self):
import threading
self.view_thread = threading.Thread(target=self.demo_gif)
self.view_thread.start()
self.view_thread.join()
gif = QMovie(self.export_dir+ "/tmp.gif")
self._ui.blending_scene.labels.setMovie(gif)
gif.start()
self._ui.run_btn.setDisabled(False)
self._ui.run_btn.setText("Run")
@pyqtSlot(str, int, torch.Tensor)
def ddim_update(self, src, id, imgs_tensor):
if id < self.args.canvas-1:
self._ui.run_btn.setText("Infer ddim "+str(id+2)+" ...")
else:
self._ui.run_btn.setText("Infer blending ...")
save_path = os.path.join(self.export_dir, src+str(id)+".png")
save_image(imgs_tensor, save_path)
pim = QPixmap(save_path)
self._ui.ddim_scene.labels[id].setPixmap(pim)
@pyqtSlot(str, int, torch.Tensor)
def image_blending_update(self, src, id, imgs_tensor):
save_path = os.path.join(self.export_dir, src+str(id)+".png")
save_image(imgs_tensor, save_path)
# pim = QPixmap(save_path)
self.blending_path.append(save_path)
# self._ui.blending_scene.labels[id].setPixmap(pim)
def _update_brush_ui(self):
self._ui.size_slider.setValue(self._ui.paint_scene.pen_size)
def set_pen_size(self, size):
"""
Sets pen size from slider input
Args:
size (int): diameter of pen
"""
self._ui.paint_scene.set_pen_size(size)
self._update_brush_ui()
def set_pen_color(self, color):
"""
sets pen color
Args:
color (QColor): color to set
"""
self._ui.paint_scene.set_pen_color(color)
self._update_brush_ui()
def update_pen_color(self):
color = self._ui.color_dialog.getColor(options=QtWidgets.QColorDialog.ShowAlphaChannel)
self._ui.paint_scene.set_pen_color(color)
def onUpdateText(self, text):
from PyQt5.QtGui import QTextCursor
cursor = self._ui.process.textCursor()
cursor.movePosition(QTextCursor.End)
cursor.insertText(text)
self._ui.process.setTextCursor(cursor)
self._ui.process.ensureCursorVisible()
def __del__(self):
sys.stdout = sys.__stdout__
def closeEvent(self, event):
"""Shuts down application on close."""
# Return standard output to defaults.
sys.stdout = sys.__stdout__
self._ui.preference_view.close()
super().closeEvent(event)
def _dark_mode(self):
from PyQt5.QtGui import QPalette
from PyQt5.QtGui import QColor
from PyQt5.QtCore import Qt
# using a palette to switch to dark colors:
dark_palette = QPalette()
dark_palette.setColor(QPalette.Window, QColor(53, 53, 53))
dark_palette.setColor(QPalette.WindowText, Qt.white)
dark_palette.setColor(QPalette.Base, QColor(25, 25, 25))
dark_palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))
dark_palette.setColor(QPalette.ToolTipBase, Qt.black)
dark_palette.setColor(QPalette.ToolTipText, Qt.white)
dark_palette.setColor(QPalette.Text, Qt.white)
dark_palette.setColor(QPalette.Button, QColor(53, 53, 53))
dark_palette.setColor(QPalette.ButtonText, Qt.white)
dark_palette.setColor(QPalette.BrightText, Qt.red)
dark_palette.setColor(QPalette.Link, QColor(42, 130, 218))
dark_palette.setColor(QPalette.Highlight, QColor(42, 130, 218))
dark_palette.setColor(QPalette.HighlightedText, Qt.black)
self.setPalette(dark_palette)
|
train_A3C.py
|
from __future__ import print_function
from __future__ import division
import threading
import multiprocessing
import os
import argparse
from time import sleep
import tensorflow as tf
from env_doom import Doom
from net import Net
from worker import Worker
from utils import print_net_params_number
def main(args):
if args.save_path is not None and not os.path.exists(args.save_path):
os.makedirs(args.save_path)
tf.reset_default_graph()
global_ep = tf.Variable(
0, dtype=tf.int32, name='global_ep', trainable=False)
env = Doom(visiable=False)
Net(env.state_dim, env.action_dim, 'global', None)
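    # Global network (scope 'global', no optimizer passed); each Worker is
    # expected to keep a local copy that syncs with it (see worker.py).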
num_workers = args.parallel
workers = []
# create workers
for i in range(num_workers):
w = Worker(i, Doom(), global_ep, args)
workers.append(w)
print('%d workers in total.\n' % num_workers)
saver = tf.train.Saver(max_to_keep=3)
with tf.Session() as sess:
coord = tf.train.Coordinator()
if args.model_path is not None:
print('Loading model...')
ckpt = tf.train.get_checkpoint_state(args.model_path)
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print('Initializing a new model...')
sess.run(tf.global_variables_initializer())
print_net_params_number()
# Start a work process for each worker in a separate thread
worker_threads = []
for w in workers:
# Bind the worker explicitly instead of closing over the loop variable,
# which could late-bind to the last worker once the loop advances.
t = threading.Thread(target=w.run, args=(sess, coord, saver))
t.start()
sleep(0.5)
worker_threads.append(t)
coord.join(worker_threads)
def args_parse():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path', default=None,
help='Whether to use a saved model. (*None|model path)')
parser.add_argument(
'--save_path', default='/tmp/a3c_doom/model/',
help='Path to save a model during training.')
parser.add_argument(
    '--save_every', type=int, default=50, help='Interval of saving the model')
parser.add_argument(
    '--max_ep_len', type=int, default=300, help='Max episode steps')
parser.add_argument(
    '--max_ep', type=int, default=3000, help='Max training episodes')
parser.add_argument(
    '--parallel', type=int, default=multiprocessing.cpu_count(),
    help='Number of parallel threads')
args = parser.parse_args()
return args
if __name__ == '__main__':
# ignore warnings by tensorflow
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
main(args_parse())
|
test_api.py
|
#!/usr/bin/python
##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import asyncio
import json
import os
import signal
import time
from math import pi
from multiprocessing import Process
from unittest.mock import patch
import networkx as nx
import numpy as np
import pytest
import requests_mock
from rpcq import Server
from rpcq.messages import BinaryExecutableRequest, BinaryExecutableResponse
from pyquil.api import QVMConnection, QPUCompiler, get_qc, QVMCompiler
from pyquil.api._base_connection import (
validate_noise_probabilities,
validate_qubit_list,
prepare_register_list,
)
from pyquil.device import ISA, NxDevice
from pyquil.gates import CNOT, H, MEASURE, PHASE, Z, RZ, RX, CZ
from pyquil.paulis import PauliTerm
from pyquil.quil import Program
from pyquil.quilbase import Halt, Declare
from pyquil.quilatom import MemoryReference
EMPTY_PROGRAM = Program()
BELL_STATE = Program(H(0), CNOT(0, 1))
BELL_STATE_MEASURE = Program(
Declare("ro", "BIT", 2),
H(0),
CNOT(0, 1),
MEASURE(0, MemoryReference("ro", 0)),
MEASURE(1, MemoryReference("ro", 1)),
)
COMPILED_BELL_STATE = Program(
[
RZ(pi / 2, 0),
RX(pi / 2, 0),
RZ(-pi / 2, 1),
RX(pi / 2, 1),
CZ(1, 0),
RZ(-pi / 2, 0),
RX(-pi / 2, 1),
RZ(pi / 2, 1),
Halt(),
]
)
DUMMY_ISA_DICT = {"1Q": {"0": {}, "1": {}}, "2Q": {"0-1": {}}}
DUMMY_ISA = ISA.from_dict(DUMMY_ISA_DICT)
COMPILED_BYTES_ARRAY = b"SUPER SECRET PACKAGE"
RB_ENCODED_REPLY = [[0, 0], [1, 1]]
RB_REPLY = [Program("H 0\nH 0\n"), Program("PHASE(pi/2) 0\nPHASE(pi/2) 0\n")]
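# The *_mock tests below stub the QVM endpoint with requests_mock: each
# mock_response asserts the exact JSON payload that QVMConnection posts and
# returns a canned reply, so client-side serialization is exercised without a
# live QVM.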
def test_sync_run_mock(qvm: QVMConnection):
mock_qvm = qvm
mock_endpoint = mock_qvm.sync_endpoint
def mock_response(request, context):
assert json.loads(request.text) == {
"type": "multishot",
"addresses": {"ro": [0, 1]},
"trials": 2,
"compiled-quil": "DECLARE ro BIT[2]\nH 0\nCNOT 0 1\nMEASURE 0 ro[0]\nMEASURE 1 ro[1]\n",
"rng-seed": 52,
}
return '{"ro": [[0,0],[1,1]]}'
with requests_mock.Mocker() as m:
m.post(mock_endpoint + "/qvm", text=mock_response)
assert mock_qvm.run(BELL_STATE_MEASURE, [0, 1], trials=2) == [[0, 0], [1, 1]]
# Test no classical addresses
m.post(mock_endpoint + "/qvm", text=mock_response)
assert mock_qvm.run(BELL_STATE_MEASURE, trials=2) == [[0, 0], [1, 1]]
with pytest.raises(ValueError):
mock_qvm.run(EMPTY_PROGRAM)
def test_sync_run(qvm: QVMConnection):
assert qvm.run(BELL_STATE_MEASURE, [0, 1], trials=2) == [[0, 0], [1, 1]]
# Test range as well
assert qvm.run(BELL_STATE_MEASURE, range(2), trials=2) == [[0, 0], [1, 1]]
# Test numpy ints
assert qvm.run(BELL_STATE_MEASURE, np.arange(2), trials=2) == [[0, 0], [1, 1]]
# Test no classical addresses
assert qvm.run(BELL_STATE_MEASURE, trials=2) == [[0, 0], [1, 1]]
with pytest.raises(ValueError):
qvm.run(EMPTY_PROGRAM)
def test_sync_run_and_measure_mock(qvm: QVMConnection):
mock_qvm = qvm
mock_endpoint = mock_qvm.sync_endpoint
def mock_response(request, context):
assert json.loads(request.text) == {
"type": "multishot-measure",
"qubits": [0, 1],
"trials": 2,
"compiled-quil": "H 0\nCNOT 0 1\n",
"rng-seed": 52,
}
return "[[0,0],[1,1]]"
with requests_mock.Mocker() as m:
m.post(mock_endpoint + "/qvm", text=mock_response)
assert mock_qvm.run_and_measure(BELL_STATE, [0, 1], trials=2) == [[0, 0], [1, 1]]
with pytest.raises(ValueError):
mock_qvm.run_and_measure(EMPTY_PROGRAM, [0])
def test_sync_run_and_measure(qvm):
assert qvm.run_and_measure(BELL_STATE, [0, 1], trials=2) == [[1, 1], [0, 0]]
assert qvm.run_and_measure(BELL_STATE, [0, 1]) == [[1, 1]]
with pytest.raises(ValueError):
qvm.run_and_measure(EMPTY_PROGRAM, [0])
WAVEFUNCTION_BINARY = (
b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00?\xe6\xa0\x9ef"
b"\x7f;\xcc\x00\x00\x00\x00\x00\x00\x00\x00\xbf\xe6\xa0\x9ef\x7f;\xcc\x00"
b"\x00\x00\x00\x00\x00\x00\x00"
)
WAVEFUNCTION_PROGRAM = Program(
Declare("ro", "BIT"), H(0), CNOT(0, 1), MEASURE(0, MemoryReference("ro")), H(0)
)
def test_sync_expectation_mock(qvm: QVMConnection):
mock_qvm = qvm
mock_endpoint = mock_qvm.sync_endpoint
def mock_response(request, context):
assert json.loads(request.text) == {
"type": "expectation",
"state-preparation": BELL_STATE.out(),
"operators": ["Z 0\n", "Z 1\n", "Z 0\nZ 1\n"],
"rng-seed": 52,
}
return b"[0.0, 0.0, 1.0]"
with requests_mock.Mocker() as m:
m.post(mock_endpoint + "/qvm", content=mock_response)
result = mock_qvm.expectation(
BELL_STATE, [Program(Z(0)), Program(Z(1)), Program(Z(0), Z(1))]
)
exp_expected = [0.0, 0.0, 1.0]
np.testing.assert_allclose(exp_expected, result)
with requests_mock.Mocker() as m:
m.post(mock_endpoint + "/qvm", content=mock_response)
z0 = PauliTerm("Z", 0)
z1 = PauliTerm("Z", 1)
z01 = z0 * z1
result = mock_qvm.pauli_expectation(BELL_STATE, [z0, z1, z01])
exp_expected = [0.0, 0.0, 1.0]
np.testing.assert_allclose(exp_expected, result)
def test_sync_expectation(qvm):
result = qvm.expectation(BELL_STATE, [Program(Z(0)), Program(Z(1)), Program(Z(0), Z(1))])
exp_expected = [0.0, 0.0, 1.0]
np.testing.assert_allclose(exp_expected, result)
def test_sync_expectation_2(qvm):
z0 = PauliTerm("Z", 0)
z1 = PauliTerm("Z", 1)
z01 = z0 * z1
result = qvm.pauli_expectation(BELL_STATE, [z0, z1, z01])
exp_expected = [0.0, 0.0, 1.0]
np.testing.assert_allclose(exp_expected, result)
def test_sync_paulisum_expectation(qvm: QVMConnection):
mock_qvm = qvm
mock_endpoint = mock_qvm.sync_endpoint
def mock_response(request, context):
assert json.loads(request.text) == {
"type": "expectation",
"state-preparation": BELL_STATE.out(),
"operators": ["Z 0\nZ 1\n", "Z 0\n", "Z 1\n"],
"rng-seed": 52,
}
return b"[1.0, 0.0, 0.0]"
with requests_mock.Mocker() as m:
m.post(mock_endpoint + "/qvm", content=mock_response)
z0 = PauliTerm("Z", 0)
z1 = PauliTerm("Z", 1)
z01 = z0 * z1
result = mock_qvm.pauli_expectation(BELL_STATE, 1j * z01 + z0 + z1)
exp_expected = 1j
np.testing.assert_allclose(exp_expected, result)
def test_sync_wavefunction(qvm):
qvm.random_seed = 0 # this test uses a stochastic program and assumes we measure 0
result = qvm.wavefunction(WAVEFUNCTION_PROGRAM)
wf_expected = np.array([0.0 + 0.0j, 0.0 + 0.0j, 0.70710678 + 0.0j, -0.70710678 + 0.0j])
np.testing.assert_allclose(result.amplitudes, wf_expected)
def test_validate_noise_probabilities():
with pytest.raises(TypeError):
validate_noise_probabilities(1)
with pytest.raises(TypeError):
validate_noise_probabilities(["a", "b", "c"])
with pytest.raises(ValueError):
validate_noise_probabilities([0.0, 0.0, 0.0, 0.0])
with pytest.raises(ValueError):
validate_noise_probabilities([0.5, 0.5, 0.5])
with pytest.raises(ValueError):
validate_noise_probabilities([-0.5, -0.5, -0.5])
def test_validate_qubit_list():
with pytest.raises(TypeError):
validate_qubit_list([-1, 1])
with pytest.raises(TypeError):
validate_qubit_list(["a", 0], 1)
def test_prepare_register_list():
with pytest.raises(TypeError):
prepare_register_list({"ro": [-1, 1]})
# ---------------------
# compiler-server tests
# ---------------------
def test_get_qc_returns_remote_qvm_compiler(qvm: QVMConnection, compiler: QVMCompiler):
with patch.dict("os.environ", {"COMPILER_URL": "tcp://192.168.0.0:5550"}):
qc = get_qc("9q-square-qvm")
assert isinstance(qc.compiler, QVMCompiler)
mock_qpu_compiler_server = Server()
@mock_qpu_compiler_server.rpc_handler
def native_quil_to_binary(payload: BinaryExecutableRequest) -> BinaryExecutableResponse:
assert Program(payload.quil).out() == COMPILED_BELL_STATE.out()
time.sleep(0.1)
return BinaryExecutableResponse(program=COMPILED_BYTES_ARRAY)
@mock_qpu_compiler_server.rpc_handler
def get_version_info() -> str:
return "1.8.1"
@pytest.fixture
def m_endpoints():
return "tcp://127.0.0.1:5550", "tcp://*:5550"
def run_mock(_, endpoint):
# Need a new event loop for a new process
mock_qpu_compiler_server.run(endpoint, loop=asyncio.new_event_loop())
@pytest.fixture
def server(request, m_endpoints):
proc = Process(target=run_mock, args=m_endpoints)
proc.start()
yield proc
os.kill(proc.pid, signal.SIGINT)
@pytest.fixture
def mock_qpu_compiler(request, m_endpoints, compiler: QVMCompiler):
return QPUCompiler(
quilc_endpoint=compiler.client.endpoint,
qpu_compiler_endpoint=m_endpoints[0],
device=NxDevice(nx.Graph([(0, 1)])),
)
def test_quil_to_native_quil(compiler):
response = compiler.quil_to_native_quil(BELL_STATE)
print(response)
assert response.out() == COMPILED_BELL_STATE.out()
def test_native_quil_to_binary(server, mock_qpu_compiler):
p = COMPILED_BELL_STATE.copy()
p.wrap_in_numshots_loop(10)
# `native_quil_to_executable` will warn us that we haven't constructed our
# program via `quil_to_native_quil`.
with pytest.warns(UserWarning):
response = mock_qpu_compiler.native_quil_to_executable(p)
assert response.program == COMPILED_BYTES_ARRAY
def test_local_rb_sequence(benchmarker):
response = benchmarker.generate_rb_sequence(2, [PHASE(np.pi / 2, 0), H(0)], seed=52)
assert [prog.out() for prog in response] == [
"H 0\nPHASE(pi/2) 0\nH 0\nPHASE(pi/2) 0\nPHASE(pi/2) 0\n",
"H 0\nPHASE(pi/2) 0\nH 0\nPHASE(pi/2) 0\nPHASE(pi/2) 0\n",
]
def test_local_conjugate_request(benchmarker):
response = benchmarker.apply_clifford_to_pauli(Program("H 0"), PauliTerm("X", 0, 1.0))
assert isinstance(response, PauliTerm)
assert str(response) == "(1+0j)*Z0"
def test_apply_clifford_to_pauli(benchmarker):
response = benchmarker.apply_clifford_to_pauli(Program("H 0"), PauliTerm("I", 0, 0.34))
assert response == PauliTerm("I", 0, 0.34)
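# Note: the `qvm`, `compiler` and `benchmarker` arguments above are pytest fixtures expected to
# come from a conftest.py that is not part of this file. A minimal sketch of the `qvm` fixture,
# assuming a local `qvm -S` server on the default endpoint and using the documented
# QVMConnection constructor (hypothetical, shown for illustration only; the seed matches the
# "rng-seed": 52 asserted by the mocked requests above):
#
#     @pytest.fixture
#     def qvm():
#         return QVMConnection(random_seed=52)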
|
localads_mwm_to_csv.py
|
import csv
import ctypes
import logging
import os
import sys
from multiprocessing import Pool
from multiprocessing import Process
from multiprocessing import Queue
from zlib import adler32
from mwm import MetadataField
from mwm import Mwm
from mwm.ft2osm import read_osm2ft
HEADERS = {
"mapping": "osmid fid mwm_id mwm_version source_type".split(),
"sponsored": "sid fid mwm_id mwm_version source_type".split(),
"mwm": "mwm_id name mwm_version".split(),
}
QUEUES = {name: Queue() for name in HEADERS}
GOOD_TYPES = (
"amenity",
"shop",
"tourism",
"leisure",
"sport",
"craft",
"man_made",
"office",
"historic",
"aeroway",
"natural-beach",
"natural-peak",
"natural-volcano",
"natural-spring",
"natural-cave_entrance",
"waterway-waterfall",
"place-island",
"railway-station",
"railway-halt",
"aerialway-station",
"building-train_station",
)
SOURCE_TYPES = {"osm": 0, "booking": 1}
def generate_id_from_name_and_version(name, version):
return ctypes.c_long((adler32(bytes(name, "utf-8")) << 32) | version).value
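# Illustration (hypothetical values): the adler32 checksum of the region name fills the high
# 32 bits and the mwm data version the low 32 bits, e.g.
#     generate_id_from_name_and_version("Belarus_Minsk", 190217)
# packs both into a single signed 64-bit value (on typical 64-bit Linux, where ctypes.c_long
# is 64 bits), so each (region, version) pair gets a compact numeric mwm_id key.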
def parse_mwm(mwm_name, osm2ft_name, override_version):
region_name = os.path.splitext(os.path.basename(mwm_name))[0]
logging.info(region_name)
with open(osm2ft_name, "rb") as f:
ft2osm = read_osm2ft(f, ft2osm=True, tuples=False)
mwm_file = Mwm(mwm_name)
version = override_version or mwm_file.version().version
mwm_id = generate_id_from_name_and_version(region_name, version)
QUEUES["mwm"].put((mwm_id, region_name, version))
for feature in mwm_file:
osm_id = ft2osm.get(feature.index(), None)
readable_types = feature.readable_types()
if osm_id is None:
metadata = feature.metadata()
if metadata is not None and MetadataField.sponsored_id in metadata:
for t in readable_types:
if t.startswith("sponsored-"):
QUEUES["sponsored"].put(
(
metadata[MetadataField.sponsored_id],
feature.index(),
mwm_id,
version,
SOURCE_TYPES[t[t.find("-") + 1 :]],
)
)
break
else:
for t in readable_types:
if t.startswith(GOOD_TYPES):
QUEUES["mapping"].put(
(
ctypes.c_long(osm_id).value,
feature.index(),
mwm_id,
version,
SOURCE_TYPES["osm"],
)
)
break
def write_csv(output_dir, qtype):
with open(os.path.join(output_dir, qtype + ".csv"), "w") as f:
mapping = QUEUES[qtype].get()
w = csv.writer(f)
w.writerow(HEADERS[qtype])
while mapping is not None:
w.writerow(mapping)
mapping = QUEUES[qtype].get()
def create_csv(output, mwm_path, osm2ft_path, version, threads):
if not os.path.isdir(output):
os.mkdir(output)
# Create CSV writer processes for each queue and a pool of MWM readers.
writers = [Process(target=write_csv, args=(output, qtype)) for qtype in QUEUES]
for w in writers:
w.start()
pool = Pool(processes=threads)
for mwm_name in os.listdir(mwm_path):
if (
"World" in mwm_name
or "minsk_pass" in mwm_name
or not mwm_name.endswith(".mwm")
):
continue
osm2ft_name = os.path.join(osm2ft_path, os.path.basename(mwm_name) + ".osm2ft")
if not os.path.exists(osm2ft_name):
logging.error("Cannot find %s", osm2ft_name)
sys.exit(2)
parse_mwm_args = (os.path.join(mwm_path, mwm_name), osm2ft_name, int(version))
pool.apply_async(parse_mwm, parse_mwm_args)
pool.close()
pool.join()
for queue in QUEUES.values():
queue.put(None)
for w in writers:
w.join()
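# A minimal invocation sketch (hypothetical paths; not part of the original module):
#
#     logging.basicConfig(level=logging.INFO)
#     create_csv("csv_out", "/path/to/mwm", "/path/to/osm2ft", version=0, threads=4)
#
# Passing version=0 makes parse_mwm fall back to each mwm file's own data version.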
|
test_output.py
|
import subprocess
import sys
import pytest
import re
import ray
from ray._private.test_utils import run_string_as_driver_nonblocking
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_autoscaler_infeasible():
script = """
import ray
import time
ray.init(num_cpus=1)
@ray.remote(num_gpus=1)
def foo():
pass
x = foo.remote()
time.sleep(15)
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
err_str = proc.stderr.read().decode("ascii")
print(out_str, err_str)
assert "Tip:" in out_str
assert "Error: No available node types can fulfill" in out_str
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_autoscaler_warn_deadlock():
script = """
import ray
import time
ray.init(num_cpus=1)
@ray.remote(num_cpus=1)
class A:
pass
a = A.remote()
b = A.remote()
time.sleep(25)
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
err_str = proc.stderr.read().decode("ascii")
print(out_str, err_str)
assert "Tip:" in out_str
assert "Warning: The following resource request cannot" in out_str
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_autoscaler_no_spam():
script = """
import ray
import time
# Check that there are no false positives with custom resources.
ray.init(num_cpus=1, resources={"node:x": 1})
@ray.remote(num_cpus=1, resources={"node:x": 1})
def f():
time.sleep(1)
print("task done")
ray.get([f.remote() for _ in range(15)])
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
err_str = proc.stderr.read().decode("ascii")
print(out_str, err_str)
assert "Tip:" not in out_str
assert "Tip:" not in err_str
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_fail_importing_actor(ray_start_regular, error_pubsub):
script = """
import os
import sys
import tempfile
import ray
ray.init()
temporary_python_file = '''
def temporary_helper_function():
return 1
'''
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define an actor that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
class Foo:
def __init__(self):
self.x = module.temporary_python_file()
a = Foo.remote()
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
err_str = proc.stderr.read().decode("ascii")
print(out_str)
print(err_str)
assert "ModuleNotFoundError: No module named" in err_str
assert "RuntimeError: The actor with name Foo failed to import" in err_str
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_fail_importing_task(ray_start_regular, error_pubsub):
script = """
import os
import sys
import tempfile
import ray
ray.init()
temporary_python_file = '''
def temporary_helper_function():
return 1
'''
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define an actor that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
def foo():
return module.temporary_python_file()
ray.get(foo.remote())
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
err_str = proc.stderr.read().decode("ascii")
print(out_str)
print(err_str)
assert "ModuleNotFoundError: No module named" in err_str
assert "RuntimeError: The remote function failed to import" in err_str
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_worker_stdout():
script = """
import ray
import sys
ray.init(num_cpus=2)
@ray.remote
def foo(out_str, err_str):
print(out_str)
print(err_str, file=sys.stderr)
ray.get(foo.remote("abc", "def"))
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
err_str = proc.stderr.read().decode("ascii")
assert out_str.endswith("abc\n"), out_str
assert "(foo pid=" in out_str, out_str
assert err_str.split("\n")[-2].endswith("def")
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_core_worker_error_message():
script = """
import ray
import sys
ray.init(local_mode=True)
# In local mode this generates an ERROR level log.
ray._private.utils.push_error_to_driver(
ray.worker.global_worker, "type", "Hello there")
"""
proc = run_string_as_driver_nonblocking(script)
err_str = proc.stderr.read().decode("ascii")
assert "Hello there" in err_str, err_str
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_disable_driver_logs_breakpoint():
script = """
import time
import os
import ray
import sys
import threading
ray.init(num_cpus=2)
@ray.remote
def f():
while True:
time.sleep(1)
print("hello there")
sys.stdout.flush()
def kill():
time.sleep(5)
sys.stdout.flush()
time.sleep(1)
os._exit(0)
t = threading.Thread(target=kill)
t.start()
x = f.remote()
time.sleep(2) # Enough time to print one hello.
ray.util.rpdb._driver_set_trace() # This should disable worker logs.
# breakpoint() # Only works in Py3.7+
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
num_hello = out_str.count("hello")
assert num_hello >= 1, out_str
assert num_hello < 3, out_str
assert "Temporarily disabling Ray worker logs" in out_str, out_str
# TODO(ekl) nice to test resuming logs too, but it's quite complicated
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_multi_stdout():
script = """
import ray
import sys
ray.init(num_cpus=1)
@ray.remote
def foo():
print()
@ray.remote
def bar():
print()
@ray.remote
def baz():
print()
ray.get(foo.remote())
ray.get(bar.remote())
ray.get(baz.remote())
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
assert "(foo pid=" in out_str, out_str
assert "(bar pid=" in out_str, out_str
assert "(baz pid=" in out_str, out_str
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_actor_stdout():
script = """
import ray
ray.init(num_cpus=2)
@ray.remote
class Actor1:
def f(self):
print("hi")
@ray.remote
class Actor2:
def __init__(self):
print("init")
self.name = "ActorX"
def f(self):
print("bye")
def __repr__(self):
return self.name
a = Actor1.remote()
ray.get(a.f.remote())
b = Actor2.remote()
ray.get(b.f.remote())
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
print(out_str)
assert "hi" in out_str, out_str
assert "(Actor1 pid=" in out_str, out_str
assert "bye" in out_str, out_str
assert re.search("Actor2 pid=.*init", out_str), out_str
assert not re.search("ActorX pid=.*init", out_str), out_str
assert re.search("ActorX pid=.*bye", out_str), out_str
assert not re.search("Actor2 pid=.*bye", out_str), out_str
def test_output():
# Use subprocess to execute the __main__ below.
outputs = subprocess.check_output(
[sys.executable, __file__, "_ray_instance"],
stderr=subprocess.STDOUT).decode()
lines = outputs.split("\n")
for line in lines:
print(line)
assert len(lines) == 2, lines
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "_ray_instance":
# Set object store memory very low so that it won't complain
# about low shm memory in Linux environment.
# The test failures currently complain it only has 2 GB memory,
# so let's set it much lower than that.
MB = 1000**2
ray.init(num_cpus=1, object_store_memory=(100 * MB))
ray.shutdown()
else:
sys.exit(pytest.main(["-v", __file__]))
|
process_replay.py
|
#!/usr/bin/env python3
import importlib
import os
import sys
import threading
import time
import signal
from collections import namedtuple
import capnp
from tqdm import tqdm
import cereal.messaging as messaging
from cereal import car, log
from cereal.services import service_list
from common.params import Params
from common.timeout import Timeout
from selfdrive.car.fingerprints import FW_VERSIONS
from selfdrive.car.car_helpers import get_car, interfaces
from selfdrive.manager.process import PythonProcess
from selfdrive.manager.process_config import managed_processes
# Numpy gives different results based on CPU features after version 19
NUMPY_TOLERANCE = 1e-7
CI = "CI" in os.environ
TIMEOUT = 15
ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'init_callback', 'should_recv_callback', 'tolerance', 'fake_pubsubmaster'])
def wait_for_event(evt):
if not evt.wait(TIMEOUT):
if threading.currentThread().getName() == "MainThread":
# tested process likely died. don't let test just hang
raise Exception("Timeout reached. Tested process likely crashed.")
else:
# done testing this process, let it die
sys.exit(0)
class FakeSocket:
def __init__(self, wait=True):
self.data = []
self.wait = wait
self.recv_called = threading.Event()
self.recv_ready = threading.Event()
def receive(self, non_blocking=False):
if non_blocking:
return None
if self.wait:
self.recv_called.set()
wait_for_event(self.recv_ready)
self.recv_ready.clear()
return self.data.pop()
def send(self, data):
if self.wait:
wait_for_event(self.recv_called)
self.recv_called.clear()
self.data.append(data)
if self.wait:
self.recv_ready.set()
def wait_for_recv(self):
wait_for_event(self.recv_called)
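# FakeSocket keeps the replay harness and the process under test in lockstep: receive() sets
# recv_called and blocks on recv_ready until the harness has queued the next message, while
# send() waits for recv_called before releasing recv_ready. A similar Event handshake is used
# by FakeSubMaster (update_called/update_ready) and FakePubMaster (send_called/get_called) below.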
class DumbSocket:
def __init__(self, s=None):
if s is not None:
try:
dat = messaging.new_message(s)
except capnp.lib.capnp.KjException: # pylint: disable=c-extension-no-member
# lists
dat = messaging.new_message(s, 0)
self.data = dat.to_bytes()
def receive(self, non_blocking=False):
return self.data
def send(self, dat):
pass
class FakeSubMaster(messaging.SubMaster):
def __init__(self, services):
super(FakeSubMaster, self).__init__(services, addr=None)
self.sock = {s: DumbSocket(s) for s in services}
self.update_called = threading.Event()
self.update_ready = threading.Event()
self.wait_on_getitem = False
def __getitem__(self, s):
# hack to know when fingerprinting is done
if self.wait_on_getitem:
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
return self.data[s]
def update(self, timeout=-1):
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
def update_msgs(self, cur_time, msgs):
wait_for_event(self.update_called)
self.update_called.clear()
super(FakeSubMaster, self).update_msgs(cur_time, msgs)
self.update_ready.set()
def wait_for_update(self):
wait_for_event(self.update_called)
class FakePubMaster(messaging.PubMaster):
def __init__(self, services): # pylint: disable=super-init-not-called
self.data = {}
self.sock = {}
self.last_updated = None
for s in services:
try:
data = messaging.new_message(s)
except capnp.lib.capnp.KjException:
data = messaging.new_message(s, 0)
self.data[s] = data.as_reader()
self.sock[s] = DumbSocket()
self.send_called = threading.Event()
self.get_called = threading.Event()
def send(self, s, dat):
self.last_updated = s
if isinstance(dat, bytes):
self.data[s] = log.Event.from_bytes(dat)
else:
self.data[s] = dat.as_reader()
self.send_called.set()
wait_for_event(self.get_called)
self.get_called.clear()
def wait_for_msg(self):
wait_for_event(self.send_called)
self.send_called.clear()
dat = self.data[self.last_updated]
self.get_called.set()
return dat
def fingerprint(msgs, fsm, can_sock, fingerprint):
print("start fingerprinting")
fsm.wait_on_getitem = True
# populate fake socket with data for fingerprinting
canmsgs = [msg for msg in msgs if msg.which() == "can"]
wait_for_event(can_sock.recv_called)
can_sock.recv_called.clear()
can_sock.data = [msg.as_builder().to_bytes() for msg in canmsgs[:300]]
can_sock.recv_ready.set()
can_sock.wait = False
# we know fingerprinting is done when controlsd sets sm['lateralPlan'].sensorValid
wait_for_event(fsm.update_called)
fsm.update_called.clear()
fsm.wait_on_getitem = False
can_sock.wait = True
can_sock.data = []
fsm.update_ready.set()
print("finished fingerprinting")
def get_car_params(msgs, fsm, can_sock, fingerprint):
if fingerprint:
CarInterface, _, _ = interfaces[fingerprint]
CP = CarInterface.get_params(fingerprint)
else:
can = FakeSocket(wait=False)
sendcan = FakeSocket(wait=False)
canmsgs = [msg for msg in msgs if msg.which() == 'can']
for m in canmsgs[:300]:
can.send(m.as_builder().to_bytes())
_, CP = get_car(can, sendcan)
Params().put("CarParams", CP.to_bytes())
def controlsd_rcv_callback(msg, CP, cfg, fsm):
# no sendcan until controlsd is initialized
socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
if "sendcan" in socks and fsm.frame < 2000:
socks.remove("sendcan")
return socks, len(socks) > 0
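# The modulo test above derives, from the service frequencies, how many input messages arrive
# per output message: with a hypothetical 100 Hz input and a 20 Hz output the divisor is 5, so
# only every fifth input frame is expected to produce that output. The same expression is used
# as the default should_recv logic in python_replay_process further down.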
def radar_rcv_callback(msg, CP, cfg, fsm):
if msg.which() != "can":
return [], False
elif CP.radarOffCan:
return ["radarState", "liveTracks"], True
radar_msgs = {"honda": [0x445], "toyota": [0x19f, 0x22f], "gm": [0x474],
"chrysler": [0x2d4]}.get(CP.carName, None)
if radar_msgs is None:
raise NotImplementedError
for m in msg.can:
if m.src == 1 and m.address in radar_msgs:
return ["radarState", "liveTracks"], True
return [], False
def calibration_rcv_callback(msg, CP, cfg, fsm):
# calibrationd publishes 1 calibrationData every 5 cameraOdometry packets.
# should_recv always true to increment frame
recv_socks = []
frame = fsm.frame + 1 # incrementing hasn't happened yet in SubMaster
if frame == 0 or (msg.which() == 'cameraOdometry' and (frame % 5) == 0):
recv_socks = ["liveCalibration"]
return recv_socks, fsm.frame == 0 or msg.which() == 'cameraOdometry'
def ublox_rcv_callback(msg):
msg_class, msg_id = msg.ubloxRaw[2:4]
if (msg_class, msg_id) in {(1, 7 * 16)}:
return ["gpsLocationExternal"]
elif (msg_class, msg_id) in {(2, 1 * 16 + 5), (10, 9)}:
return ["ubloxGnss"]
else:
return []
CONFIGS = [
ProcessConfig(
proc_name="controlsd",
pub_sub={
"can": ["controlsState", "carState", "carControl", "sendcan", "carEvents", "carParams"],
"deviceState": [], "pandaState": [], "liveCalibration": [], "driverMonitoringState": [], "longitudinalPlan": [], "lateralPlan": [], "liveLocationKalman": [], "liveParameters": [], "radarState": [],
"modelV2": [], "driverCameraState": [], "roadCameraState": [], "ubloxRaw": [], "managerState": [],
},
ignore=["logMonoTime", "valid", "controlsState.startMonoTime", "controlsState.cumLagMs"],
init_callback=fingerprint,
should_recv_callback=controlsd_rcv_callback,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="radard",
pub_sub={
"can": ["radarState", "liveTracks"],
"liveParameters": [], "carState": [], "modelV2": [],
},
ignore=["logMonoTime", "valid", "radarState.cumLagMs"],
init_callback=get_car_params,
should_recv_callback=radar_rcv_callback,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="plannerd",
pub_sub={
"modelV2": ["lateralPlan"], "radarState": ["longitudinalPlan"],
"carState": [], "controlsState": [],
},
ignore=["logMonoTime", "valid", "longitudinalPlan.processingDelay"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="calibrationd",
pub_sub={
"carState": ["liveCalibration"],
"cameraOdometry": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=calibration_rcv_callback,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="dmonitoringd",
pub_sub={
"driverState": ["driverMonitoringState"],
"liveCalibration": [], "carState": [], "modelV2": [], "controlsState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="locationd",
pub_sub={
"cameraOdometry": ["liveLocationKalman"],
"sensorEvents": [], "gpsLocationExternal": [], "liveCalibration": [], "carState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=False,
),
ProcessConfig(
proc_name="paramsd",
pub_sub={
"liveLocationKalman": ["liveParameters"],
"carState": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="ubloxd",
pub_sub={
"ubloxRaw": ["ubloxGnss", "gpsLocationExternal"],
},
ignore=["logMonoTime"],
init_callback=None,
should_recv_callback=ublox_rcv_callback,
tolerance=None,
fake_pubsubmaster=False,
),
]
def replay_process(cfg, lr, fingerprint=None):
if cfg.fake_pubsubmaster:
return python_replay_process(cfg, lr, fingerprint)
else:
return cpp_replay_process(cfg, lr, fingerprint)
def python_replay_process(cfg, lr, fingerprint=None):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub]
pub_sockets = [s for s in cfg.pub_sub.keys() if s != 'can']
fsm = FakeSubMaster(pub_sockets)
fpm = FakePubMaster(sub_sockets)
args = (fsm, fpm)
if 'can' in list(cfg.pub_sub.keys()):
can_sock = FakeSocket()
args = (fsm, fpm, can_sock)
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
params = Params()
params.clear_all()
params.put_bool("OpenpilotEnabledToggle", True)
params.put_bool("Passive", False)
params.put_bool("CommunityFeaturesToggle", True)
os.environ['NO_RADAR_SLEEP'] = "1"
# TODO: remove after getting new route for civic & accord
migration = {
"HONDA CIVIC 2016 TOURING": "HONDA CIVIC 2016",
"HONDA ACCORD 2018 SPORT 2T": "HONDA ACCORD 2018",
"HONDA ACCORD 2T 2018": "HONDA ACCORD 2018",
}
if fingerprint is not None:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = fingerprint
else:
os.environ['SKIP_FW_QUERY'] = ""
os.environ['FINGERPRINT'] = ""
for msg in lr:
if msg.which() == 'carParams':
car_fingerprint = migration.get(msg.carParams.carFingerprint, msg.carParams.carFingerprint)
if len(msg.carParams.carFw) and (car_fingerprint in FW_VERSIONS):
params.put("CarParamsCache", msg.carParams.as_builder().to_bytes())
else:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = car_fingerprint
assert(type(managed_processes[cfg.proc_name]) is PythonProcess)
managed_processes[cfg.proc_name].prepare()
mod = importlib.import_module(managed_processes[cfg.proc_name].module)
thread = threading.Thread(target=mod.main, args=args)
thread.daemon = True
thread.start()
if cfg.init_callback is not None:
if 'can' not in list(cfg.pub_sub.keys()):
can_sock = None
cfg.init_callback(all_msgs, fsm, can_sock, fingerprint)
CP = car.CarParams.from_bytes(params.get("CarParams", block=True))
# wait for started process to be ready
if 'can' in list(cfg.pub_sub.keys()):
can_sock.wait_for_recv()
else:
fsm.wait_for_update()
log_msgs, msg_queue = [], []
for msg in tqdm(pub_msgs, disable=CI):
if cfg.should_recv_callback is not None:
recv_socks, should_recv = cfg.should_recv_callback(msg, CP, cfg, fsm)
else:
recv_socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
should_recv = bool(len(recv_socks))
if msg.which() == 'can':
can_sock.send(msg.as_builder().to_bytes())
else:
msg_queue.append(msg.as_builder())
if should_recv:
fsm.update_msgs(0, msg_queue)
msg_queue = []
recv_cnt = len(recv_socks)
while recv_cnt > 0:
m = fpm.wait_for_msg()
log_msgs.append(m)
recv_cnt -= m.which() in recv_socks
return log_msgs
def cpp_replay_process(cfg, lr, fingerprint=None):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub] # We get responses here
pm = messaging.PubMaster(cfg.pub_sub.keys())
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
log_msgs = []
os.environ["SIMULATION"] = "1" # Disable submaster alive checks
managed_processes[cfg.proc_name].prepare()
managed_processes[cfg.proc_name].start()
with Timeout(TIMEOUT):
while not all(pm.all_readers_updated(s) for s in cfg.pub_sub.keys()):
time.sleep(0)
# Make sure all subscribers are connected
sockets = {s: messaging.sub_sock(s, timeout=2000) for s in sub_sockets}
for s in sub_sockets:
messaging.recv_one_or_none(sockets[s])
for i, msg in enumerate(tqdm(pub_msgs, disable=CI)):
pm.send(msg.which(), msg.as_builder())
resp_sockets = cfg.pub_sub[msg.which()] if cfg.should_recv_callback is None else cfg.should_recv_callback(msg)
for s in resp_sockets:
response = messaging.recv_one(sockets[s])
if response is None:
print(f"Warning, no response received {i}")
else:
log_msgs.append(response)
if not len(resp_sockets): # We only need to wait if we didn't already wait for a response
while not pm.all_readers_updated(msg.which()):
time.sleep(0)
managed_processes[cfg.proc_name].signal(signal.SIGKILL)
managed_processes[cfg.proc_name].stop()
return log_msgs
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
import requests
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.models import ContainerServiceServicePrincipalProfile
from azure.mgmt.containerservice.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.models import ContainerServiceStorageProfileTypes
from azure.mgmt.containerservice.v2018_03_31.models import ManagedCluster
from azure.mgmt.containerservice.v2018_03_31.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.v2018_03_31.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.v2018_03_31.models import ManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_04_30.models import OpenShiftManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_04_30.models import OpenShiftAgentPoolProfileRole
from azure.mgmt.containerservice.v2019_04_30.models import OpenShiftManagedClusterIdentityProvider
from azure.mgmt.containerservice.v2019_04_30.models import OpenShiftManagedClusterAADIdentityProvider
from azure.mgmt.containerservice.v2019_04_30.models import OpenShiftManagedCluster
from azure.mgmt.containerservice.v2019_04_30.models import OpenShiftRouterProfile
from azure.mgmt.containerservice.v2019_04_30.models import OpenShiftManagedClusterAuthProfile
from azure.mgmt.containerservice.v2019_04_30.models import NetworkProfile
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
def wait_then_open(url):
"""
    Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then opening the URL.
"""
for _ in range(1, 10):
try:
urlopen(url, context=_ssl_context())
except URLError:
time.sleep(1)
break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def acs_browse(cmd, client, resource_group, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
    :type disable_browser: bool
    :param ssh_key_file: If set, a path to an SSH key to use; only applies to DC/OS
    :type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
_acs_browse_internal(cmd, client, acs_info, resource_group, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group, name, disable_browser, ssh_key_file):
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if str(orchestrator_type).lower() == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group, disable_browser, ssh_key_file=ssh_key_file)
elif str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
else:
raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
    :param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
return
def acs_install_cli(cmd, client, resource_group, name, install_location=None, client_version=None):
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
kwargs = {'install_location': install_location}
if client_version:
kwargs['client_version'] = client_version
if orchestrator_type == 'kubernetes':
return k8s_install_cli(**kwargs)
elif orchestrator_type == 'dcos':
return dcos_install_cli(**kwargs)
else:
raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
        raise CLIError("Unsupported platform '{}'; cannot download the DC/OS CLI.".format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None):
"""Install kubectl, a command-line interface for Kubernetes clusters."""
source_url = "https://storage.googleapis.com/kubernetes-release/release"
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
if client_version == 'latest':
context = _ssl_context()
version = urlopen(source_url + '/stable.txt', context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = source_url + '/{}/bin/{}/amd64/{}'
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
        raise CLIError("Unsupported platform '{}'; cannot download kubectl.".format(system))
logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def k8s_install_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("install", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def k8s_upgrade_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("upgrade", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def _k8s_install_or_upgrade_connector(helm_cmd, cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group):
from subprocess import PIPE, Popen
instance = client.get(resource_group_name, name)
helm_not_installed = 'Helm not detected, please verify if it is installed.'
url_chart = chart_url
if image_tag is None:
image_tag = 'latest'
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# If SPN is specified, the secret should also be specified
if service_principal is not None and client_secret is None:
raise CLIError('--client-secret must be specified when --service-principal is specified')
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, aci_resource_group or resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
    # Validate the location against the available ACI regions
_validate_aci_location(norm_location)
    # Get the credentials from an AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
subscription_id = _get_subscription_id(cmd.cli_ctx)
# Get the TenantID
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, tenant_id = profile.get_login_credentials()
# Check if we want the linux connector
if os_type.lower() in ['linux', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Linux', instance.enable_rbac, instance.fqdn)
# Check if we want the windows connector
if os_type.lower() in ['windows', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Windows', instance.enable_rbac, instance.fqdn)
def _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, os_type, use_rbac, masterFqdn):
rbac_install = "true" if use_rbac else "false"
node_taint = 'azure.com/aci'
helm_release_name = connector_name.lower() + '-' + os_type.lower() + '-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
k8s_master = 'https://{}'.format(masterFqdn)
logger.warning("Deploying the ACI connector for '%s' using Helm", os_type)
try:
values = 'env.nodeName={},env.nodeTaint={},env.nodeOsType={},image.tag={},rbac.install={}'.format(
node_name, node_taint, os_type, image_tag, rbac_install)
if service_principal:
values += ",env.azureClientId=" + service_principal
if client_secret:
values += ",env.azureClientKey=" + client_secret
if subscription_id:
values += ",env.azureSubscriptionId=" + subscription_id
if tenant_id:
values += ",env.azureTenantId=" + tenant_id
if aci_resource_group:
values += ",env.aciResourceGroup=" + aci_resource_group
if norm_location:
values += ",env.aciRegion=" + norm_location
# Currently, we need to set the master FQDN.
# This is temporary and we should remove it when possible
values += ",env.masterUri=" + k8s_master
if helm_cmd == "install":
subprocess.call(["helm", "install", url_chart, "--name", helm_release_name, "--set", values])
elif helm_cmd == "upgrade":
subprocess.call(["helm", "upgrade", helm_release_name, url_chart, "--set", values])
except subprocess.CalledProcessError as err:
raise CLIError('Could not deploy the ACI connector Chart: {}'.format(err))
def k8s_uninstall_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, graceful=False, os_type='Linux'):
from subprocess import PIPE, Popen
helm_not_installed = "Error : Helm not detected, please verify if it is installed."
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
    # Get the credentials from an AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
if os_type.lower() in ['linux', 'both']:
helm_release_name = connector_name.lower() + '-linux-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
if os_type.lower() in ['windows', 'both']:
helm_release_name = connector_name.lower() + '-windows-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
def _undeploy_connector(graceful, node_name, helm_release_name):
if graceful:
logger.warning('Graceful option selected, will try to drain the node first')
from subprocess import PIPE, Popen
kubectl_not_installed = 'Kubectl not detected, please verify if it is installed.'
try:
Popen(["kubectl"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(kubectl_not_installed)
try:
drain_node = subprocess.check_output(
['kubectl', 'drain', node_name, '--force', '--delete-local-data'],
universal_newlines=True)
if not drain_node:
raise CLIError('Could not find the node, make sure you' +
' are using the correct --os-type')
except subprocess.CalledProcessError as err:
raise CLIError('Could not find the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
logger.warning("Undeploying the '%s' using Helm", helm_release_name)
try:
subprocess.call(['helm', 'del', helm_release_name, '--purge'])
except subprocess.CalledProcessError as err:
raise CLIError('Could not undeploy the ACI connector Chart: {}'.format(err))
try:
subprocess.check_output(
['kubectl', 'delete', 'node', node_name],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not delete the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _add_role_assignment(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def _get_subscription_id(cli_ctx):
_, sub_id, _ = Profile(cli_ctx=cli_ctx).get_login_credentials(subscription_id=None)
return sub_id
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
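# Worked example (hypothetical inputs): _get_default_dns_prefix("my_cluster", "Prod RG #1",
# "abcdef12-3456-...") strips the disallowed characters and returns "mycluster-ProdRG1-abcdef",
# keeping at most 10, 16 and 6 characters of the name, resource group and subscription id.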
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
    :param ssh_key_value: Configure all Linux machines with the SSH RSA
     public key string. Your key should include three parts, for example
     'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'
:type ssh_key_value: str
:param content_version: If included it must match the ContentVersion
in the template.
:type content_version: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
Possible value could be StorageAccount, ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
    :param agent_vnet_subnet_id: The vnet subnet id for agent pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
Possible value could be StorageAccount, ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
:param windows: If true, the cluster will be built for running Windows container.
:type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = _get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# if api-version is not specified, or specified in a version not supported
# override based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
# set location if void
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
# set agent_ports if void
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
# The resources.properties fields should match with ContainerServices' api model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd.cli_ctx, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
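# The three helpers above cache one service principal per subscription in a single JSON file
# under the CLI config dir (typically ~/.azure). Illustrative layout with hypothetical IDs:
#   acsServicePrincipal.json
#   {
#       "<subscription-id>": {
#           "service_principal": "<appId>",
#           "client_secret": "<secret>"
#       }
#   }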
def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.resource.resources.models import DeploymentProperties
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cli_ctx, ResourceManagementClient, subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
return smc.validate(resource_group_name, deployment_name, properties)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
ssh_key_file=None,
overwrite_existing=False):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
# TODO: this only works for public cloud, need other casing for national clouds
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge things
if path_candidate != path:
try:
merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
except yaml.YAMLError as exc:
logger.warning('Failed to merge credentials to kube config file: %s', exc)
logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if not addition[key]:
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n, NoTTYException
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
else:
raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
    if existing is None:
        existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
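# Merge semantics, roughly: clusters, users and contexts from the downloaded kubeconfig are
# appended to the existing one, entries whose 'name' clashes are replaced (or prompted about)
# by _handle_merge, and 'current-context' is switched to the added context. Illustrative call
# with hypothetical paths:
#   merge_kubernetes_configurations('/home/user/.kube/config', '/tmp/config-mycluster-1', replace=False)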
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds,
required_resource_access=required_resource_accesses)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
try:
if key_creds:
client.update_key_credentials(object_id, key_creds)
if password_creds:
client.update_password_credentials(object_id, password_creds)
if reply_urls:
client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls))
return
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
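# Illustrative use with hypothetical values: a password credential valid for one year from now.
#   password_creds, key_creds = _build_application_creds(password='s3cr3t')
#   # -> key_creds is None; password_creds is a single PasswordCredential with a random key_id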
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
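# create_service_principal accepts an appId GUID, an identifier URI, or (as a fallback) an
# application object id; illustrative call with a hypothetical appId:
#   create_service_principal(cli_ctx, '11111111-2222-3333-4444-555555555555')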
def create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx, role, assignee, resource_group_name, scope)
def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
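# Scope resolution above, with hypothetical IDs:
#   _build_role_scope(None, None, 'sub-id')      -> '/subscriptions/sub-id'
#   _build_role_scope('my-rg', None, 'sub-id')   -> '/subscriptions/sub-id/resourceGroups/my-rg'
#   _build_role_scope('my-rg', '/subscriptions/sub-id', 'sub-id') -> CLIError (group is redundant)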
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
elif len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
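# The scope argument is the full subnet resource id, e.g. (hypothetical):
#   /subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>
# and the check only passes when a Network Contributor assignment exists at exactly that scope.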
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
listen_address='127.0.0.1', listen_port='8001'):
if not which('kubectl'):
        raise CLIError('Cannot find kubectl executable in PATH')
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
addon_profile = addon_profiles.get("kubeDashboard", ManagedClusterAddonProfile(enabled=True))
if not addon_profile.enabled:
raise CLIError('The kube-dashboard addon was disabled for this managed cluster.\n'
'To use "az aks browse" first enable the add-on\n'
'by running "az aks enable-addons --addons kube-dashboard".')
proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
_, browse_path = tempfile.mkstemp()
# TODO: need to add an --admin option?
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system", "--output", "name",
"--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{}'.format(term_id),
json={"url": result['url']})
logger.warning('To view the console, please open %s in a new tab', result['url'])
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(proxy_url)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
"port-forward", "--address", listen_address, dashboard_pod,
"{0}:9090".format(listen_port)], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
            # older kubectl (< v1.13) does not understand --address; retry the port-forward without it
            if b'unknown flag: --address' in err.output:
if listen_address != '127.0.0.1':
logger.warning('"--address" is only supported in kubectl v1.13 and later.')
logger.warning('The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
"port-forward", dashboard_pod, "{0}:9090".format(listen_port)])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
        # TODO: Better error handling here.
        if in_cloud_console():
            requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
# pylint: disable=too-many-statements
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_size=0,
node_count=3,
nodepool_name="nodepool1",
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
skip_subnet_role_assignment=False,
network_plugin=None,
network_policy=None,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
enable_addons=None,
workspace_resource_id=None,
vnet_subnet_id=None,
max_pods=0,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
no_wait=False):
_validate_ssh_key(no_ssh_key, ssh_key_value)
subscription_id = _get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
max_pods=int(max_pods) if max_pods else None
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
location=location, name=name)
service_principal_profile = ContainerServiceServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
scope = vnet_subnet_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
service_principal_profile.client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
network_profile = None
if any([network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id
)
monitoring = False
if 'omsagent' in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
aad_profile = None
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
if aad_tenant_id is None:
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
mc = ManagedCluster(
location=location, tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=False if disable_rbac else True,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile)
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
result = sdk_no_wait(no_wait,
client.create_or_update,
resource_group_name=resource_group_name,
resource_name=name, parameters=mc)
# add cluster spn with Monitoring Metrics Publisher role assignment to the cluster resource
# mdm metrics supported only in azure public cloud so add the role assignment only in this cloud
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurecloud' and monitoring:
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_profile.client_id, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for monitoring addon. '
'Are you an Owner on this subscription?')
return result
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = _get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = _get_subscription_id(cmd.cli_ctx)
service_principal_client_id = instance.service_principal_profile.client_id
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True,
workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait)
if 'omsagent' in instance.addon_profiles:
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_client_id, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_get_versions(cmd, client, location):
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
overwrite_existing=False):
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
else:
credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
else:
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig, overwrite_existing)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
ADDONS = {
'http_application_routing': 'httpApplicationRouting',
'monitoring': 'omsagent',
'virtual-node': 'aciConnector',
'kube-dashboard': 'kubeDashboard'
}
def aks_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
reset_service_principal=False,
reset_aad=False,
service_principal=None,
client_secret=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_client_app_id=None,
aad_tenant_id=None,
no_wait=False):
if bool(reset_service_principal) == bool(reset_aad):
raise CLIError('usage error: --reset-service-principal | --reset-aad-profile')
if reset_service_principal:
if service_principal is None or client_secret is None:
raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
return sdk_no_wait(no_wait,
client.reset_service_principal_profile,
resource_group_name,
name, service_principal, client_secret)
if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
'--aad-server-app-secret SECRET [--aad-tenant-id ID]')
parameters = {
'clientAppID': aad_client_app_id,
'serverAppID': aad_server_app_id,
'serverAppSecret': aad_server_app_secret,
'tenantID': aad_tenant_id
}
return sdk_no_wait(no_wait,
client.reset_aad_profile,
resource_group_name,
name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_upgrade(cmd, client, resource_group_name, name, kubernetes_version, no_wait=False, **kwargs): # pylint: disable=unused-argument
instance = client.get(resource_group_name, name)
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
instance.kubernetes_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None, prompt=False):
"""
Use Azure Dev Spaces with a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
    :param resource_group_name: Name of resource group. You can configure the default group \
    using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param update: Update to the latest Azure Dev Spaces client components.
:type update: bool
:param space_name: Name of the new or existing dev space to select. Defaults to an interactive selection experience.
:type space_name: String
:param prompt: Do not prompt for confirmation. Requires --space.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, prompt)
except TypeError:
raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
except AttributeError as ae:
raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
"""
Remove Azure Dev Spaces from a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
    :param resource_group_name: Name of resource group. You can configure the default group \
    using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param prompt: Do not prompt for confirmation.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
except AttributeError as ae:
raise CLIError(ae)
def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None,
subnet_name=None, no_wait=False):
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
if 'kube-dashboard' in addon_args and 'kubeDashboard' not in addon_profiles:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
addon = ADDONS[addon_arg]
if addon == 'aciConnector':
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# addon name is case insensitive
addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == 'omsagent':
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
                                   'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
elif addon.lower() == ('aciConnector' + os_type).lower():
if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {}" '
                                   'before enabling it again.'.format(resource_group_name))
if not subnet_name:
raise CLIError('The aci-connector addon requires setting a subnet name.')
addon_profile.config = {'SubnetName': subnet_name}
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
raise CLIError("The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles['omsagent'] = ManagedClusterAddonProfile(
enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
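# Illustrative mapping performed above, with a hypothetical workspace: passing
#   --enable-addons monitoring,kube-dashboard
# produces addon profiles keyed 'omsagent' (configured with a logAnalyticsWorkspaceResourceID)
# and 'kubeDashboard', both enabled; any addon name not handled above raises a CLIError.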
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
    except Exception:  # pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
from azure.cli.core.extension import ExtensionNotInstalledException
try:
from azure.cli.core.extension import operations
operations.update_extension(cmd=cmd, extension_name=extension_name)
operations.reload_extension(extension_name=extension_name)
except CLIError as err:
logger.info(err)
except ExtensionNotInstalledException as err:
logger.debug(err)
return False
except ModuleNotFoundError as err:
logger.debug(err)
logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
return False
return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
try:
get_extension(extension_name)
if update:
return _update_dev_spaces_extension(cmd, extension_name, extension_module)
except ExtensionNotInstalledException:
return _install_dev_spaces_extension(cmd, extension_name)
return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "southcentralus",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "eastus",
"northeurope": "northeurope",
"southafricanorth": "westeurope",
"southafricawest": "westeurope",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "uksouth",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2"
}
# mapping for azure china cloud
# currently log analytics supported only China East 2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
default_region_name = "eastus"
default_region_code = "EUS"
workspace_region = default_region_name
workspace_region_code = default_region_code
cloud_name = cmd.cli_ctx.cloud.name
    if cloud_name.lower() == 'azurecloud':
        workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, default_region_name)
        workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, default_region_code)
    elif cloud_name.lower() == 'azurechinacloud':
        default_region_name = "chinaeast2"
        default_region_code = "EAST2"
        workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, default_region_name)
        workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, default_region_code)
else:
logger.error("AKS Monitoring addon not supported in cloud : %s", cloud_name)
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
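    # Illustrative result for a hypothetical subscription whose resource group sits in eastus:
    #   resource group: DefaultResourceGroup-EUS
    #   workspace:      DefaultWorkspace-<subscription-id>-EUS
    #   resource id:    /subscriptions/<subscription-id>/resourceGroups/DefaultResourceGroup-EUS/
    #                   providers/Microsoft.OperationalInsights/workspaces/DefaultWorkspace-<subscription-id>-EUS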
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
default_workspace_params = {
'location': workspace_region,
'properties': {
'sku': {
'name': 'standalone'
}
}
}
async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
default_workspace_params)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
# Workaround for this addon key which has been seen lowercased in the wild.
if 'loganalyticsworkspaceresourceid' in addon.config:
addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid')
workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID']
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
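    # e.g. /subscriptions/<sub-id>/resourceGroups/<rg-name>/providers/Microsoft.OperationalInsights/workspaces/<ws>
    #      split('/') gives ['', 'subscriptions', '<sub-id>', 'resourceGroups', '<rg-name>', ...], hence indices 2 and 4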
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError('Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _ensure_osa_aad(cli_ctx,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
identifier=None,
name=None, create=False,
customer_admin_group_id=None):
rbac_client = get_graph_rbac_management_client(cli_ctx)
if create:
        # This reply_url is temporarily set since Azure needs one to create the AAD application.
app_id_name = 'https://{}'.format(name)
if not aad_client_app_secret:
aad_client_app_secret = _create_client_secret()
# Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
additional_properties=None, type="Scope")
# Read directory permissions on Windows Azure Active Directory API
directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
additional_properties=None, type="Role")
required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
additional_properties=None,
resource_app_id="00000002-0000-0000-c000-000000000000")
list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
.format(app_id_name)))
if list_aad_filtered:
aad_client_app_id = list_aad_filtered[0].app_id
# Updating reply_url with the correct FQDN information returned by the RP
reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier)
update_application(client=rbac_client.applications,
object_id=list_aad_filtered[0].object_id,
display_name=name,
identifier_uris=[app_id_name],
reply_urls=[reply_url],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
logger.info('Updated AAD: %s', aad_client_app_id)
else:
result = create_application(client=rbac_client.applications,
display_name=name,
identifier_uris=[app_id_name],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
aad_client_app_id = result.app_id
logger.info('Created an AAD: %s', aad_client_app_id)
# Get the TenantID
if aad_tenant_id is None:
profile = Profile(cli_ctx=cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
return OpenShiftManagedClusterAADIdentityProvider(
client_id=aad_client_app_id,
secret=aad_client_app_secret,
tenant_id=aad_tenant_id,
kind='AADIdentityProvider',
customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# add role first before save it
if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
logger.warning('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal)
return load_acs_service_principal(subscription_id)
def _create_client_secret():
    # Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Just do the get, we don't need the result, it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path, overwrite_existing)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
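# Illustrative sketch (not part of the original module): how the helper above is
# typically driven. Passing "-" prints the kubeconfig to stdout, any other path is
# merged into that file. `kubeconfig_text` is a hypothetical YAML string.
def _example_print_or_merge_credentials(kubeconfig_text):
    # print to stdout
    _print_or_merge_credentials("-", kubeconfig_text, overwrite_existing=False)
    # merge into the default kubeconfig, overwriting clashing entries
    _print_or_merge_credentials(os.path.expanduser("~/.kube/config"), kubeconfig_text,
                                overwrite_existing=True)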
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_osa_nulls(list(managed_clusters))
def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals
location=None,
compute_vm_size="Standard_D4s_v3",
compute_count=3,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
vnet_prefix="10.0.0.0/8",
subnet_prefix="10.0.0.0/24",
vnet_peer=None,
tags=None,
no_wait=False,
customer_admin_group_id=None):
if location is None:
location = _get_rg_location(cmd.cli_ctx, resource_group_name)
agent_pool_profiles = []
agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='compute', # Must be 12 chars or less before ACS RP adds to it
count=int(compute_count),
vm_size=compute_vm_size,
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.compute,
subnet_cidr=subnet_prefix
)
agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='infra', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.infra,
subnet_cidr=subnet_prefix
)
agent_pool_profiles.append(agent_node_pool_profile)
agent_pool_profiles.append(agent_infra_pool_profile)
agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='master', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
subnet_cidr=subnet_prefix
)
identity_providers = []
create_aad = False
    # Validate that the cluster does not already exist, since AAD rotation is not yet supported on OSA
try:
client.get(resource_group_name, name)
except CloudError:
        # Validate whether aad_client_app_id, aad_client_app_secret and aad_tenant_id are set
if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
create_aad = True
osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=aad_client_app_id,
aad_client_app_secret=aad_client_app_secret,
aad_tenant_id=aad_tenant_id, identifier=None,
name=name, create=create_aad,
customer_admin_group_id=customer_admin_group_id)
identity_providers.append(
OpenShiftManagedClusterIdentityProvider(
name='Azure AD',
provider=osa_aad_identity
)
)
auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers)
default_router_profile = OpenShiftRouterProfile(name='default')
if vnet_peer is not None:
from azure.cli.core.commands.client_factory import get_subscription_id
from msrestazure.tools import is_valid_resource_id, resource_id
if not is_valid_resource_id(vnet_peer):
vnet_peer = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network', type='virtualNetwork',
name=vnet_peer
)
network_profile = NetworkProfile(vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
osamc = OpenShiftManagedCluster(
location=location, tags=tags,
open_shift_version="v3.11",
network_profile=network_profile,
auth_profile=auth_profile,
agent_pool_profiles=agent_pool_profiles,
master_pool_profile=agent_master_pool_profile,
router_profiles=[default_router_profile])
try:
# long_running_operation_timeout=300
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
result = LongRunningOperation(cmd.cli_ctx)(result)
instance = client.get(resource_group_name, name)
_ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=osa_aad_identity.client_id,
aad_client_app_secret=osa_aad_identity.secret,
aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
name=name, create=create_aad)
except CloudError as ex:
if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
if "No registered resource provider found for location" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
else:
raise ex
def openshift_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_osa_nulls([mc])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
instance.agent_pool_profiles[0].count = int(compute_count) # pylint: disable=no-member
    # null out the AAD profile and manually set the master pool profile name, because otherwise validation complains
instance.master_pool_profile.name = "master"
instance.auth_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
|
fault_tolerance_test.py
|
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fault tolerance test for parameter server training in TF2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import os
import threading
import time
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import parameter_server_strategy_v2
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.coordinator import cluster_coordinator
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator as thread_coordinator
from tensorflow.python.training import server_lib
_RPC_ERROR_FROM_WORKER = "GRPC error information from remote target /job:worker"
_RPC_ERROR_FROM_PS = "GRPC error information from remote target /job:ps"
_WORKER_PREEMPTION_THREAD_NAME = "WorkerPreemptionHandler"
_WORKER_THREAD_PREFIX = "WorkerClosureProcessingLoop"
class Model(object):
def __init__(self, coordinator):
self.cluster_coord = coordinator
self.strategy = self.cluster_coord.strategy
with self.cluster_coord.strategy.scope():
self.build()
def build(self):
self.w = variables.Variable(
initial_value=random_ops.random_uniform((10, 10)), dtype=dtypes.float32)
self.iterations = variables.Variable(initial_value=0, dtype=dtypes.int32)
# Allow external control to make the model run its train_fn in an infinite
# loop. This allows us to reliably test worker preemption in the middle of
# function execution.
self.do_infinite_step = variables.Variable(False)
def dataset_fn():
data = random_ops.random_uniform((10, 10))
dataset = dataset_ops.DatasetV2.from_tensors([data]).repeat()
return dataset
self.iterator = iter(
self.cluster_coord.create_per_worker_dataset(dataset_fn))
def _train_fn_internal(self, iterator):
x = math_ops.matmul(array_ops.squeeze(next(iterator)), self.w)
x = math_ops.matmul(random_ops.random_uniform((10, 10)), x)
self.w.assign_add(x)
@def_function.function
def train_fn(self, iterator):
self._train_fn_internal(iterator)
while self.do_infinite_step:
self._train_fn_internal(iterator)
self.iterations.assign_add(1)
def schedule_training_functions(self, num_steps):
with self.strategy.scope():
for _ in range(num_steps):
self.cluster_coord.schedule(self.train_fn, args=(self.iterator,))
def join_training_functions(self):
self.do_infinite_step.assign(False)
self.cluster_coord.join()
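# Illustrative sketch (not part of the original test file): the calling pattern for
# `Model` with a ClusterCoordinator, mirroring how the test cases below drive it.
# `coordinator` is assumed to be a cluster_coordinator.ClusterCoordinator built on a
# ParameterServerStrategyV2, as in BaseFaultToleranceTest.setUp.
def _example_model_usage(coordinator):
  model = Model(coordinator)
  model.schedule_training_functions(2)  # enqueue two train steps on the workers
  model.join_training_functions()       # block until the scheduled steps finish
  return model.iterations.numpy()       # 2 once both steps have run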
class BaseFaultToleranceTest(object): # pylint: disable=missing-docstring
def setUp(self, num_workers, num_ps):
super(BaseFaultToleranceTest, self).setUp()
# Set the environment variable to prevent hanging upon job failure and
# restart. Note that it defaults to 'use_caller' at Google, but defaults
# to False in OSS.
os.environ["GRPC_FAIL_FAST"] = "use_caller"
self._cluster = multi_worker_test_base.create_multi_process_cluster(
num_workers=num_workers, num_ps=num_ps, rpc_layer="grpc")
self._cluster_def = self._cluster.cluster_resolver.cluster_spec().as_dict()
self._cluster_def["chief"] = [
"localhost:%d" % multi_worker_test_base.pick_unused_port()
]
cluster_resolver = SimpleClusterResolver(
server_lib.ClusterSpec(self._cluster_def), rpc_layer="grpc")
# The strategy's constructor would connect to the cluster.
self.strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
cluster_resolver)
self.cluster_coord = cluster_coordinator.ClusterCoordinator(self.strategy)
self.thread_coord = thread_coordinator.Coordinator(
clean_stop_exception_types=[])
self.num_workers = num_workers
self.num_ps = num_ps
def tearDown(self):
super(BaseFaultToleranceTest, self).tearDown()
self._cluster.stop()
self._cluster = None
def _restart(self, downtime_secs, job):
"""Kills `job` (index: 0) and restarts it after `downtime_secs`.
Args:
downtime_secs: secs before restarting the job.
job: a string specifying the job to restart.
"""
self._cluster.kill_task(job, 0)
time.sleep(downtime_secs)
self.assertFalse(context.check_alive("/job:%s/replica:0/task:0" % job))
self._cluster.start_task(job, 0)
while not context.check_alive("/job:%s/replica:0/task:0" % job):
time.sleep(1)
def _restart_in_thread(self, downtime_secs, restart_job):
def _restart_fn():
with self.thread_coord.stop_on_exception():
self._restart(downtime_secs, restart_job)
restart_thread = threading.Thread(target=_restart_fn)
restart_thread.start()
return restart_thread
def _ensure_threads_closed(self):
"""Ensures worker and preemption threads are closed."""
def _get_running_threads():
"""Returns a set of all running thread names."""
running_threads = set()
for thread in threading.enumerate():
if thread.name is not None:
running_threads.add(thread.name)
return running_threads
def _has_thread(prefix, running_threads):
"""Returns whether any 'running_threads' is prefixed with 'prefix'."""
for thread in running_threads:
if thread.startswith(prefix):
return True
return False
# Worker and preemption threads should exist before releasing
# ClusterCoordinator.
running_threads = _get_running_threads()
self.assertTrue(_has_thread(_WORKER_THREAD_PREFIX, running_threads))
self.assertIn(_WORKER_PREEMPTION_THREAD_NAME, running_threads)
# Wait for threads to close.
self.cluster_coord = None
self.strategy = None
gc.collect()
time.sleep(1)
# Verify thread names.
running_threads = _get_running_threads()
self.assertNotIn(_WORKER_PREEMPTION_THREAD_NAME, running_threads)
self.assertFalse(_has_thread(_WORKER_THREAD_PREFIX, running_threads))
def _create_model_and_run_indefinitely(self):
model = Model(self.cluster_coord)
model.do_infinite_step.assign(True)
model.schedule_training_functions(10)
# Model does infinite training step, so at this moment, we expect to have
# `self.num_workers` infinite closures inflight, and `10-self.num_workers`
# closures in the queue.
while (self.cluster_coord._cluster._closure_queue._inflight_closure_count <
self.num_workers):
time.sleep(0.1)
return model
def testClusterCoordinatorDestroyed(self):
self._ensure_threads_closed()
def testWorkerPreemptionBetweenFunctions(self):
model = Model(self.cluster_coord)
model.schedule_training_functions(2)
model.join_training_functions()
self.assertEqual(model.iterations.numpy(), 2)
self._restart(downtime_secs=2, job="worker")
model.schedule_training_functions(2)
model.join_training_functions()
self.assertEqual(model.iterations.numpy(), 4)
def testWorkerPreemptionMidstFunction(self):
model = Model(self.cluster_coord)
model.do_infinite_step.assign(True)
model.schedule_training_functions(4)
# Model does infinite training step, so at this moment, we expect to have
# `self.num_workers` infinite closures inflight, and `4-self.num_workers`
# closures in the queue.
while (self.cluster_coord._cluster._closure_queue._inflight_closure_count
< self.num_workers):
time.sleep(0.1)
self.assertFalse(self.cluster_coord.done())
self._restart(downtime_secs=2, job="worker")
model.join_training_functions()
self.assertGreaterEqual(model.iterations.numpy(), 4)
def testOneWorkerPreemptionWithCancellation(self):
@def_function.function
def normal_function():
x = random_ops.random_uniform((2, 10))
y = random_ops.random_uniform((10, 2))
return math_ops.reduce_mean(math_ops.matmul(x, y))
@def_function.function
def error_function():
x = random_ops.random_uniform((2, 10))
y = random_ops.random_uniform((10, 2))
check_ops.assert_non_positive_v2(
math_ops.reduce_sum(math_ops.matmul(x, y)))
return x
@def_function.function
def long_function():
x = random_ops.random_uniform((1000, 1000))
for _ in math_ops.range(10000):
a = random_ops.random_uniform((1000, 1000))
b = random_ops.random_uniform((1000, 1000))
x += math_ops.matmul(a, b)
return x
for _ in range(3):
self.cluster_coord.schedule(normal_function)
long_function_result = self.cluster_coord.schedule(long_function)
self.cluster_coord.schedule(error_function)
time.sleep(1) # Let it run a couple steps.
self._restart(1, "worker")
with self.assertRaises(errors.InvalidArgumentError):
self.cluster_coord.join()
with self.assertRaises(errors.CancelledError):
long_function_result.fetch()
for _ in range(3):
self.cluster_coord.schedule(normal_function)
self.cluster_coord.join()
# The cluster is likely still being recovered since `join` returned early
# due to the error_function.
failure_handler = self.cluster_coord._cluster.failure_handler
failure_handler.stop()
failure_handler._preemption_handler_thread.join()
def testHandleDatasetCreationFailure(self):
model = Model(self.cluster_coord)
restart_thread = self._restart_in_thread(5, "worker")
model.schedule_training_functions(3)
model.join_training_functions()
self.thread_coord.join([restart_thread])
self.assertGreaterEqual(model.iterations.numpy(), 3)
def testWorkerPreemptionErrorType(self):
@def_function.function
def worker_train_fn():
x = random_ops.random_uniform((2, 10))
y = random_ops.random_uniform((10, 2))
return math_ops.reduce_mean(math_ops.matmul(x, y))
def run_fn():
with self.thread_coord.stop_on_exception():
with ops.device("/job:worker/replica:0/task:0"):
for _ in range(3):
for _ in range(3):
worker_train_fn()
time.sleep(5)
run_thread = threading.Thread(target=run_fn)
run_thread.start()
time.sleep(1) # Let it run a couple steps.
self._restart(2, "worker")
try:
self.thread_coord.join([run_thread])
except errors.UnavailableError as e:
logging.info("Got exception %r, error message is %s", e, e)
self.assertIn(_RPC_ERROR_FROM_WORKER, str(e)) # pylint: disable=g-assert-in-except
self.assertNotIn(_RPC_ERROR_FROM_PS, str(e))
self.assertTrue("failed to connect to all addresses" in str(e) or
"Unable to find a context_id" in str(e) or
"Socket closed" in str(e) or
"Connection reset by peer" in str(e) or
"Transport closed" in str(e))
def testWorkerPreemptionErrorTypeWithPythonFunction(self):
def worker_train_fn():
x = random_ops.random_uniform((2, 10))
y = random_ops.random_uniform((10, 2))
return math_ops.reduce_mean(math_ops.matmul(x, y))
def run_fn():
with self.thread_coord.stop_on_exception():
with ops.device("/job:worker/replica:0/task:0"):
for _ in range(3):
for _ in range(3):
worker_train_fn()
time.sleep(5)
run_thread = threading.Thread(target=run_fn)
run_thread.start()
time.sleep(1) # Let it run a couple steps.
self._restart(2, "worker")
try:
self.thread_coord.join([run_thread])
except errors.UnavailableError as e:
logging.info("Got exception %r, error message is %s", e, e)
self.assertIn(_RPC_ERROR_FROM_WORKER, str(e)) # pylint: disable=g-assert-in-except
self.assertNotIn(_RPC_ERROR_FROM_PS, str(e))
self.assertTrue("failed to connect to all addresses" in str(e) or
"Unable to find a context_id" in str(e) or
"Socket closed" in str(e) or
"Connection reset by peer" in str(e) or
"Transport closed" in str(e))
def testPSPreemptionErrorType(self):
with ops.device("/job:ps/replica:0/task:0"):
v = variables.Variable(
initial_value=random_ops.random_uniform((2, 10)),
dtype=dtypes.float32)
@def_function.function
def worker_train_fn():
y = random_ops.random_uniform((10, 2))
return math_ops.reduce_mean(math_ops.matmul(v, y))
def run_fn():
with self.thread_coord.stop_on_exception():
with ops.device("/job:worker/replica:0/task:0"):
for _ in range(3):
for _ in range(3):
worker_train_fn()
time.sleep(5)
run_thread = threading.Thread(target=run_fn)
run_thread.start()
time.sleep(1) # Let it run a couple steps.
# Use a short restart delay to cover the case that RPC channel is reused
self._restart(1, "ps")
try:
self.thread_coord.join([run_thread])
except (errors.UnavailableError, errors.AbortedError) as e:
logging.info("Got exception %r, error message is %s", e, e)
self.assertIn(_RPC_ERROR_FROM_PS, str(e)) # pylint: disable=g-assert-in-except
if isinstance(e, errors.UnavailableError):
self.assertTrue("failed to connect to all addresses" in str(e) or
"Unable to find a context_id" in str(e) or
"Socket closed" in str(e) or
"Connection reset by peer" in str(e) or
"Transport closed" in str(e))
if isinstance(e, errors.AbortedError):
self.assertIn("RecvTensor expects a different device incarnation",
str(e))
self._ensure_threads_closed()
def testTwoWorkersPreempted(self):
if self.num_workers < 2:
self.skipTest("Worker number is less than 2.")
model = self._create_model_and_run_indefinitely()
self.assertFalse(self.cluster_coord.done())
self._cluster.kill_task("worker", 0)
self._cluster.kill_task("worker", 1)
time.sleep(2)
self.assertFalse(context.check_alive("/job:worker/replica:0/task:0"))
self.assertFalse(context.check_alive("/job:worker/replica:0/task:1"))
self._cluster.start_task("worker", 0)
self._cluster.start_task("worker", 1)
time.sleep(2)
self.assertTrue(context.check_alive("/job:worker/replica:0/task:0"))
self.assertTrue(context.check_alive("/job:worker/replica:0/task:1"))
model.join_training_functions()
self.assertGreaterEqual(model.iterations.numpy(), 10)
def testWorkerContinuousFailure(self):
model = self._create_model_and_run_indefinitely()
self.assertFalse(self.cluster_coord.done())
self._cluster.kill_task("worker", 0)
time.sleep(2)
self.assertFalse(context.check_alive("/job:worker/replica:0/task:0"))
self._cluster.start_task("worker", 0)
time.sleep(2)
self.assertTrue(context.check_alive("/job:worker/replica:0/task:0"))
self._cluster.kill_task("worker", 0)
time.sleep(2)
self.assertFalse(context.check_alive("/job:worker/replica:0/task:0"))
self._cluster.start_task("worker", 0)
time.sleep(2)
self.assertTrue(context.check_alive("/job:worker/replica:0/task:0"))
model.join_training_functions()
self.assertGreaterEqual(model.iterations.numpy(), 10)
  def testPSFailureWhileRecoveryFromWorkerFailure(self):
# Only by adding this empty test, can the problem of b/180348454 be
# reproduced.
# TODO(yuefengz): fill in this test.
pass
def testNumpyFetchedAfterWorkerFailure(self):
with self.strategy.scope():
v = variables.Variable(initial_value=0, dtype=dtypes.int32)
@def_function.function
def worker_fn():
return v + 1, v - 1
remote_value = self.cluster_coord.schedule(worker_fn)
# Attempt to fetch before killing worker task should succeed.
self.assertEqual((1, -1), remote_value.fetch())
self._cluster.kill_task("worker", 0)
# So should attempt to fetch after killing worker task.
self.assertEqual((1, -1), remote_value.fetch())
def testClusterStateNotDisrupted(self):
# This test has side effects and can disrupt other tests, even if the
# resource created by it will not be used in following tests.
# TODO(b/155209534): enable this test.
# self.testPSPreemptionErrorType()
self.thread_coord = thread_coordinator.Coordinator(
clean_stop_exception_types=[])
self.testWorkerPreemptionMidstFunction()
self.thread_coord = thread_coordinator.Coordinator(
clean_stop_exception_types=[])
self.testWorkerPreemptionErrorType()
# In previous tests, workers may fail after training is done. But the
# following tests start with creating resources where failure is not
# handled.
# TODO(b/153888707): enable the following two tests.
# self.testTwoWorkersPreempted()
# self.testWorkerContinuousFailure()
def testJoinRaisesUnavailableErrorAtPsFailure(self):
self._create_model_and_run_indefinitely()
self._cluster.kill_task("ps", 0)
while self.cluster_coord._cluster._closure_queue._error is None:
time.sleep(1)
with self.assertRaises((errors.UnavailableError, errors.NotFoundError,
errors.FailedPreconditionError)):
self.cluster_coord.join()
def testScheduleRaisesUnavailableErrorAtPsFailure(self):
self._create_model_and_run_indefinitely()
self._cluster.kill_task("ps", 0)
while self.cluster_coord._cluster._closure_queue._error is None:
time.sleep(1)
with self.assertRaises((errors.UnavailableError, errors.NotFoundError,
errors.FailedPreconditionError)):
self.cluster_coord.schedule(def_function.function(lambda: None))
def testWorkerExecutionAfterPsFailureRaisesExpectedError(self):
model = self._create_model_and_run_indefinitely()
for i in range(self.num_ps):
self._cluster.kill_task("ps", i)
while self.cluster_coord._cluster._closure_queue._error is None:
time.sleep(1)
@def_function.function
def trivial_function():
return model.iterations + 1
for i in range(self.num_workers):
try:
with ops.device("/job:worker/replica:0/task:{}".format(i)):
trivial_function()
except Exception as e: # pylint: disable=broad-except
if cluster_coordinator._is_ps_failure(e):
if i < self.num_workers - 1:
continue
return
raise AssertionError("Executing a function after PS fails, should "
"result in a PS failure.")
class MultiWorkerFaultToleranceTest(BaseFaultToleranceTest, test.TestCase):
"""Multi worker fault tolerance tests.
This covers the ordinary cases where multiple workers and PS are used.
"""
def setUp(self):
super(MultiWorkerFaultToleranceTest, self).setUp(2, 2)
class SingleWorkerFaultToleranceTest(BaseFaultToleranceTest, test.TestCase):
"""Single worker fault tolerance tests.
  This covers the cases that ensure training can continue in a single-worker
  cluster, even if the only worker becomes unavailable at some point and is
  later recovered (if there are multiple workers, it is possible that training
  succeeds with the workers that did not fail). Realistically a single-worker
  cluster is very rarely used, but these tests are important to ensure correct
  behavior.
"""
def setUp(self):
super(SingleWorkerFaultToleranceTest, self).setUp(1, 1)
if __name__ == "__main__":
v2_compat.enable_v2_behavior()
multi_process_runner.test_main()
|
connector.py
|
from socket import socket,AF_INET,SOCK_STREAM,gethostbyname,gethostname,SOL_SOCKET,SO_REUSEADDR
from threading import Thread,Lock
from time import sleep
from exe.proto import *
import traceback
xlock=Lock()
s=socket(AF_INET,SOCK_STREAM)
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
s.bind(('',5132))
s.listen(10)
def acceptor(data):
while 1:
try:
con,addr=s.accept()
ips=data['x']['ip']
if ips.get(addr[0]):
host=read(con)[0]
data['connects'].setdefault(addr[0],[]).append(con)
print(f'{host} s connected')
else:
host=read(con)[0]
data['x']['servers'].append(host)
data['x']['ip'][addr[0]]=host
data['x']['host'][host.split('.')[0]]=addr[0]
data['connects'].setdefault(addr[0],[]).append(con)
print(f'New host {host} connected',)
if data['host']==data['x']['master']:
for n in ips:
if data['connects'].get(n):
data['send'].put((data['x']['ip'][n].split('.')[0],{'n':'new','v':data['x']}))
except:
with open('err.log','a') as ff:
traceback.print_exc()
traceback.print_exc(file=ff)
def work(data):
Thread(target=acceptor,args=(data,)).start()
while 1:
try:
ips=data['x']['ip'].copy()
for n in ips:
if not data['connects'].get(n):
try:
s=socket(AF_INET,SOCK_STREAM)
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
s.connect((n,5132))
send(s,data['host'])
data['connects'].setdefault(n,[]).append(s)
print(f'{ips[n].split(".")[0]} c connected')
except:
with open('err.log','a') as ff:
ff.write(f'Can\'t connect {n}\n')
except:
with open('err.log','a') as ff:
traceback.print_exc(file=ff)
ff.write(str(data))
sleep(0.05)
if __name__=="__main__":
acceptor()
|
hub.py
|
import socket
from threading import Thread, Lock
from .common import generateSocket
from .transport import Transport
from . import transport  # module reference, used below for transport.TYPE_LOCAL / TYPE_REMOTE
#################
### CONSTANTS ###
#################
from .constants import PORT, TIMEOUT, SIZE
from .constants import MAX_RETRIES
###############################################################
#################
### HUB CLASS ###
#################
class Hub:
def __init__(self, port=None, timeout=TIMEOUT, size=SIZE):
self.socket = None
self.userDefinedPort = port is not None
self.port = port or PORT
self.timeout = timeout
self.size = size
self.transports = []
self.transportAddresses = []
self.stopped = False
self.opened = False
self.__open()
self.__start()
def __open(self):
while True:
try:
self.socket = generateSocket(self.timeout)
self.socket.bind(("", self.port))
self.socket.listen()
break
except OSError as e:
if self.userDefinedPort or self.port > (PORT + MAX_RETRIES):
raise RuntimeError("Socket address in use: {}".format(e))
self.port += 1
except socket.timeout:
continue
def __start(self):
if self.socket is None:
raise RuntimeError("Hub started without host socket")
self.opened = True
Thread(target=self.__run, args=()).start()
return self
def __run(self):
tmp = ""
while True:
if self.stopped:
for t in self.transports:
t.close()
self.socket.close()
return
try:
s, addr = self.socket.accept()
if addr not in self.transportAddresses:
self.transportAddresses.append(addr)
addr, port = addr
t = Transport(None, self.timeout, self.size)
t.receive(s, addr, port)
self.transports.append(t)
except socket.timeout:
continue
def connect(self, name, addr, port):
t = Transport(self.timeout, self.size)
t.connect(name, addr, port)
self.transports.append(t)
return self
def close(self):
self.opened = False
self.stopped = True
def getConnections(self):
return self.transports
##########################
### INTERFACE, GETTERS ###
##########################
def get_all(self, channel):
data = []
for t in self.transports:
tmp = t.get(channel)
if tmp is not None:
data.append(tmp)
return data
def get_by_name(self, name, channel):
data = []
for t in self.transports:
if t.name == name:
tmp = t.get(channel)
if tmp is not None:
data.append(tmp)
return data
def get_local(self, channel):
data = []
for t in self.transports:
if t.type == transport.TYPE_LOCAL:
tmp = t.get(channel)
if tmp is not None:
data.append(tmp)
return data
def get_remote(self, channel):
data = []
for t in self.transports:
if t.type == transport.TYPE_REMOTE:
tmp = t.get(channel)
if tmp is not None:
data.append(tmp)
return data
##########################
### INTERFACE, WRITERS ###
##########################
def write_all(self, channel, data):
for t in self.transports:
t.write(channel, data)
return self
def write_to_name(self, name, channel, data):
for t in self.transports:
if t.name == name:
t.write(channel, data)
return self
def write_to_local(self, channel, data):
for t in self.transports:
if t.type == transport.TYPE_REMOTE:
t.write(channel, data)
return self
def write_to_remote(self, channel, data):
for t in self.transports:
if t.type == transport.TYPE_LOCAL:
t.write(channel, data)
return self
def write_image_all(self, data):
for t in self.transports:
t.writeImg(data)
return self
def write_image_to_name(self, name, data):
for t in self.transports:
if t.name == name:
t.writeImg(data)
return self
def write_image_to_local(self, data):
for t in self.transports:
if t.type == transport.TYPE_REMOTE:
t.writeImg(data)
return self
def write_image_to_remote(self, data):
for t in self.transports:
if t.type == transport.TYPE_LOCAL:
t.writeImg(data)
return self
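# Illustrative sketch (not part of the original module): one way a Hub could be wired
# up. The channel name, peer address and Transport behaviour are assumptions; only the
# Hub methods defined above are used.
def _example_hub_usage():
    hub = Hub(port=5000)                      # bind a listening socket and start the accept loop
    hub.connect("peer", "127.0.0.1", 5001)    # also reach out to a known peer
    hub.write_all("status", {"ready": True})  # broadcast on the "status" channel
    readings = hub.get_all("status")          # collect whatever each transport holds for it
    hub.close()                               # stop the accept loop and close all transports
    return readings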
|
utils.py
|
# coding: utf-8
# This file is a part of VK4XMPP transport
# © simpleApps, 2014.
"""
Contains useful functions which are used across the modules
"""
import threading
import time
import xmpp
import urllib
from socket import error
from writer import *
isNumber = lambda obj: (not execute(int, (obj,), False) is None)
def execute(handler, list=(), log=True):
"""
Just executes handler(*list) safely
Writes a crashlog if errors occurred
"""
try:
result = handler(*list)
except (SystemExit, xmpp.NodeProcessed):
result = True
except Exception:
result = None
if log:
crashLog(handler.func_name)
return result
def runThread(func, args=(), name=None, att=3, delay=0):
"""
Runs a thread with custom args and name
Needed to reduce code
Parameters:
func: function you need to be running in a thread
args: function arguments
name: thread name
att: number of attempts
delay: if set, then threading.Timer will be started, not threading.Thread
"""
if delay:
logger.debug("threading: starting timer for %s%s, "
"name:%s, delay:%s" % (func.func_name, str(args), name, delay))
thr = threading.Timer(delay, execute, (func, args))
else:
thr = threading.Thread(target=execute, args=(func, args))
name = name or func.__name__
name = str(name) + "-" + str(time.time())
thr.name = name
try:
thr.start()
except (threading.ThreadError):
if att:
return runThread(func, args, name, (att - 1), delay)
crashLog("runThread.%s" % name)
return thr
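# Illustrative sketch (not part of the original module): typical runThread calls. The
# handler and its argument are hypothetical; the name and delay parameters follow the
# docstring above.
def _exampleRunThread():
    def greet(user):
        logger.debug("hello, %s" % user)
    # run immediately in a plain thread
    runThread(greet, ("alice",), name="greeter")
    # run the same handler after 5 seconds via threading.Timer
    runThread(greet, ("bob",), name="delayed-greeter", delay=5)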
def safe(func):
"""
Executes func(*args) safely
"""
def wrapper(*args):
try:
func(*args)
except xmpp.NodeProcessed:
pass
except Exception:
crashLog(func.func_name)
wrapper.__name__ = func.__name__
return wrapper
def cache(func):
"""
Caches user/group ids for future usage
"""
def wrapper(self, uid, fields=None):
fields = fields or []
call = False
if uid in self.cache:
for field in fields:
if field not in self.cache[uid]:
call = True
break
else:
call = True
if call:
result = func(self, uid, fields)
if "uid" in result:
del result["uid"]
if uid in self.cache:
self.cache[uid].update(result)
else:
self.cache[uid] = result
else:
result = self.cache[uid]
return result
wrapper.__name__ = func.__name__
return wrapper
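# Illustrative sketch (not part of the original module): how @cache is meant to be
# applied. The _ExampleAPI class and its getUser method are hypothetical; the decorator
# only requires the instance to expose a `cache` dict and the method to accept
# (self, uid, fields).
class _ExampleAPI(object):
    def __init__(self):
        self.cache = {}
    @cache
    def getUser(self, uid, fields=None):
        # pretend this is an expensive remote call returning a dict for the uid
        return {"uid": uid, "name": "user%s" % uid}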
def threaded(func):
"""
Another decorator.
Executes a function in a thread
"""
def wrapper(*args):
runThread(func, args)
wrapper.__name__ = "threaded_%s" % func.__name__
return wrapper
def buildDataForm(form=None, type="form", fields=[], title=None, data=[]):
"""
Provides easier method to build data forms using dict for each form object
Parameters:
form: xmpp.DataForm object
type: form type
fields: list of form objects represented as dict, e.g.
[{"var": "cool", "type": "text-single",
"desc": "my cool description", "value": "cool"}]
title: form title
data: advanced data for form. e.g.
instructions (if string in the list), look at xmpp/protocol.py:1326
"""
if title and form:
form.setTitle(title)
form = form or xmpp.DataForm(type, data, title)
for key in fields:
field = form.setField(key["var"], key.get("value"),
key.get("type"), key.get("desc"), key.get("options"))
if key.get("payload"):
field.setPayload(key["payload"])
if key.get("label"):
field.setLabel(key["label"])
if key.get("requred"):
field.setRequired()
return form
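# Illustrative sketch (not part of the original module): building a small settings form
# with buildDataForm. The field variables, labels and instruction text are hypothetical;
# the dict layout is the one described in the docstring above.
def _exampleBuildDataForm():
    return buildDataForm(
        type="form",
        title="Settings",
        fields=[{"var": "use_nicknames", "type": "boolean", "value": "1",
                 "label": "Show nicknames"},
                {"var": "status_text", "type": "text-single", "value": "online"}],
        data=["Change your transport settings below"])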
def buildIQError(stanza, error=xmpp.ERR_FEATURE_NOT_IMPLEMENTED, text=None):
"""
Provides a way to build IQ error reply
"""
error = xmpp.Error(stanza, error, True)
if text:
tag = error.getTag("error")
if tag:
tag.setTagData("text", text)
return error
def normalizeValue(value):
"""
Normalizes boolean values from dataform replies
"""
if isNumber(value):
value = int(value)
elif value and value.lower() == "true":
value = 1
else:
value = 0
return value
def getLinkData(url, encode=True):
"""
Gets link data and ignores any exceptions
Parameters:
encode: base64 data encode
"""
try:
opener = urllib.urlopen(url)
data = opener.read()
except (Exception, error):
return ""
if data and encode:
data = data.encode("base64")
return data
TIME_VALUES = {"s": 60, "m": 360, "d": 86400, "M": 2592000, "y": 31536000}
def TimeMachine(text):
"""
TARDIS Prototype
"""
time = 0
for i in xrange(0, len(text) - 1, 3):
current = text[i:i + 3]
x = current[-1]
if x in TIME_VALUES:
time += int(current[:-1]) * TIME_VALUES[x]
return time
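# Illustrative sketch (not part of the original module): TimeMachine reads the text in
# fixed 3-character chunks (a 2-digit number followed by a TIME_VALUES suffix) and sums
# the converted chunks, so with the table above "10m" -> 10 * 360 = 3600 and
# "01d10m" -> 1 * 86400 + 10 * 360 = 90000.
def _exampleTimeMachine():
    assert TimeMachine("10m") == 3600
    assert TimeMachine("01d10m") == 90000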
class ExpiringObject(object):
"""
Object that acts the same as the one it keeps
But also has a limited lifetime
"""
def __init__(self, obj, lifetime):
self.obj = obj
self.created = time.time()
self.lifetime = lifetime
def hasExpired(self):
return time.time() >= (self.created + self.lifetime)
def __getattr__(self, attr):
try:
result = object.__getattribute__(self, attr)
except AttributeError:
result = getattr(self.obj, attr)
return result
def __iter__(self):
if hasattr(self.obj, "__iter__"):
return self.obj.__iter__()
raise TypeError("Not iterable")
def next(self):
if hasattr(self.obj, "next"):
return self.obj.next()
raise TypeError("Not iterable")
# TODO what if our object isn't iterable?
def __str__(self):
result = ""
for num, i in enumerate(self.obj):
result += str(i)
if num < (len(self.obj) - 1):
result += ", "
return result
# Yay!
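# Illustrative sketch (not part of the original module): wrapping a list in an
# ExpiringObject. Attribute access is forwarded to the wrapped object, and hasExpired()
# flips once the lifetime (in seconds) has elapsed.
def _exampleExpiringObject():
    friends = ExpiringObject(["alice", "bob"], lifetime=60)
    assert not friends.hasExpired()       # still fresh
    assert len(list(friends)) == 2        # __iter__ is forwarded to the list
    assert str(friends) == "alice, bob"   # __str__ joins the wrapped items
    return friends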
|
transfer.py
|
import argparse
import json
import os
import queue
import threading
import time
import uuid
from argparse import RawTextHelpFormatter
from datetime import datetime
from pathlib import Path
try:
from watchdog.events import PatternMatchingEventHandler
from watchdog.observers import Observer
except ImportError:
print('A dependency is missing. Please install: '
'https://pythonhosted.org/watchdog/installation.html')
exit(1)
try:
import requests
except ImportError:
print('A dependency is missing. Please install: '
'https://2.python-requests.org/en/master/user/install/#install')
exit(1)
try:
import jsonlines
except ImportError:
print('A dependency is missing. Please install: '
'https://jsonlines.readthedocs.io/en/latest/#installation')
exit(1)
_queue = queue.Queue(256) # type: ignore
##########################
# Command line arguments #
##########################
def parse_arguments():
parser = argparse.ArgumentParser(description="""
Important! Before starting the image transfer:
1) Make sure to start the SDK.
2) Get your ParkPow API token from: https://app.parkpow.com/accounts/token (only needed if uploading results to ParkPow)
3) Get your PlateRecognizer API token from: https://app.platerecognizer.com/start/ (only needed if using the Cloud API instead of the local SDK)
Here is an example of how to call this script if using Local SDK:
python transfer.py --source /home/alpr/camera-images/ --archive /home/alpr/archived-images/ --alpr-api http://localhost:8080/alpr --parkpow-token MY_TOKEN --cam-pos 2
Here is an example of how to call this script when using the Cloud API:
python transfer.py --source /home/alpr/camera-images/ --archive /home/alpr/archived-images/ --alpr-api https://api.platerecognizer.com/v1/plate-reader --platerec-token MY_PLATEREC_TOKEN --parkpow-token MY_TOKEN --cam-pos 2
The path of each image must contain a directory with the camera name.
It is specified with the --cam-pos argument.
Once processed, images are moved to the archive directory.
If --use-parkpow is not set, the results will be saved to the file given by --output-file.
""",
formatter_class=RawTextHelpFormatter)
parser.add_argument('--source',
help='Where camera images are saved.',
type=str,
required=True)
parser.add_argument(
'--archive',
help='Where images are moved to archive after being processed.',
type=str,
required=True)
parser.add_argument('--parkpow-token',
help='API token for ParkPow.',
type=str,
required=False)
parser.add_argument('--platerec-token',
help='API token for PlateRecognizer.',
type=str,
required=False)
parser.add_argument(
'--cam-pos',
help=
'Position of the directory with camera name (.../4/3/2/1/image.jpg).\n'
'For example, with /home/export/parking/camera/july/image.jpg, set --cam-pos=2',
type=int,
required=True)
parser.add_argument('--workers',
help='Number of worker threads.',
type=int,
default=2)
parser.add_argument(
'--alpr-api',
help='URL of Cloud/SDK API.',
default='https://api.platerecognizer.com/v1/plate-reader')
parser.add_argument(
'--use-parkpow',
help='Upload results to ParkPow',
action='store_true')
parser.add_argument('--output-file',
help="Json file with response",
type=str,
required=False)
return parser.parse_args()
##################
# Process images #
##################
def image_transfer(src_path, args):
split = Path(src_path).parts
# make this better
if args.cam_pos >= len(split):
print('Image path does not match template. Call with -h to see help.')
return
filename = split[-1]
camera = split[-args.cam_pos - 1]
results = alpr(src_path, args)
if not results:
return
if not args.output_file:
payload = {
"results": json.dumps(results),
"camera": camera,
}
files = {
'image': (filename, open(src_path,
'rb'), 'application/octet-stream')
}
response = api_request(args, payload, files)
if not response:
return
else:
with jsonlines.open(args.output_file, mode='a') as json_file:
json_file.write(results)
response = results
# Move to archive
archive_dir = '{0}/{1}/{2:%Y}/{2:%m}/{2:%d}'.format(args.archive, camera,
datetime.now())
destination = '{}/{}={}'.format(archive_dir, uuid.uuid4(), filename)
try:
Path(archive_dir).mkdir(parents=True, exist_ok=True)
os.rename(src_path, destination)
except (PermissionError, OSError):
print('%s could not be moved to archive folder.' % src_path)
return dict(dest=destination, response=response)
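# Illustrative sketch (not part of the original script): how --cam-pos selects the
# camera directory, using the hypothetical path from the help text above. With
# cam_pos=2 the camera name is the directory two levels above the image file.
def example_cam_pos():
    parts = Path('/home/export/parking/camera/july/image.jpg').parts
    cam_pos = 2
    return parts[-cam_pos - 1]  # -> 'camera'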
def alpr(path, args):
print('Sending %s' % path)
try:
if 'localhost' in args.alpr_api:
time.sleep(1) # Wait for the whole image to arrive
with open(path, 'rb') as fp:
response = requests.post(args.alpr_api,
files=dict(upload=fp),
timeout=10)
else:
time.sleep(1) # Wait for the whole image to arrive
filename = os.path.basename(path)
response = requests.post(
args.alpr_api,
files=dict(upload=(filename, open(path, 'rb'),
'application/octet-stream')),
headers={'Authorization': 'Token ' + args.platerec_token})
except requests.exceptions.Timeout:
print('SDK: Timeout')
return
except ConnectionError:
print('SDK: ConnectionError')
return
except PermissionError:
print('SDK: %s could not be read.' % path)
return
    except Exception as e:
        print(e)
        return
    data = response.json()
# TODO: skip data if there is no change
if 'results' not in data:
print(data)
return []
return data['results']
def api_request(args, payload, files):
api_url = 'https://app.parkpow.com/api/v1/log-vehicle'
headers = {'Authorization': 'Token {}'.format(args.parkpow_token)}
try:
response = requests.post(api_url,
data=payload,
headers=headers,
files=files,
timeout=20)
except ConnectionError:
print('ParkPow API: ConnectionError')
return
except requests.exceptions.Timeout:
print('ParkPow API: Timeout')
return
return response
###################
# File monitoring #
###################
def worker(args):
while True:
image_transfer(_queue.get(), args)
_queue.task_done()
class Handler(PatternMatchingEventHandler):
def on_created(self, event):
try:
_queue.put(event.src_path)
except queue.Full:
            print('Queue is full. Skipping %s.' % event.src_path)
def main(args, debug=False):
if args.source in args.archive:
print('Archive argument should not be in source directory.')
return exit(1)
observer = Observer()
observer.schedule(Handler(ignore_directories=True,
patterns='*.jpg *.jpeg'.split()),
args.source,
recursive=True)
observer.start()
for _ in range(args.workers):
t = threading.Thread(target=worker, args=(args,))
t.daemon = True
t.start()
print('Monitoring source directory.')
try:
while True:
time.sleep(1 if debug else .25)
if debug:
break
except KeyboardInterrupt:
pass
print('Closing...')
observer.stop()
observer.join()
_queue.join()
def validate_env(args):
messages = []
Path(args.archive).mkdir(parents=True, exist_ok=True)
if not Path(args.archive).exists():
messages.append('%s does not exist.' % args.archive)
if not Path(args.source).exists():
messages.append('%s does not exist.' % args.source)
if not args.use_parkpow and not args.output_file:
messages.append("Pass argument --use-parkpow or the argument --output-file")
if '/v1/plate-reader' in args.alpr_api and not args.platerec_token:
messages.append(
"Missing argument --platerec-token or SDK argument --alpr-api")
elif '/alpr' in args.alpr_api:
try:
response = requests.get(args.alpr_api.rsplit('/', 1)[0], timeout=2)
except Exception:
response = None
if not response or response.status_code != 200:
messages.append('Make sure that the SDK is up and running (%s).' %
args.alpr_api)
if args.use_parkpow:
api_url = 'https://app.parkpow.com/api/v1/log-vehicle'
try:
response = requests.get(api_url.rsplit('/', 1)[0] +
'/parking-list',
headers={
'Authorization':
'Token {}'.format(args.parkpow_token)
},
timeout=2)
except Exception:
response = None
if not response or response.status_code != 200:
messages.append(response.json(
) if response else 'Parkpow server could not be reached.')
if len(messages) > 0:
print('Script initialization failed:')
print('\n'.join(messages))
print('Exiting...')
exit(1)
if __name__ == "__main__":
args = parse_arguments()
validate_env(args)
main(args)
|
test_tpe.py
|
from unittest import TestCase, main
import os, pty
import curses.ascii
import threading
import six
from faker import Faker
from payment_card_identifier import VISA
from telium.payment import TeliumData
from telium import *
class FakeTeliumDevice:
def __init__(self):
self._master, self._slave = pty.openpty()
self._s_name = os.ttyname(self._slave)
self._fake = Faker()
self._fake_device = threading.Thread(target=self.__run)
def run_instance(self):
self._fake_device.start()
@property
def s_name(self):
return self._s_name
@staticmethod
def _has_signal(data, signal):
return (data[0] if six.PY3 else ord(data[0])) == curses.ascii.controlnames.index(signal)
@staticmethod
def _create_signal(signal):
return bytes([curses.ascii.controlnames.index(signal)]) if six.PY3 else bytes(chr(curses.ascii.controlnames.index(signal)))
def _wait_signal(self, signal):
return FakeTeliumDevice._has_signal(os.read(self._master, 1), signal)
def _send_signal(self, signal):
os.write(self._master, FakeTeliumDevice._create_signal(signal))
def __run(self):
if self._wait_signal('ENQ'):
self._send_signal('ACK')
raw_data = os.read(self._master, TERMINAL_ANSWER_COMPLETE_SIZE)
if TeliumData.lrc_check(raw_data) is True:
payment_pending = TeliumAsk.decode(raw_data)
print('from slave : ', payment_pending.__dict__)
self._send_signal('ACK') # Accept data from master
if not self._wait_signal('EOT'):
self._send_signal('NAK')
exit(1)
if payment_pending.answer_flag == TERMINAL_ANSWER_SET_FULLSIZED:
my_response = TeliumResponse(
payment_pending.pos_number,
TERMINAL_PAYMENT_SUCCESS,
payment_pending.amount,
payment_pending.payment_mode,
(self._fake.credit_card_number(card_type='visa16') + ' ' + '0' * 37),
payment_pending.currency_numeric,
'0' * 10
)
elif payment_pending.answer_flag == TERMINAL_ANSWER_SET_SMALLSIZED:
my_response = TeliumResponse(
payment_pending.pos_number,
TERMINAL_PAYMENT_SUCCESS,
payment_pending.amount,
payment_pending.payment_mode,
None,
payment_pending.currency_numeric,
'0' * 10
)
else:
self._send_signal('NAK')
exit(1)
self._send_signal('ENQ')
if self._wait_signal('ACK'):
os.write(
self._master,
bytes(my_response.encode(), TERMINAL_DATA_ENCODING) if six.PY3 else bytes(my_response.encode())
)
if self._wait_signal('ACK'):
self._send_signal('EOT')
exit(0)
self._send_signal('NAK')
else:
self._send_signal('NAK')
exit(1)
else:
self._send_signal('NAK')
exit(1)
class TestTPE(TestCase):
def setUp(self):
self._fake_device = FakeTeliumDevice()
def test_demande_paiement_fullsized_repport(self):
self._fake_device.run_instance()
my_telium_instance = Telium(self._fake_device.s_name)
self.assertFalse(my_telium_instance.debugging)
self.assertTrue(my_telium_instance.is_open)
self.assertEqual(my_telium_instance.timeout, 1)
self.assertTrue(my_telium_instance.close())
self.assertTrue(my_telium_instance.open())
# Construct our payment infos
my_payment = TeliumAsk(
'1', # Checkout ID 1
TERMINAL_ANSWER_SET_FULLSIZED, # Ask for fullsized repport
TERMINAL_MODE_PAYMENT_DEBIT, # Ask for debit
TERMINAL_TYPE_PAYMENT_CARD, # Using a card
TERMINAL_NUMERIC_CURRENCY_EUR, # Set currency to EUR
            TERMINAL_REQUEST_ANSWER_WAIT_FOR_TRANSACTION,  # Wait for the transaction to end before the terminal answers
TERMINAL_FORCE_AUTHORIZATION_DISABLE, # Let device choose if we should ask for authorization
12.5 # Ask for 12.5 EUR
)
# Send payment infos to device
self.assertTrue(my_telium_instance.ask(my_payment))
my_answer = my_telium_instance.verify(my_payment)
self.assertIsNotNone(my_answer)
print('from master : ', my_answer.__dict__)
self.assertEqual(my_answer.transaction_result, 0)
self.assertEqual(my_answer.currency_numeric, TERMINAL_NUMERIC_CURRENCY_EUR)
self.assertEqual(my_answer.private, '0' * 10)
self.assertIsInstance(my_answer.card_type, VISA)
self.assertEqual(my_answer.card_type.numbers, my_answer.repport.split(' ')[0])
self.assertIsInstance(my_answer.__dict__.get('_card_type'), dict)
self.assertEqual(my_answer.card_id[0], '4')
self.assertTrue(my_telium_instance.close())
self.assertFalse(my_telium_instance.close())
def test_demande_paiement_smallsized_repport(self):
self._fake_device.run_instance()
my_telium_instance = Telium(self._fake_device.s_name, debugging=True)
self.assertFalse(my_telium_instance.open())
self.assertTrue(my_telium_instance.debugging)
# Construct our payment infos
my_payment = TeliumAsk(
'1', # Checkout ID 1
            TERMINAL_ANSWER_SET_SMALLSIZED,  # Ask for smallsized repport
TERMINAL_MODE_PAYMENT_DEBIT, # Ask for debit
TERMINAL_TYPE_PAYMENT_CARD, # Using a card
TERMINAL_NUMERIC_CURRENCY_EUR, # Set currency to EUR
            TERMINAL_REQUEST_ANSWER_WAIT_FOR_TRANSACTION,  # Wait for the transaction to end before the terminal answers
TERMINAL_FORCE_AUTHORIZATION_DISABLE, # Let device choose if we should ask for authorization
            91.1  # Ask for 91.10 EUR
)
# Send payment infos to device
self.assertTrue(my_telium_instance.ask(my_payment, True))
my_answer = my_telium_instance.verify(my_payment)
self.assertIsNotNone(my_answer)
print('from master : ', my_answer.__dict__)
self.assertEqual(my_answer.transaction_result, 0)
self.assertEqual(my_answer.currency_numeric, TERMINAL_NUMERIC_CURRENCY_EUR)
self.assertEqual(my_answer.private, '0' * 10)
self.assertEqual(my_answer.repport, '')
def test_initialization_failed(self):
my_telium_instance = Telium(self._fake_device.s_name)
# Construct our payment infos
my_payment = TeliumAsk(
'1', # Checkout ID 1
            TERMINAL_ANSWER_SET_SMALLSIZED,  # Ask for smallsized repport
TERMINAL_MODE_PAYMENT_DEBIT, # Ask for debit
TERMINAL_TYPE_PAYMENT_CARD, # Using a card
TERMINAL_NUMERIC_CURRENCY_EUR, # Set currency to EUR
            TERMINAL_REQUEST_ANSWER_WAIT_FOR_TRANSACTION,  # Wait for the transaction to end before the terminal answers
TERMINAL_FORCE_AUTHORIZATION_DISABLE, # Let device choose if we should ask for authorization
            91.1  # Ask for 91.10 EUR
)
with self.assertRaises(TerminalInitializationFailedException):
my_telium_instance.ask(my_payment)
if __name__ == '__main__':
main()
|
proxy.py
|
import sys
import socket
import threading
# this is a pretty hex dumping function directly taken from
# http://code.activestate.com/recipes/142812-hex-dumper/
def hexdump(src, length=16):
result = []
digits = 4 if isinstance(src, unicode) else 2
for i in xrange(0, len(src), length):
s = src[i:i+length]
hexa = b' '.join(["%0*X" % (digits, ord(x)) for x in s])
text = b''.join([x if 0x20 <= ord(x) < 0x7F else b'.' for x in s])
result.append( b"%04X %-*s %s" % (i, length*(digits + 1), hexa, text) )
print b'\n'.join(result)
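# Illustrative sketch (not part of the original script): hexdump prints one row per 16
# bytes with the offset, the hex bytes and a printable-ASCII rendering. The request
# string below is just a sample.
def example_hexdump():
    hexdump("GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")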
def receive_from(connection):
buffer = ""
    # We set a 2 second timeout; depending on your
    # target, this may need to be adjusted
connection.settimeout(2)
try:
# keep reading into the buffer until there's no more data
# or we time out
while True:
data = connection.recv(4096)
if not data:
break
buffer += data
except:
pass
return buffer
# modify any requests destined for the remote host
def request_handler(buffer):
# perform packet modifications
return buffer
# modify any responses destined for the local host
def response_handler(buffer):
# perform packet modifications
return buffer
def proxy_handler(client_socket, remote_host, remote_port, receive_first):
# connect to the remote host
remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
remote_socket.connect((remote_host,remote_port))
# receive data from the remote end if necessary
if receive_first:
remote_buffer = receive_from(remote_socket)
hexdump(remote_buffer)
# send it to our response handler
remote_buffer = response_handler(remote_buffer)
# if we have data to send to our local client send it
if len(remote_buffer):
print "[<==] Sending %d bytes to localhost." % len(remote_buffer)
client_socket.send(remote_buffer)
    # now loop: read from local, send to remote, read the response,
    # send it back to local. rinse, wash, repeat
while True:
# read from local host
local_buffer = receive_from(client_socket)
if len(local_buffer):
print "[==>] Received %d bytes from localhost." % len(local_buffer)
hexdump(local_buffer)
# send it to our request handler
local_buffer = request_handler(local_buffer)
# send off the data to the remote host
remote_socket.send(local_buffer)
print "[==>] Sent to remote."
# receive back the response
remote_buffer = receive_from(remote_socket)
if len(remote_buffer):
print "[<==] Received %d bytes from remote." % len(remote_buffer)
hexdump(remote_buffer)
# send to our response handler
remote_buffer = response_handler(remote_buffer)
# send the response to the local socket
client_socket.send(remote_buffer)
print "[<==] Sent to localhost."
# if no more data on either side close the connections
if not len(local_buffer) or not len(remote_buffer):
client_socket.close()
remote_socket.close()
print "[*] No more data. Closing connections."
break
def server_loop(local_host,local_port,remote_host,remote_port,receive_first):
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
server.bind((local_host,local_port))
except:
print "[!!] Failed to listen on %s:%d" % (local_host,local_port)
print "[!!] Check for other listening sockets or correct permissions."
sys.exit(0)
print "[*] Listening on %s:%d" % (local_host,local_port)
server.listen(5)
while True:
client_socket, addr = server.accept()
# print out the local connection information
print "[==>] Received incoming connection from %s:%d" % (addr[0],addr[1])
# start a thread to talk to the remote host
proxy_thread = threading.Thread(target=proxy_handler,args=(client_socket,remote_host,remote_port,receive_first))
proxy_thread.start()
def main():
# no fancy command line parsing here
if len(sys.argv[1:]) != 5:
print "Usage: ./proxy.py [localhost] [localport] [remotehost] [remoteport] [receive_first]"
print "Example: ./proxy.py 127.0.0.1 9000 10.12.132.1 9000 True"
sys.exit(0)
# setup local listening parameters
local_host = sys.argv[1]
local_port = int(sys.argv[2])
# setup remote target
remote_host = sys.argv[3]
remote_port = int(sys.argv[4])
# this tells our proxy to connect and receive data
# before sending to the remote host
receive_first = sys.argv[5]
if "True" in receive_first:
receive_first = True
else:
receive_first = False
# now spin up our listening socket
server_loop(local_host,local_port,remote_host,remote_port,receive_first)
if __name__ == '__main__':
    main()
|
run.py
|
from OnlineHeart import OnlineHeart
from Silver import Silver
from LotteryResult import LotteryResult
from Tasks import Tasks
from connect import connect
from rafflehandler import Rafflehandler
import asyncio
from login import login
from printer import Printer
from statistics import Statistics
from bilibili import bilibili
import threading
import biliconsole
from pkLottery import PKLottery
loop = asyncio.get_event_loop()
loop1 = asyncio.get_event_loop()
printer = Printer()
bilibili()
Statistics()
rafflehandler = Rafflehandler()
biliconsole.Biliconsole()
task = OnlineHeart()
task1 = Silver()
task2 = Tasks()
task4 = connect()
task5 = PKLottery()
tasks1 = [
login().login_new()
]
loop.run_until_complete(asyncio.wait(tasks1))
console_thread = threading.Thread(target=biliconsole.controler)
console_thread.start()
tasks = [
task.run(),
task1.run(),
task2.run(),
biliconsole.Biliconsole().run(),
task4.create(),
rafflehandler.run(),
task5.run()
]
loop.run_until_complete(asyncio.wait(tasks, return_when=asyncio.FIRST_EXCEPTION))
loop.close()
console_thread.join()
|
clientserver.py
|
#!/usr/bin/python3.4
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generic server builder for creating networked client-server protocols that are
text based. Provides both a thread based and subprocess based handler model.
Intended to simplify building simple, low-volume client-server applications.
"""
__all__ = [
'ForkingModel', 'ThreadProcessModel', 'SyncronousModel', 'StreamWorker',
'DatagramWorker', 'TCPWorker', 'UDPWorker', 'UnixStreamWorker',
'UnixDatagramWorker', 'Server', 'StreamServer', 'DatagramServer',
'TCPServer', 'UDPServer', 'UnixStreamServer', 'UnixDatagramServer',
'TCPClient', 'UnixStreamClient', 'UDPClient', 'UnixDatagramClient',
'get_client', 'get_server']
import sys
from pycopia import socket_functions
from pycopia import protocols
class ProcessModel:
"""Determines process model to use for workers."""
def __call__(self, func):
raise NotImplementedError
class ForkingModel(ProcessModel):
"""Fork new handlers as submethod. This is the default process model."""
def __init__(self, pwent=None):
from pycopia import proctools
self._procmanager = proctools.get_procmanager()
self.pwent = pwent
def __call__(self, func, args=None, kwargs=None):
self._procmanager.submethod(func,
args=args or (), kwargs=kwargs or {},
pwent=self.pwent)
class ThreadProcessModel(ProcessModel):
"""This process model uses threads. TODO This is not complete."""
def __init__(self):
import threading
self.Thread = threading.Thread
def __call__(self, func, args=None, kwargs=None):
t = self.Thread(target=func, args=args or (), kwargs=kwargs or {})
t.start()
class SyncronousModel(ProcessModel):
"""For simple, synchronous applications requiring only one handler at a
time.
"""
def __call__(self, func, args=None, kwargs=None):
args = args or ()
kwargs = kwargs or {}
return func(*args, **kwargs)
# worker classes for servers:
class BaseWorker:
PORT = None
PATH = None
def __init__(self, sock, addr, protocol):
self._sock = sock
sock.setblocking(False)
sock.settimeout(0.0)
self.address = addr
self.protocol = protocol
def __del__(self):
self.close()
def close(self):
if self._sock is not None:
self._sock.close()
self._sock = None
def __call__(self):
        raise NotImplementedError(
            "Worker objects should be callable. Override in subclass.")
def initialize(self):
pass
def finalize(self):
pass
class StreamWorker(BaseWorker):
EOL = b"\n"
def __call__(self):
fo = self._sock.makefile("rwb", 0)
self.initialize()
rv = self.run(fo)
self.finalize()
fo.flush()
fo.close()
return rv
def run(self, stream):
try:
self.protocol.run(stream, self.address)
except protocols.ProtocolExit:
return True
# datagram workers:
class DatagramWorker(BaseWorker):
def __call__(self, data):
self.initialize()
self.run(data)
self.finalize()
def run(self, stream):
try:
self.protocol.run(stream, self.address)
except protocols.ProtocolExit:
return True
# Real worker classes you can use.
class TCPWorker(StreamWorker):
pass
class UDPWorker(DatagramWorker):
pass
class UnixStreamWorker(StreamWorker):
pass
class UnixDatagramWorker(DatagramWorker):
pass
# server objects:
class Server:
"""Base class for all servers."""
PORT = None # define in subclass
_sock = None
def fileno(self):
return self._sock.fileno()
def __del__(self):
self.close()
def close(self):
if self._sock is not None:
self._sock.close()
self._sock = None
def run(self):
try:
while 1:
self.accept()
except KeyboardInterrupt:
return
class StreamServer(Server):
def accept(self):
conn, addr = self._sock.accept()
worker = self.workerclass(conn, addr, self.protocol)
try:
if self.debug:
return worker()
else:
self._procmanager(worker)
finally:
conn.close()
class DatagramServer(Server):
def accept(self): # TODO test this
data, addr = self._sock.recvfrom(4096)
worker = self.workerclass(self._sock, addr, self.protocol)
if self.debug:
worker(data)
else:
worker(data)
class TCPServer(StreamServer):
PORT = None
def __init__(self, workerclass, protocol, port=None, host=None,
processmodel=None, debug=False):
port = port or workerclass.PORT or self.PORT
host = host or ""
self._procmanager = processmodel or ForkingModel()
self.workerclass = workerclass
self.protocol = protocol
self.debug = debug
self._sock = socket_functions.tcp_listener((host, port), 5)
_host, self.server_port = self._sock.getsockname()
self.server_name = socket_functions.getfqdn(_host)
class UDPServer(DatagramServer):
PORT = None
def __init__(self, workerclass, protocol, port=None, host=None,
debug=False):
port = port or workerclass.PORT or self.PORT
host = host or ""
self._sock = socket_functions.udp_listener((host, port))
self.workerclass = workerclass
self.protocol = protocol
self.debug = debug
class UnixStreamServer(StreamServer):
PATH = "/tmp/_UnixStream"
def __init__(self, workerclass, protocol, path=None, processmodel=None,
debug=False):
path = path or workerclass.PATH or self.PATH
self._sock = socket_functions.unix_listener(path)
self.workerclass = workerclass
self.protocol = protocol
self._procmanager = processmodel or ForkingModel()
self.debug = debug
class UnixDatagramServer(DatagramServer):
PATH = "/tmp/_UnixDatagram"
def __init__(self, workerclass, protocol, path=None, debug=False):
path = path or workerclass.PATH or self.PATH
self._sock = socket_functions.unix_listener(path)
self.workerclass = workerclass
self.protocol = protocol
self.debug = debug
# Client side base classes
class _BaseClient:
_sock = None
def __del__(self):
self.close()
def close(self):
if self._sock is not None:
self._sock.close()
self._sock = None
def run(self):
try:
self.protocol.run(self)
except protocols.ProtocolExit:
return True
class _StreamClient(_BaseClient):
EOL = b"\n"
def __init__(self, sock, protocol, logfile=None):
self._sock = sock.makefile("rwb", 0)
sock.close()
self.protocol = protocol
self._logfile = logfile
def readline(self):
data = self._sock.readline()
if self._logfile:
self._logfile.write(data)
return data
def read(self, n):
data = self._sock.read(n)
if self._logfile:
self._logfile.write(data)
return data
def write(self, data):
if self._logfile:
self._logfile.write(data)
return self._sock.write(data)
class _DatagramClient(_BaseClient):
def __init__(self, sock, protocol, logfile=None):
self._sock = sock
self.protocol = protocol
self._logfile = logfile
def readline(self):
data, addr = self._sock.recvfrom(4096)
if self._logfile:
self._logfile.write(data)
return data
def write(self, data):
return self._sock.send(data)
class TCPClient(_StreamClient):
"""A client side of a TCP protocol."""
PORT = 9999
def __init__(self, host, protocol, port=None, logfile=None):
self._sock = None
port = port or self.PORT
sock = socket_functions.connect_tcp(host, port)
self.host = host
self.port = port
super(TCPClient, self).__init__(sock, protocol, logfile)
class UnixStreamClient(_StreamClient):
"""A client side of a UNIX socket protocol."""
PATH = "/tmp/_UnixStream"
def __init__(self, protocol, path=None, logfile=None):
self._sock = None
if path is None:
path = self.PATH
sock = socket_functions.connect_unix(path)
super(UnixStreamClient, self).__init__(sock, protocol, logfile)
class UDPClient(_DatagramClient):
"""A client side of a UDP protocol."""
PORT = 9999
def __init__(self, host, protocol, port=None, logfile=None):
self._sock = None
port = port or self.PORT
sock = socket_functions.connect_udp(host, port)
super(UDPClient, self).__init__(sock, protocol, logfile)
class UnixDatagramClient(_DatagramClient):
"""A client side of a UNIX datagram protocol."""
PATH = "/tmp/_UnixDatagram"
def __init__(self, protocol, path=None, logfile=None):
self._sock = None
if path is None:
path = self.PATH
sock = socket_functions.connect_unix_datagram(path)
super(UnixDatagramClient, self).__init__(sock, protocol, logfile)
class DefaultProtocol(protocols.Protocol):
    """Default and example of constructing a protocol."""
def initialize(self):
states = self.states
states.set_default_transition(self._error, states.RESET)
def _error(self, matchobject):
print("Error: symbol: {}, from: {}".format(matchobject.string,
self.states.current_state),
file=sys.stderr)
# helper to import a named object
def _get_class(name):
parts = name.split(".")
modname = ".".join(parts[:-1])
__import__(modname)
return getattr(sys.modules[modname], parts[-1])
def get_client(name, dest, protocol, port=None, logfile=None):
"""Factory function for getting a proper client object.
Provide the name of the client class, proper destination address, and
protocol object.
"""
if type(name) is str:
clientclass = _get_class(name)
elif type(name) is type:
clientclass = name
else:
raise ValueError("invalid object for 'name' parameter: %s" % (name,))
assert issubclass(clientclass, _BaseClient), "need some Client type"
assert isinstance(protocol, protocols.Protocol), "need protocol type"
if issubclass(clientclass, TCPClient):
return clientclass(dest, protocol, port=port, logfile=logfile)
if issubclass(clientclass, UnixStreamClient):
return clientclass(protocol, path=dest, logfile=logfile)
if issubclass(clientclass, UDPClient):
return clientclass(dest, protocol, port=port, logfile=logfile)
if issubclass(clientclass, UnixDatagramClient):
return clientclass(protocol, path=dest, logfile=logfile)
def get_server(name, protocol, host=None, port=None, path=None, debug=False):
"""General factory for server worker.
Give the pathname of a worker class object.
Returns the appropriate type of server for it.
"""
if type(name) is str:
workerclass = _get_class(name)
elif type(name) is type:
workerclass = name
else:
raise ValueError("invalid object for 'name' parameter: %s" % (name,))
assert issubclass(workerclass, BaseWorker), "need BaseWorker type"
if issubclass(workerclass, TCPWorker):
srv = TCPServer(workerclass, protocol, port=port, host=host,
debug=debug)
elif issubclass(workerclass, UDPWorker):
srv = UDPServer(workerclass, protocol, port=port, host=host,
debug=debug)
elif issubclass(workerclass, UnixStreamWorker):
srv = UnixStreamServer(workerclass, protocol, path=path, debug=debug)
elif issubclass(workerclass, UnixDatagramWorker):
srv = UnixDatagramServer(workerclass, protocol, path=path, debug=debug)
else:
raise ValueError("get_server: Could not get server")
return srv
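# A minimal usage sketch (illustrative only; assumes DefaultProtocol above is an
# adequate protocol implementation for the worker in question):
#
#     proto = DefaultProtocol()
#     srv = get_server(TCPWorker, proto, host="localhost", port=9999, debug=True)
#     srv.run()  # serve until KeyboardInterrupt
#
#     client = get_client(TCPClient, "localhost", proto, port=9999)
#     client.run()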
|
__init__.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import bisect
import difflib
import gc
import http.client
import hashlib
import heapq
import lz4.frame
import math
import mmap
import operator
import os
import re
import sys
import tempfile
import threading
import time
import xxhash
import numpy as np
import uuid
from annoy import AnnoyIndex
from copy import deepcopy
from fasteners import InterProcessLock
from itertools import cycle, islice, chain, product, tee
from numbers import Number
from time import sleep
from pymagnitude.converter_shared import DEFAULT_NGRAM_END
from pymagnitude.converter_shared import BOW, EOW
from pymagnitude.converter_shared import CONVERTER_VERSION
from pymagnitude.converter_shared import fast_md5_file
from pymagnitude.converter_shared import char_ngrams
from pymagnitude.converter_shared import norm_matrix
from pymagnitude.converter_shared import unroll_elmo
from pymagnitude.converter_shared import KeyList
from pymagnitude.third_party.repoze.lru import lru_cache
try:
from itertools import imap
except ImportError:
imap = map
try:
from itertools import izip
except ImportError:
izip = zip
try:
unicode
except NameError:
unicode = str
try:
from http.client import CannotSendRequest, ResponseNotReady
except BaseException:
from httplib import CannotSendRequest, ResponseNotReady
try:
from urllib.request import urlretrieve
except BaseException:
from urllib import urlretrieve
try:
from urllib.parse import urlparse
except BaseException:
from urlparse import urlparse
try:
xrange
except NameError:
xrange = range
# Import AllenNLP
sys.path.append(os.path.dirname(__file__) + '/third_party/')
sys.path.append(os.path.dirname(__file__) + '/third_party_mock/')
from pymagnitude.third_party.allennlp.commands.elmo import ElmoEmbedder
# Import SQLite
try:
sys.path.append(os.path.dirname(__file__) + '/third_party/')
sys.path.append(os.path.dirname(__file__) + '/third_party/internal/')
from pymagnitude.third_party.internal.pysqlite2 import dbapi2 as sqlite3
db = sqlite3.connect(':memory:')
db.close()
_SQLITE_LIB = 'internal'
except Exception:
import sqlite3
_SQLITE_LIB = 'system'
# Import SQLite (APSW)
try:
import pymagnitude.third_party.internal.apsw as apsw
db = apsw.Connection(':memory:')
db.close()
_APSW_LIB = 'internal'
except Exception:
_APSW_LIB = 'none'
DEFAULT_LRU_CACHE_SIZE = 1000
def _sqlite_try_max_variable_number(num):
""" Tests whether SQLite can handle num variables """
db = sqlite3.connect(':memory:')
try:
db.cursor().execute(
"SELECT 1 IN (" + ",".join(["?"] * num) + ")",
([0] * num)
).fetchall()
return num
except BaseException:
return -1
finally:
db.close()
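# For example, _sqlite_try_max_variable_number(999) returns 999 when the linked
# SQLite build accepts 999 bound parameters in one statement, and -1 otherwise.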
# Log function
def _log(*args):
args = list(args)
args[0] = "[Magnitude] " + args[0]
if not _log.disable_message:
print("[Magnitude] Magnitude is logging messages for slow "
"operations to standard error. To turn this"
" off pass log=False to the Magnitude "
"constructor.", file=sys.stderr)
_log.disable_message = True
print(*args, file=sys.stderr)
_log.disable_message = False
class Magnitude(object):
SQLITE_LIB = _SQLITE_LIB
APSW_LIB = _APSW_LIB
NGRAM_BEG = 1
NGRAM_END = DEFAULT_NGRAM_END
BOW = BOW
EOW = EOW
RARE_CHAR = u"\uF002".encode('utf-8')
FTS_SPECIAL = set('*^')
MMAP_THREAD_LOCK = {}
OOV_RNG_LOCK = threading.Lock()
SQLITE_MAX_VARIABLE_NUMBER = max(max((_sqlite_try_max_variable_number(n)
for n in [99, 999, 9999, 99999])), 1)
MAX_KEY_LENGTH_FOR_STEM = 150
MAX_KEY_LENGTH_FOR_OOV_SIM = 1000
ENGLISH_PREFIXES = ['counter', 'electro', 'circum', 'contra', 'contro',
'crypto', 'deuter', 'franco', 'hetero', 'megalo',
'preter', 'pseudo', 'after', 'under', 'amphi',
'anglo', 'astro', 'extra', 'hydro', 'hyper', 'infra',
'inter', 'intra', 'micro', 'multi', 'ortho', 'paleo',
'photo', 'proto', 'quasi', 'retro', 'socio', 'super',
'supra', 'trans', 'ultra', 'anti', 'back', 'down',
'fore', 'hind', 'midi', 'mini', 'over', 'post',
'self', 'step', 'with', 'afro', 'ambi', 'ante',
'anti', 'arch', 'auto', 'cryo', 'demi', 'demo',
'euro', 'gyro', 'hemi', 'homo', 'hypo', 'ideo',
'idio', 'indo', 'macr', 'maxi', 'mega', 'meta',
'mono', 'mult', 'omni', 'para', 'peri', 'pleo',
'poly', 'post', 'pros', 'pyro', 'semi', 'tele',
'vice', 'dis', 'dis', 'mid', 'mis', 'off', 'out',
'pre', 'pro', 'twi', 'ana', 'apo', 'bio', 'cis',
'con', 'com', 'col', 'cor', 'dia', 'dis', 'dif',
'duo', 'eco', 'epi', 'geo', 'im ', 'iso', 'mal',
'mon', 'neo', 'non', 'pan', 'ped', 'per', 'pod',
'pre', 'pro', 'pro', 'sub', 'sup', 'sur', 'syn',
'syl', 'sym', 'tri', 'uni', 'be', 'by', 'co', 'de',
'en', 'em', 'ex', 'on', 're', 'un', 'un', 'up', 'an',
'an', 'ap', 'bi', 'co', 'de', 'di', 'di', 'du', 'en',
'el', 'em', 'ep', 'ex', 'in', 'in', 'il', 'ir', 'sy',
'a', 'a', 'a']
ENGLISH_PREFIXES = sorted(
chain.from_iterable([(p + '-', p) for p in ENGLISH_PREFIXES]),
key=lambda x: len(x), reverse=True)
ENGLISH_SUFFIXES = ['ification', 'ologist', 'ology', 'ology', 'able',
'ible', 'hood', 'ness', 'less', 'ment', 'tion',
'logy', 'like', 'ise', 'ize', 'ful', 'ess', 'ism',
'ist', 'ish', 'ity', 'ant', 'oid', 'ory', 'ing', 'fy',
'ly', 'al']
ENGLISH_SUFFIXES = sorted(
chain.from_iterable([('-' + s, s) for s in ENGLISH_SUFFIXES]),
key=lambda x: len(x), reverse=True)
    def __new__(cls, *args, **kwargs):
        """ Returns a ConcatenatedMagnitude object if the arguments are Magnitude objects """
if len(args) > 0 and isinstance(args[0], Magnitude):
obj = object.__new__(ConcatenatedMagnitude, *args, **kwargs)
obj.__init__(*args, **kwargs)
else:
obj = object.__new__(cls)
return obj
"""A Magnitude class that interfaces with the underlying SQLite
data store to provide efficient access.
Attributes:
path: The file path or URL to the magnitude file
stream: Stream the URL instead of downloading it
stream_options: Options to control the behavior of the streaming
lazy_loading: -1 = pre-load into memory, 0 = lazy loads with unbounded
in-memory cache, >0 lazy loads with an LRU cache of that
size
        blocking: Even when lazy_loading is -1, the constructor will not block;
                  it will instead pre-load into memory in a background thread.
                  If blocking is set to True, it will block until everything
                  is pre-loaded into memory
normalized: Returns unit normalized vectors
use_numpy: Returns a NumPy array if True or a list if False
case_insensitive: Searches for keys with case-insensitive search
pad_to_length: Pads to a certain length if examples are shorter than
that length or truncates if longer than that length.
truncate_left: if something needs to be truncated to the padding,
truncate off the left side
pad_left: Pads to the left.
placeholders: Extra empty dimensions to add to the vectors.
ngram_oov: Use character n-grams for generating out-of-vocabulary
vectors.
        supress_warnings: Suppress warnings generated
batch_size: Controls the maximum vector size used in memory directly
eager: Start loading non-critical resources in the background in
anticipation they will be used.
        language: An ISO 639-1 language code (default: English 'en')
dtype: The dtype to use when use_numpy is True.
devices: A list of GPU device ids.
temp_dir: The directory Magnitude will use as its temporary directory
log: Enable log messages from Magnitude
_number_of_values: When the path is set to None and Magnitude is being
used to solely featurize keys directly into vectors,
_number_of_values should be set to the
approximate upper-bound of the number of keys
that will be looked up with query(). If you don't know
the exact number, be conservative and pick a large
number, while keeping in mind the bigger
_number_of_values is, the more memory it will consume.
_namespace: an optional namespace that will be prepended to each query
if provided
"""
def __init__(self, path, stream=False, stream_options=None,
lazy_loading=0, blocking=False, normalized=None,
use_numpy=True, case_insensitive=False,
pad_to_length=None, truncate_left=False,
pad_left=False, placeholders=0, ngram_oov=None,
supress_warnings=False, batch_size=3000000,
eager=None, language='en', dtype=np.float32,
devices=[], temp_dir=tempfile.gettempdir(),
log=None, _namespace=None,
_number_of_values=1000000):
"""Initializes a new Magnitude object."""
self.sqlite_lib = Magnitude.SQLITE_LIB
self.apsw_lib = Magnitude.APSW_LIB
self.closed = False
self.uid = str(uuid.uuid4()).replace("-", "")
self.stream = stream
self.stream_options = stream_options or {}
if self.stream:
if self.apsw_lib != 'internal':
raise RuntimeError(
"""You are trying to stream a model, but the
installation of Magnitude has partially failed so this
component will not work. Please try re-installing or create
a GitHub issue to further debug.""")
self.driver = apsw
self.http_vfs = HTTPVFS(options=self.stream_options)
download_vfs_options = deepcopy(self.stream_options)
download_vfs_options.update({
'sequential_cache_max_read': 500 * (1024 ** 2),
})
self.http_download_vfs = HTTPVFS(vfsname='http_download',
options=download_vfs_options)
else:
self.driver = sqlite3
self.fd = None
if path is None:
self.memory_db = True
self.path = ":memory:"
else:
self.memory_db = False
self.path = (
os.path.expanduser(path)
if not self.stream else MagnitudeUtils.download_model(
path, _download=False, _local=True))
self._all_conns = []
self.lazy_loading = lazy_loading
self.use_numpy = use_numpy
self.case_insensitive = case_insensitive
self.pad_to_length = pad_to_length
self.truncate_left = truncate_left
self.pad_left = pad_left
self.placeholders = placeholders
self.supress_warnings = supress_warnings
self.batch_size = batch_size
if eager is None:
self.eager = not(self.stream)
else:
self.eager = eager
self.language = language and language.lower()
self.dtype = dtype
if isinstance(devices, list):
self.devices = devices
else:
self.devices = [devices]
self.temp_dir = temp_dir
if log is None:
self.log = True if self.stream else log
else:
self.log = log
self._namespace = _namespace
self._number_of_values = _number_of_values
# Define conns and cursors store
self._conns = {}
self._cursors = {}
self._threads = []
# Convert the input file if not .magnitude
if self.path.endswith('.bin') or \
self.path.endswith('.txt') or \
self.path.endswith('.vec') or \
self.path.endswith('.hdf5'):
if not supress_warnings:
sys.stdout.write(
"""WARNING: You are attempting to directly use a `.bin`,
`.txt`, `.vec`, or `.hdf5` file with Magnitude. The file is being
converted to the `.magnitude` format (which is slow) so
that it can be used with this library. This will happen on
every run / re-boot of your computer. If you want to make
this faster pre-convert your vector model to the
`.magnitude` format with the built-in command utility:
`python -m pymagnitude.converter -i input_file -o output_file`
Refer to the README for more information.
You can pass `supress_warnings=True` to the constructor to
hide this message.""") # noqa
sys.stdout.flush()
from pymagnitude.converter_shared import convert as convert_vector_file # noqa
self.path = convert_vector_file(self.path)
# If the path doesn't exist locally, try a remote download
if not self.stream and not os.path.isfile(
self.path) and not self.memory_db:
self.path = MagnitudeUtils.download_model(
self.path, log=self.log, _local=True)
# Open a read-only file descriptor against the file
if not self.memory_db and not self.stream:
self.fd = os.open(self.path, os.O_RDONLY)
# Get metadata about the vectors
self.length = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='size'") \
.fetchall()[0][0]
version_query = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='version'") \
.fetchall()
self.version = version_query[0][0] if len(version_query) > 0 else 1
elmo_query = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='elmo'") \
.fetchall()
self.elmo = len(elmo_query) > 0 and elmo_query[0][0]
if ngram_oov is None:
self.ngram_oov = not(self._is_lm())
else:
self.ngram_oov = ngram_oov
if normalized is None:
self.normalized = not(self._is_lm())
else:
self.normalized = normalized
if not self.normalized:
try:
self._db().execute(
"SELECT magnitude FROM magnitude LIMIT 1")\
.fetchall()
except BaseException:
raise RuntimeError(
"""You are trying to access non-unit-normalized vectors.
However, your .magnitude file version does not support
this. Please re-download a newer .magnitude file for
this model or re-convert it if it is a custom model.""")
if CONVERTER_VERSION < self.version:
raise RuntimeError(
"""The `.magnitude` file you are using was built with a
newer version of Magnitude than your version of Magnitude.
Please update the Magnitude library as it is incompatible
with this particular `.magnitude` file.""") # noqa
self.emb_dim = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='dim'") \
.fetchall()[0][0]
self.precision = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='precision'") \
.fetchall()[0][0]
subword_query = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='subword'") \
.fetchall()
self.subword = len(subword_query) > 0 and subword_query[0][0]
if self.subword:
self.subword_start = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='subword_start'")\
.fetchall()[0][0]
self.subword_end = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='subword_end'") \
.fetchall()[0][0]
approx_query = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='approx'") \
.fetchall()
self.approx = len(approx_query) > 0 and approx_query[0][0]
if self.approx:
self.approx_trees = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='approx_trees'")\
.fetchall()[0][0]
self.dim = self.emb_dim + self.placeholders
self.highest_entropy_dimensions = [row[0] for row in self._db().execute(
"SELECT value FROM magnitude_format WHERE key='entropy'")
.fetchall()]
duplicate_keys_query = self._db().execute(
"""SELECT value FROM magnitude_format
WHERE key='max_duplicate_keys'""").fetchall()
self.max_duplicate_keys = len(
duplicate_keys_query) > 0 and duplicate_keys_query[0][0]
if len(duplicate_keys_query) == 0:
duplicate_keys_query = self._db().execute("""
SELECT MAX(key_count)
FROM (
SELECT COUNT(key)
AS key_count
FROM magnitude
GROUP BY key
);
""").fetchall()
self.max_duplicate_keys = (
duplicate_keys_query[0][0] if duplicate_keys_query[0][0] is not None else 1) # noqa
# Iterate to pre-load
def _preload_memory():
if not self.eager: # So that it doesn't loop over the vectors twice
for key, vector in self._iter(put_cache=True, downloader=True):
pass
# Start creating mmap in background
self.setup_for_mmap = False
self._all_vectors = None
self._approx_index = None
self._elmo_embedder = None
if self.eager:
mmap_thread = threading.Thread(target=self.get_vectors_mmap,
args=(False,))
self._threads.append(mmap_thread)
mmap_thread.daemon = True
mmap_thread.start()
if self.approx:
approx_mmap_thread = threading.Thread(
target=self.get_approx_index, args=(False,))
self._threads.append(approx_mmap_thread)
approx_mmap_thread.daemon = True
approx_mmap_thread.start()
if self.elmo:
elmo_thread = threading.Thread(
target=self.get_elmo_embedder, args=(False,))
self._threads.append(elmo_thread)
elmo_thread.daemon = True
elmo_thread.start()
# Create cached methods
if self.lazy_loading <= 0:
@lru_cache(None, real_func=self._vector_for_key, remove_self=True)
def _vector_for_key_cached(*args, **kwargs):
return self._vector_for_key(*args, **kwargs)
@lru_cache(
None,
real_func=self._out_of_vocab_vector,
remove_self=True)
def _out_of_vocab_vector_cached(*args, **kwargs):
return self._out_of_vocab_vector(*args, **kwargs)
@lru_cache(None, real_func=self._key_for_index, remove_self=True)
def _key_for_index_cached(*args, **kwargs):
return self._key_for_index(*args, **kwargs)
self._vector_for_key_cached = _vector_for_key_cached
self._out_of_vocab_vector_cached = _out_of_vocab_vector_cached
self._key_for_index_cached = _key_for_index_cached
if self.lazy_loading == -1:
if blocking:
_preload_memory()
else:
preload_thread = threading.Thread(target=_preload_memory)
self._threads.append(preload_thread)
preload_thread.daemon = True
preload_thread.start()
elif self.lazy_loading > 0:
@lru_cache(
self.lazy_loading,
real_func=self._vector_for_key,
remove_self=True)
def _vector_for_key_cached(*args, **kwargs):
return self._vector_for_key(*args, **kwargs)
@lru_cache(
self.lazy_loading,
real_func=self._out_of_vocab_vector,
remove_self=True)
def _out_of_vocab_vector_cached(*args, **kwargs):
return self._out_of_vocab_vector(*args, **kwargs)
@lru_cache(
self.lazy_loading,
real_func=self._key_for_index,
remove_self=True)
def _key_for_index_cached(*args, **kwargs):
return self._key_for_index(*args, **kwargs)
self._vector_for_key_cached = _vector_for_key_cached
self._out_of_vocab_vector_cached = _out_of_vocab_vector_cached
self._key_for_index_cached = _key_for_index_cached
if self.eager and blocking:
self.get_vectors_mmap() # Wait for mmap to be available
if self.approx:
self.get_approx_index() # Wait for approx mmap to be available
if self.elmo:
self.get_elmo_embedder() # Wait for approx mmap to be available
def _setup_for_mmap(self):
# Setup variables for get_vectors_mmap()
self._all_vectors = None
self._approx_index = None
self._elmo_embedder = None
if not self.memory_db:
self.db_hash = fast_md5_file(self.path, stream=self.stream)
else:
self.db_hash = self.uid
self.md5 = hashlib.md5(",".join(
[self.path, self.db_hash, str(self.length),
str(self.dim), str(self.precision), str(self.case_insensitive)
]).encode('utf-8')).hexdigest()
self.path_to_mmap = os.path.join(self.temp_dir,
self.md5 + '.magmmap')
self.path_to_approx_mmap = os.path.join(self.temp_dir,
self.md5 + '.approx.magmmap')
self.path_to_elmo_w_mmap = os.path.join(self.temp_dir,
self.md5 + '.elmo.hdf5.magmmap')
self.path_to_elmo_o_mmap = os.path.join(self.temp_dir,
self.md5 + '.elmo.json.magmmap')
if self.path_to_mmap not in Magnitude.MMAP_THREAD_LOCK:
Magnitude.MMAP_THREAD_LOCK[self.path_to_mmap] = threading.Lock()
if self.path_to_approx_mmap not in Magnitude.MMAP_THREAD_LOCK:
Magnitude.MMAP_THREAD_LOCK[self.path_to_approx_mmap] = \
threading.Lock()
if self.path_to_elmo_w_mmap not in Magnitude.MMAP_THREAD_LOCK:
Magnitude.MMAP_THREAD_LOCK[self.path_to_elmo_w_mmap] = \
threading.Lock()
if self.path_to_elmo_o_mmap not in Magnitude.MMAP_THREAD_LOCK:
Magnitude.MMAP_THREAD_LOCK[self.path_to_elmo_o_mmap] = \
threading.Lock()
self.MMAP_THREAD_LOCK = Magnitude.MMAP_THREAD_LOCK[self.path_to_mmap]
self.MMAP_PROCESS_LOCK = InterProcessLock(self.path_to_mmap + '.lock')
self.APPROX_MMAP_THREAD_LOCK = \
Magnitude.MMAP_THREAD_LOCK[self.path_to_approx_mmap]
self.APPROX_MMAP_PROCESS_LOCK = \
InterProcessLock(self.path_to_approx_mmap + '.lock')
self.ELMO_W_MMAP_THREAD_LOCK = \
Magnitude.MMAP_THREAD_LOCK[self.path_to_elmo_w_mmap]
self.ELMO_W_MMAP_PROCESS_LOCK = \
InterProcessLock(self.path_to_elmo_w_mmap + '.lock')
self.ELMO_O_MMAP_THREAD_LOCK = \
Magnitude.MMAP_THREAD_LOCK[self.path_to_elmo_o_mmap]
self.ELMO_O_MMAP_PROCESS_LOCK = \
InterProcessLock(self.path_to_elmo_o_mmap + '.lock')
self.setup_for_mmap = True
def sqlite3_connect(self, downloader, *args, **kwargs):
"""Returns a sqlite3 connection."""
if (self.driver != sqlite3):
if 'check_same_thread' in kwargs:
del kwargs['check_same_thread']
if self.stream:
if downloader:
kwargs['vfs'] = self.http_download_vfs.vfsname
else:
kwargs['vfs'] = self.http_vfs.vfsname
kwargs['flags'] = self.driver.SQLITE_OPEN_READONLY
return self.driver.Connection(*args, **kwargs)
else:
return self.driver.connect(*args, **kwargs)
def _db(self, force_new=False, downloader=False):
"""Returns a cursor to the database. Each thread gets its
own cursor.
"""
identifier = threading.current_thread().ident
conn_exists = identifier in self._cursors
if not conn_exists or force_new:
if self.fd:
if os.name == 'nt':
conn = self.sqlite3_connect(downloader, self.path,
check_same_thread=False)
else:
conn = self.sqlite3_connect(downloader,
'/dev/fd/%d' % self.fd,
check_same_thread=False)
elif self.stream:
conn = self.sqlite3_connect(downloader,
self.path, check_same_thread=False)
else:
conn = self.sqlite3_connect(downloader,
self.path, check_same_thread=False)
self._create_empty_db(conn.cursor())
self._all_conns.append(conn)
if not conn_exists:
self._conns[identifier] = conn
self._cursors[identifier] = conn.cursor()
elif force_new:
return conn.cursor()
return self._cursors[identifier]
def _create_empty_db(self, db):
        # Calculate the number of dimensions needed to keep the probability of
        # a hashing collision below a fixed threshold, given the number of
        # expected feature values being hashed
collision_error_allowed = .001
number_of_dims = max(math.ceil(math.log(
((self._number_of_values ** 2) / (-2 * math.log(-collision_error_allowed + 1))), 100)), 2) # noqa
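        # Rough derivation (birthday-bound approximation): for n values hashed
        # into d buckets, P(collision) ~= 1 - exp(-n^2 / (2 * d)); solving for d
        # with P = collision_error_allowed gives d ~= n^2 / (-2 * ln(1 - P)).
        # The log base 100 then (apparently) maps that bucket count onto a
        # dimension count, with a floor of 2 dimensions.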
db.execute("DROP TABLE IF EXISTS `magnitude`;")
db.execute("""
CREATE TABLE `magnitude` (
key TEXT COLLATE NOCASE,
magnitude REAL
);
""")
db.execute("""
CREATE TABLE `magnitude_format` (
key TEXT COLLATE NOCASE,
value INTEGER
);
""")
insert_format_query = """
INSERT INTO `magnitude_format`(
key,
value
)
VALUES (
?, ?
);
"""
db.execute(insert_format_query, ('size', 0))
db.execute(insert_format_query, ('dim', number_of_dims))
db.execute(insert_format_query, ('precision', 0))
def _padding_vector(self):
"""Generates a padding vector."""
if self.use_numpy:
return np.zeros((self.dim,), dtype=self.dtype)
else:
return [0.0] * self.dim
def _key_t(self, key):
"""Transforms a key to lower case depending on case
sensitivity.
"""
if self.case_insensitive and (isinstance(key, str) or
isinstance(key, unicode)):
return key.lower()
return key
def _string_dist(self, a, b):
length = max(len(a), len(b))
return length - difflib.SequenceMatcher(None, a, b).ratio() * length
def _key_shrunk_2(self, key):
"""Shrinks more than two characters to two characters
"""
return re.sub(r"([^<])\1{2,}", r"\1\1", key)
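        # e.g. _key_shrunk_2("looooool") -> "lool"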
def _key_shrunk_1(self, key):
"""Shrinks more than one character to a single character
"""
return re.sub(r"([^<])\1+", r"\1", key)
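        # e.g. _key_shrunk_1("looooool") -> "lol"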
def _oov_key_t(self, key):
"""Transforms a key for out-of-vocabulary lookup.
"""
is_str = isinstance(key, str) or isinstance(key, unicode)
if is_str:
key = Magnitude.BOW + self._key_t(key) + Magnitude.EOW
return is_str, self._key_shrunk_2(key)
return is_str, key
def _oov_english_stem_english_ixes(self, key):
"""Strips away common English prefixes and suffixes."""
key_lower = key.lower()
start_idx = 0
end_idx = 0
for p in Magnitude.ENGLISH_PREFIXES:
if key_lower[:len(p)] == p:
start_idx = len(p)
break
for s in Magnitude.ENGLISH_SUFFIXES:
if key_lower[-len(s):] == s:
end_idx = len(s)
break
start_idx = start_idx if max(start_idx, end_idx) == start_idx else 0
end_idx = end_idx if max(start_idx, end_idx) == end_idx else 0
stripped_key = key[start_idx:len(key) - end_idx]
if len(stripped_key) < 4:
return key
elif stripped_key != key:
return self._oov_english_stem_english_ixes(stripped_key)
else:
return stripped_key
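    # e.g. _oov_english_stem_english_ixes("preheated") -> "heated"
    # (strips the recognized 'pre' prefix, then recurses until nothing matches)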
def _oov_stem(self, key):
"""Strips away common prefixes and suffixes."""
if len(key) <= Magnitude.MAX_KEY_LENGTH_FOR_STEM:
if self.language == 'en':
return self._oov_english_stem_english_ixes(key)
return key
def _db_query_similar_keys_vector(
self, key, orig_key, topn=3, normalized=None):
"""Finds similar keys in the database and gets the mean vector."""
normalized = normalized if normalized is not None else self.normalized
def _sql_escape_single(s):
return s.replace("'", "''")
def _sql_escape_fts(s):
return ''.join("\\" + c if c in Magnitude.FTS_SPECIAL
else c for c in s).replace('"', '""')
exact_search_query = """
SELECT *
FROM `magnitude`
WHERE key = ?
ORDER BY key = ? COLLATE NOCASE DESC
LIMIT ?;
"""
if self.subword and len(key) < Magnitude.MAX_KEY_LENGTH_FOR_OOV_SIM:
current_subword_start = self.subword_end
BOW_length = len(Magnitude.BOW) # noqa: N806
EOW_length = len(Magnitude.EOW) # noqa: N806
BOWEOW_length = BOW_length + EOW_length # noqa: N806
true_key_len = len(key) - BOWEOW_length
key_shrunk_stemmed = self._oov_stem(self._key_shrunk_1(orig_key))
key_shrunk = self._key_shrunk_1(orig_key)
key_stemmed = self._oov_stem(orig_key)
beginning_and_end_clause = ""
exact_matches = []
if true_key_len <= 6:
beginning_and_end_clause = """
magnitude.key LIKE '{0}%'
AND LENGTH(magnitude.key) <= {2} DESC,
magnitude.key LIKE '%{1}'
AND LENGTH(magnitude.key) <= {2} DESC,"""
beginning_and_end_clause = beginning_and_end_clause.format(
_sql_escape_single(key[BOW_length:BOW_length + 1]),
_sql_escape_single(key[-EOW_length - 1:-EOW_length]),
str(true_key_len))
if key != orig_key:
exact_matches.append((key_shrunk, self._key_shrunk_2(orig_key)))
if key_stemmed != orig_key:
exact_matches.append((key_stemmed,))
if key_shrunk_stemmed != orig_key:
exact_matches.append((key_shrunk_stemmed,))
if len(exact_matches) > 0:
for exact_match in exact_matches:
results = []
split_results = []
limits = np.array_split(list(range(topn)), len(exact_match))
for i, e in enumerate(exact_match):
limit = len(limits[i])
split_results.extend(self._db().execute(
exact_search_query, (e, e, limit)).fetchall())
results.extend(self._db().execute(
exact_search_query, (e, e, topn)).fetchall())
if len(split_results) >= topn:
results = split_results
if len(results) > 0:
break
else:
results = []
if len(results) == 0:
search_query = """
SELECT magnitude.*
FROM magnitude_subword, magnitude
WHERE char_ngrams MATCH ?
AND magnitude.rowid = magnitude_subword.rowid
ORDER BY
(
(
LENGTH(offsets(magnitude_subword)) -
LENGTH(
REPLACE(offsets(magnitude_subword), ' ', '')
)
)
+
1
) DESC,
""" + beginning_and_end_clause + """
LENGTH(magnitude.key) ASC
LIMIT ?;
""" # noqa
while (len(results) < topn and
current_subword_start >= self.subword_start):
ngrams = list(char_ngrams(
key, current_subword_start, current_subword_start))
ngram_limit_map = {
6: 4,
5: 8,
4: 12,
}
while current_subword_start in ngram_limit_map and len(
ngrams) > ngram_limit_map[current_subword_start]:
# Reduce the search parameter space by sampling every
# other ngram
ngrams = ngrams[:-1][::2] + ngrams[-1:]
params = (' OR '.join('"{0}"'.format(_sql_escape_fts(n))
for n in ngrams), topn)
results = self._db().execute(search_query,
params).fetchall()
small_typo = len(results) > 0 and self._string_dist(
results[0][0].lower(), orig_key.lower()) <= 4
if key_shrunk_stemmed != orig_key and key_shrunk_stemmed != key_shrunk and not small_typo: # noqa
ngrams = list(
char_ngrams(
self._oov_key_t(key_shrunk_stemmed)[1],
current_subword_start,
self.subword_end))
params = (' OR '.join('"{0}"'.format(_sql_escape_fts(n))
for n in ngrams), topn)
results = self._db().execute(search_query,
params).fetchall()
current_subword_start -= 1
else:
# As a backup do a search with 'NOCASE'
results = self._db().execute(exact_search_query,
(orig_key, orig_key, topn)).fetchall()
final_results = []
for result in results:
result_key, vec = self._db_full_result_to_vec(
result, normalized=normalized)
final_results.append(vec)
if len(final_results) > 0:
mean_vector = np.mean(final_results, axis=0)
return mean_vector / np.linalg.norm(mean_vector)
else:
return self._padding_vector()
def _seed(self, val):
"""Returns a unique seed for val and the (optional) namespace."""
if self._namespace:
return xxhash.xxh32(
self._namespace.encode('utf-8') +
Magnitude.RARE_CHAR +
val.encode('utf-8')).intdigest()
else:
return xxhash.xxh32(val.encode('utf-8')).intdigest()
def _is_lm(self):
"""Check if using a language model"""
return self.elmo
def _process_lm_output(self, q, normalized):
"""Process the output from a language model"""
zero_d = not(isinstance(q, list))
one_d = not(zero_d) and (len(q) == 0 or not(isinstance(q[0], list)))
if self.elmo:
if zero_d:
r_val = np.concatenate(self.get_elmo_embedder().embed_batch(
[[q]])[0], axis=1).flatten()
elif one_d:
r_val = np.concatenate(self.get_elmo_embedder().embed_batch(
[q])[0], axis=1)
else:
r_val = [np.concatenate(row, axis=1)
for row in self.get_elmo_embedder().embed_batch(q)]
if normalized:
if zero_d:
r_val = r_val / np.linalg.norm(r_val)
elif one_d:
r_val = norm_matrix(r_val)
else:
r_val = [norm_matrix(row) for row in r_val]
if self.placeholders > 0 or self.ngram_oov:
shape_p = list(r_val.shape) if zero_d or one_d else \
([len(r_val)] + list(max((row.shape for row in r_val))))
shape_p[-1] = self.dim
if self.placeholders > 0:
if zero_d or one_d:
r_val_p = np.zeros(shape_p, dtype=self.dtype)
else:
r_val_p = [np.zeros(shape_p[1:], dtype=self.dtype)
for row in r_val]
else:
r_val_p = r_val
if self.ngram_oov:
if zero_d:
lookup = self._vectors_for_keys_cached(
[q], normalized=normalized, force=True)
elif one_d:
lookup = self._vectors_for_keys_cached(
q, normalized=normalized, force=True)
else:
lookup = [None] * len(q)
for row, sq in enumerate(q):
lookup[row] = self._vectors_for_keys_cached(
sq, normalized=normalized, force=True)
for idx in product(*[xrange(s) for s in shape_p[:-1]]):
if zero_d:
key = q
if self.ngram_oov:
vec = r_val if self.__contains__(key) else lookup[0]
else:
vec = r_val
r_val_p[:self.emb_dim] = vec[:self.emb_dim]
elif one_d:
key = q[idx[0]]
if self.ngram_oov:
vec = r_val[idx] if self.__contains__(key) else \
lookup[idx[0]]
else:
vec = r_val[idx]
r_val_p[idx][:self.emb_dim] = vec[:self.emb_dim]
elif idx[1] < len(q[idx[0]]):
key = q[idx[0]][idx[1]]
if self.ngram_oov:
vec = r_val[idx[0]][idx[1]] if self.__contains__(key) \
else lookup[idx[0]][idx[1]]
else:
vec = r_val[idx[0]][idx[1]]
r_val_p[idx[0]][idx[1]][:self.emb_dim] = vec[:self.emb_dim]
r_val = r_val_p
if self.use_numpy:
return r_val
else:
return r_val.tolist()
def _out_of_vocab_vector(self, key, normalized=None, force=False):
"""Generates a random vector based on the hash of the key."""
normalized = normalized if normalized is not None else self.normalized
orig_key = key
is_str, key = self._oov_key_t(key)
if self._is_lm() and is_str and not force:
return self._process_lm_output(key, normalized)
if not is_str:
seed = self._seed(type(key).__name__)
Magnitude.OOV_RNG_LOCK.acquire()
np.random.seed(seed=seed)
random_vector = np.random.uniform(-1, 1, (self.emb_dim,))
Magnitude.OOV_RNG_LOCK.release()
random_vector[-1] = self.dtype(key) / np.finfo(self.dtype).max
elif not self.ngram_oov or len(key) < Magnitude.NGRAM_BEG:
seed = self._seed(key)
Magnitude.OOV_RNG_LOCK.acquire()
np.random.seed(seed=seed)
random_vector = np.random.uniform(-1, 1, (self.emb_dim,))
Magnitude.OOV_RNG_LOCK.release()
else:
ngrams = char_ngrams(key, Magnitude.NGRAM_BEG,
Magnitude.NGRAM_END)
random_vectors = []
for i, ngram in enumerate(ngrams):
seed = self._seed(ngram)
Magnitude.OOV_RNG_LOCK.acquire()
np.random.seed(seed=seed)
random_vectors.append(
np.random.uniform(-1, 1, (self.emb_dim,)))
Magnitude.OOV_RNG_LOCK.release()
random_vector = np.mean(random_vectors, axis=0)
np.random.seed()
if self.placeholders > 0:
random_vector = np.pad(random_vector, [(0, self.placeholders)],
mode='constant', constant_values=0.0)
if is_str:
random_vector = random_vector / np.linalg.norm(random_vector)
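            # Blend the hash-seeded random vector (30%) with the mean vector of
            # orthographically similar in-vocabulary keys (70%) so that OOV keys
            # with similar spellings land near related known keys.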
final_vector = (
random_vector *
0.3 +
self._db_query_similar_keys_vector(
key,
orig_key,
normalized=normalized) *
0.7)
if normalized:
final_vector = final_vector / np.linalg.norm(final_vector)
else:
final_vector = random_vector
if self.use_numpy:
return final_vector
else:
return final_vector.tolist()
    def _db_batch_generator(self, params):
        """ Generates batches of parameters that respect
        SQLite's MAX_VARIABLE_NUMBER """
if len(params) <= Magnitude.SQLITE_MAX_VARIABLE_NUMBER:
yield params
else:
it = iter(params)
for batch in \
iter(lambda: tuple(
islice(it, Magnitude.SQLITE_MAX_VARIABLE_NUMBER)
), ()):
yield batch
def _db_result_to_vec(self, result, normalized=None):
"""Converts a database result to a vector."""
normalized = normalized if normalized is not None else self.normalized
if self.use_numpy:
vec = np.zeros((self.dim,), dtype=self.dtype)
vec[0:self.emb_dim] = result[0:self.emb_dim]
if normalized:
rv = vec / float(10**self.precision)
else:
rv = vec * (float(result[-1]) / float(10**self.precision))
else:
if normalized:
rv = [v / float(10**self.precision)
for v in islice(result, self.emb_dim)] + \
[0.0] * self.placeholders
else:
rv = [v * (float(result[-1]) / float(10**self.precision))
for v in islice(result, self.emb_dim)] + \
[0.0] * self.placeholders
return rv
def _db_full_result_to_vec(self, result, put_cache=True, normalized=None):
"""Converts a full database result to a vector."""
normalized = normalized if normalized is not None else self.normalized
result_key = result[0]
vec = self._db_result_to_vec(result[1:], normalized)
if put_cache:
self._vector_for_key_cached._cache.put(
((result_key,), frozenset([('normalized', normalized)])), vec)
return (result_key, vec)
def _vector_for_key(self, key, normalized=None):
"""Queries the database for a single key."""
normalized = normalized if normalized is not None else self.normalized
result = self._db().execute(
"""
SELECT *
FROM `magnitude`
WHERE key = ?
ORDER BY key = ? COLLATE BINARY DESC
LIMIT 1;""",
(key, key)).fetchone()
if result is None or self._key_t(result[0]) != self._key_t(key):
return None
else:
return self._db_result_to_vec(result[1:], normalized)
def _vectors_for_keys_cached(self, keys, normalized=None, force=False):
"""Queries the database for multiple keys."""
normalized = normalized if normalized is not None else self.normalized
if self._is_lm() and not force:
keys = [self._key_t(key) for key in keys]
return self._process_lm_output(keys, normalized)
unseen_keys = tuple(
key for key in keys if not self._query_is_cached(key, normalized))
unseen_keys_map = {}
if len(unseen_keys) > 0:
unseen_keys_map = {self._key_t(k): i for i, k in
enumerate(unseen_keys)}
unseen_vectors = [None] * len(unseen_keys)
seen_keys = set()
for unseen_keys_batch in self._db_batch_generator(unseen_keys):
results = self._db().execute(
"""
SELECT *
FROM `magnitude`
WHERE key
IN (""" + ' ,'.join(['?'] * len(unseen_keys_batch)) +
""");
""",
unseen_keys_batch)
for result in results:
result_key, vec = self._db_full_result_to_vec(
result, normalized=normalized)
result_key_t = self._key_t(result_key)
if result_key_t in unseen_keys_map:
i = unseen_keys_map[result_key_t]
if (
(result_key_t not in seen_keys or
result_key == unseen_keys[i]) and
(
self.case_insensitive or
result_key == unseen_keys[i])
):
seen_keys.add(result_key_t)
unseen_vectors[i] = vec
for i in range(len(unseen_vectors)):
self._vector_for_key_cached._cache.put(
((unseen_keys[i],), frozenset([('normalized', normalized)])), # noqa
unseen_vectors[i])
if unseen_vectors[i] is None:
unseen_vectors[i] = self._out_of_vocab_vector_cached(
unseen_keys[i], normalized, force=force)
vectors = [self.query(key, normalized=normalized)
if key not in unseen_keys_map else
unseen_vectors[unseen_keys_map[self._key_t(key)]]
for key in keys]
return vectors
def _vectors_for_2d_keys(self, keys2d, normalized=None):
"""Queries the database for 2D keys."""
normalized = normalized if normalized is not None else self.normalized
if self._is_lm():
# Only language models benefit from this kind of 2D batching,
# SQLite is slightly faster with more batching, but it also has
# a turning point where that changes
keys2d = [[self._key_t(key) for key in keys] for keys in keys2d]
return self._process_lm_output(keys2d, normalized)
else:
return (self._vectors_for_keys_cached(row, normalized)
for row in keys2d)
def _key_for_index(self, index, return_vector=True):
"""Queries the database the key at a single index."""
columns = "key"
if return_vector:
columns = "*"
result = self._db().execute(
"""
SELECT """ + columns + """
FROM `magnitude`
WHERE rowid = ?
LIMIT 1;
""",
(int(index + 1),)).fetchone()
if result is None:
raise IndexError("The index %d is out-of-range" % index)
else:
if return_vector:
return self._db_full_result_to_vec(
result)
else:
return result[0]
def _keys_for_indices(self, indices, return_vector=True):
"""Queries the database for the keys of multiple indices."""
unseen_indices = tuple(int(index + 1) for index in indices
if self._key_for_index_cached._cache.get(((index,), # noqa
frozenset([('return_vector', return_vector)]))) is None) # noqa
unseen_indices_map = {}
if len(unseen_indices) > 0:
columns = "key"
if return_vector:
columns = "*"
unseen_indices_map = {(index - 1): i for i, index in
enumerate(unseen_indices)}
unseen_keys = [None] * len(unseen_indices)
for unseen_indices_batch in \
self._db_batch_generator(unseen_indices):
results = self._db().execute(
"""
SELECT rowid, """ + columns + """
FROM `magnitude`
WHERE rowid IN (""" +
' ,'.join(['?'] * len(unseen_indices_batch)) +
""");""",
unseen_indices_batch)
for result in results:
i = unseen_indices_map[result[0] - 1]
result_key = result[1]
if return_vector:
unseen_keys[i] = self._db_full_result_to_vec(
result[1:])
else:
unseen_keys[i] = result_key
self._key_for_index_cached._cache.put(
(
(unseen_indices[i] - 1,),
frozenset([('return_vector', return_vector)])
),
unseen_keys[i]
)
for i in range(len(unseen_keys)):
if unseen_keys[i] is None:
                    raise IndexError("The index %d is out-of-range" %
                                     (unseen_indices[i] - 1))
keys = [self.index(index, return_vector=return_vector)
if index not in unseen_indices_map else
unseen_keys[unseen_indices_map[index]] for index in indices]
return keys
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def query(self, q, pad_to_length=None,
pad_left=None, truncate_left=None,
normalized=None):
"""Handles a query of keys which could be a single key, a
1-D list of keys, or a 2-D list of keys.
"""
normalized = normalized if normalized is not None else self.normalized
pad_to_length = pad_to_length or self.pad_to_length
pad_left = pad_left or self.pad_left
truncate_left = truncate_left or self.truncate_left
if not isinstance(q, list): # Single key
vec = self._vector_for_key_cached(q, normalized)
if vec is None:
return self._out_of_vocab_vector_cached(q, normalized)
else:
return vec
elif isinstance(q, list) \
and (len(q) == 0 or not isinstance(q[0], list)): # 1D list
pad_to_length = pad_to_length if pad_to_length else len(q)
padding_length = max(pad_to_length - len(q), 0)
keys_length = pad_to_length - padding_length
vectors = self._vectors_for_keys_cached(q, normalized)
if truncate_left:
vectors = vectors[-keys_length:]
else:
vectors = vectors[0:keys_length]
if self.use_numpy:
tensor = np.zeros((pad_to_length, self.dim), dtype=self.dtype)
else:
tensor = [self._padding_vector() for i in range(pad_to_length)]
if pad_left:
tensor[-keys_length:] = vectors
else:
tensor[0:keys_length] = vectors
return tensor
elif isinstance(q, list): # 2D List
max_q = max([len(subquery) for subquery in q])
pad_to_length = pad_to_length if pad_to_length else max_q
if self.use_numpy:
tensor = np.zeros((len(q), pad_to_length, self.dim),
dtype=self.dtype)
else:
tensor = [[self._padding_vector() for i in range(pad_to_length)]
for j in range(len(q))]
for row, vectors in \
enumerate(self._vectors_for_2d_keys(q, normalized)):
padding_length = max(pad_to_length - len(vectors), 0)
keys_length = pad_to_length - padding_length
if truncate_left:
vectors = vectors[-keys_length:]
else:
vectors = vectors[0:keys_length]
if pad_left:
if self.use_numpy:
tensor[row, -keys_length:] = vectors
else:
tensor[row][-keys_length:] = vectors
else:
if self.use_numpy:
tensor[row, 0:keys_length] = vectors
else:
tensor[row][0:keys_length] = vectors
return tensor
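    # Illustrative shapes (assuming use_numpy=True): query("cat") returns a
    # (dim,) vector, query(["cat", "dog"]) a (2, dim) matrix, and
    # query([["hello", "world"], ["hi"]]) a (2, 2, dim) tensor padded with zeros.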
def unroll(self, v):
""" Unrolls a vector if it was concatenated from its base model
form. """
if self.elmo and isinstance(v, np.ndarray):
return unroll_elmo(v, self.placeholders)
else:
return v
def index(self, q, return_vector=True):
"""Gets a key for an index or multiple indices."""
if isinstance(q, list) or isinstance(q, tuple):
return self._keys_for_indices(q, return_vector=return_vector)
else:
return self._key_for_index_cached(q, return_vector=return_vector)
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def _query_numpy(self, key, contextualize=False, normalized=None):
"""Returns the query for a key, forcibly converting the
resulting vector to a numpy array.
"""
normalized = normalized if normalized is not None else self.normalized
key_is_list = isinstance(key, list)
key_len_ge_0 = key_is_list and len(key) > 0
key_0_is_ndarray = key_len_ge_0 and isinstance(key[0], np.ndarray)
if contextualize:
if key_len_ge_0 and key_0_is_ndarray:
contextualize = False
if contextualize:
key = [[sq] for sq in key]
key_is_ndarray = isinstance(key, np.ndarray)
key_is_list = isinstance(key, list)
key_len_ge_0 = key_is_list and len(key) > 0
key_0_is_number = key_len_ge_0 and isinstance(key[0], Number)
key_0_is_ndarray = key_len_ge_0 and isinstance(key[0], np.ndarray)
key_0_is_list = key_len_ge_0 and isinstance(key[0], list)
key_0_len_ge_0 = key_0_is_list and len(key[0]) > 0
key_0_0_is_number = (key_0_is_list and key_0_len_ge_0 and
isinstance(key[0][0], Number))
r_val = None
if (key_is_ndarray or key_0_is_number or key_0_is_ndarray or key_0_0_is_number): # noqa
r_val = key
elif not self.use_numpy:
r_val = np.asarray(self.query(key, normalized=normalized))
else:
r_val = self.query(key, normalized=normalized)
if contextualize:
return np.squeeze(r_val, axis=1)
else:
return r_val
    def _query_is_cached(self, key, normalized=None):
        """Checks if the query has been cached by Magnitude."""
        normalized = normalized if normalized is not None else self.normalized
        return ((self._vector_for_key_cached._cache.get(((key,), frozenset([('normalized', normalized)]))) is not None) or (  # noqa
            self._out_of_vocab_vector_cached._cache.get(((key,), frozenset([('normalized', normalized)]))) is not None))  # noqa
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def distance(self, key, q):
"""Calculates the distance from key to the key(s) in q."""
a = self._query_numpy(key, normalized=self.normalized)
if not isinstance(q, list):
b = self._query_numpy(q, normalized=self.normalized)
return np.linalg.norm(a - b)
else:
return [
np.linalg.norm(
a -
b) for b in self._query_numpy(
q,
contextualize=True,
normalized=self.normalized)]
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def similarity(self, key, q):
"""Calculates the similarity from key to the key(s) in q."""
a = self._query_numpy(key, normalized=True)
if not isinstance(q, list):
b = self._query_numpy(q, normalized=True)
return np.inner(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
else:
return [np.inner(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
for b in self._query_numpy(q,
contextualize=True,
normalized=True)]
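    # Illustrative usage sketch (comment only, not part of the library API):
    # assuming `vectors` is a Magnitude instance opened from a `.magnitude`
    # file, similarity() accepts a single key or a list of keys for q, e.g.
    #   vectors.similarity("cat", "dog")             # -> one float
    #   vectors.similarity("cat", ["dog", "plane"])  # -> one float per key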
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def most_similar_to_given(self, key, q):
"""Calculates the most similar key in q to key."""
similarities = self.similarity(key, q)
        max_index, _ = max(enumerate(similarities), key=operator.itemgetter(1))
        return q[max_index]
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def doesnt_match(self, q):
"""Given a set of keys, figures out which key doesn't
match the rest.
"""
mean_vector = np.mean(self._query_numpy(
q, contextualize=True, normalized=True), axis=0)
mean_unit_vector = mean_vector / np.linalg.norm(mean_vector)
distances = [
np.linalg.norm(
mean_unit_vector - b
)
for b in self._query_numpy(q, contextualize=True, normalized=True)]
max_index, _ = max(enumerate(distances), key=operator.itemgetter(1))
return q[max_index]
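    # For example (illustrative only): doesnt_match() returns the key whose
    # unit vector is farthest from the mean unit vector of the group, e.g.
    #   vectors.doesnt_match(["breakfast", "cereal", "dinner", "lunch"])
    # would typically return the odd one out, though the exact answer depends
    # on the underlying model.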
def _db_query_similarity(
self,
positive,
negative,
min_similarity=None,
topn=10,
exclude_keys=set(),
return_similarities=False,
method='distance',
effort=1.0):
"""Runs a database query to find vectors close to vector."""
COSMUL = method == '3cosmul' # noqa: N806
APPROX = method == 'approx' # noqa: N806
DISTANCE = not COSMUL and not APPROX # noqa: N806
exclude_keys = {self._key_t(exclude_key)
for exclude_key in exclude_keys}
if topn is None:
topn = self.length
filter_topn = self.max_duplicate_keys * (topn + len(exclude_keys))
# Find mean unit vector
if (DISTANCE or APPROX) and (len(negative) > 0 or len(positive) > 1):
positive_vecs = np.sum(
self._query_numpy(
positive,
contextualize=True,
normalized=True),
axis=0)
if len(negative) > 0:
negative_vecs = -1.0 * \
np.sum(self._query_numpy(
negative,
contextualize=True,
normalized=True),
axis=0)
else:
negative_vecs = np.zeros((self.dim,), dtype=self.dtype)
mean_vector = (positive_vecs + negative_vecs) / \
float(len(positive) + len(negative))
mean_unit_vector = mean_vector / np.linalg.norm(mean_vector)
elif (DISTANCE or APPROX):
mean_unit_vector = self._query_numpy(
positive[0], normalized=True)
elif COSMUL:
positive_vecs = self._query_numpy(
positive, contextualize=True, normalized=True)
if len(negative) > 0:
negative_vecs = self._query_numpy(
negative, contextualize=True, normalized=True)
else:
negative_vecs = np.zeros((0, self.dim))
# Calculate topn closest in batches over all vectors
if DISTANCE or COSMUL:
filtered_indices = []
for batch_start, _, batch in \
self.get_vectors_mmap_batch_generator():
if DISTANCE:
similiarities = np.dot(batch, mean_unit_vector)
elif COSMUL:
positive_similiarities = [
((1 + np.dot(batch, vec)) / 2)
for vec in positive_vecs
]
negative_similiarities = [
((1 + np.dot(batch, vec)) / 2)
for vec in negative_vecs
]
similiarities = (
np.prod(positive_similiarities, axis=0) /
(np.prod(negative_similiarities, axis=0) + 0.000001))
partition_results = np.argpartition(similiarities, -1 * min(
filter_topn, self.batch_size, self.length))[-filter_topn:]
for index in partition_results:
if (min_similarity is None or
similiarities[index] >= min_similarity):
if len(filtered_indices) < filter_topn:
heapq.heappush(filtered_indices, (
similiarities[index],
batch_start + index))
elif similiarities[index] > filtered_indices[0][0]:
heapq.heappushpop(filtered_indices, (
similiarities[index],
batch_start + index))
# Get the final topn from all batches
topn_indices = heapq.nlargest(filter_topn, filtered_indices,
key=lambda x: x[0])
topn_indices = iter(topn_indices)
elif APPROX:
approx_index = self.get_approx_index()
search_k = int(effort * filter_topn * self.approx_trees)
nns = approx_index.get_nns_by_vector(
mean_unit_vector,
filter_topn,
search_k=search_k,
include_distances=True)
topn_indices = izip(nns[1], nns[0])
topn_indices = imap(lambda di: (1 - di[0] ** 2 * .5, di[1]),
topn_indices)
# Tee topn_indices iterator
topn_indices_1, topn_indices_2 = tee(topn_indices)
# Retrieve the keys of the vectors
keys = self.index([i[1] for i in topn_indices_1],
return_vector=False)
# Build the result
results = []
for key, similarity in izip(keys, topn_indices_2):
key_t = self._key_t(key)
if len(results) >= topn:
break
if key_t in exclude_keys:
continue
exclude_keys.add(key_t)
if return_similarities:
results.append((key, similarity[0]))
else:
results.append(key)
return results
def _handle_pos_neg_args(self, positive, negative):
if not isinstance(
positive,
list) or (
len(positive) > 0 and isinstance(
positive[0],
Number)):
positive = [positive]
if not isinstance(
negative,
list) or (
len(negative) > 0 and isinstance(
negative[0],
Number)):
negative = [negative]
return positive, negative
def _exclude_set(self, positive, negative):
def _is_vec(elem):
return isinstance(elem, np.ndarray) or \
(isinstance(elem, list) and len(elem) > 0 and
isinstance(elem[0], Number))
return frozenset((elem for elem in chain.from_iterable(
[positive, negative]) if not _is_vec(elem)))
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def most_similar(self, positive, negative=[], topn=10, min_similarity=None,
return_similarities=True):
"""Finds the topn most similar vectors under or equal
to max distance.
"""
positive, negative = self._handle_pos_neg_args(positive, negative)
return self._db_query_similarity(
positive=positive,
negative=negative,
min_similarity=min_similarity,
topn=topn,
exclude_keys=self._exclude_set(
positive,
negative),
return_similarities=return_similarities,
method='distance')
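    # Illustrative usage sketch (comment only; keys and topn are placeholders):
    # positive/negative lists allow analogy-style queries, e.g.
    #   vectors.most_similar("cat", topn=5)
    #   vectors.most_similar(positive=["woman", "king"], negative=["man"])
    # Each result is a (key, similarity) tuple when return_similarities=True.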
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def most_similar_cosmul(self, positive, negative=[], topn=10,
min_similarity=None, return_similarities=True):
"""Finds the topn most similar vectors under or equal to max
distance using 3CosMul:
[Levy and Goldberg](http://www.aclweb.org/anthology/W14-1618)
"""
positive, negative = self._handle_pos_neg_args(positive, negative)
results = self._db_query_similarity(
positive=positive,
negative=negative,
min_similarity=min_similarity,
topn=topn,
exclude_keys=self._exclude_set(
positive,
negative),
return_similarities=return_similarities,
method='3cosmul')
return results
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def most_similar_approx(
self,
positive,
negative=[],
topn=10,
min_similarity=None,
return_similarities=True,
effort=1.0):
"""Approximates the topn most similar vectors under or equal to max
distance using Annoy:
https://github.com/spotify/annoy
"""
if not self.approx:
raise RuntimeError("The `.magnitude` file you are using does not \
support the `most_similar_approx` function. If you are using a pre-built \
`.magnitude` file, visit Magnitude's git repository page's README and download \
the 'Heavy' model instead. If you converted this `.magnitude` file yourself \
you will need to re-convert the file passing the `-a` flag to the converter to \
build the appropriate indexes into the `.magnitude` file.")
positive, negative = self._handle_pos_neg_args(positive, negative)
effort = min(max(0, effort), 1.0)
results = self._db_query_similarity(
positive=positive,
negative=negative,
min_similarity=min_similarity,
topn=topn,
exclude_keys=self._exclude_set(
positive,
negative),
return_similarities=return_similarities,
method='approx',
effort=effort)
return results
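    # Illustrative usage sketch (comment only), assuming the `.magnitude` file
    # was converted with the approximate index (`-a`) built in: `effort`
    # trades accuracy for speed by scaling Annoy's search_k parameter.
    #   vectors.most_similar_approx("cat", topn=10, effort=0.5)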
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def closer_than(self, key, q, topn=None):
"""Finds all keys closer to key than q is to key."""
epsilon = (10.0 / 10**6)
min_similarity = self.similarity(key, q) + epsilon
return self.most_similar(key, topn=topn, min_similarity=min_similarity,
return_similarities=False)
def get_vectors_mmap(self, log=True):
"""Gets a numpy.memmap of all vectors, blocks if it is still
being built.
"""
if self._all_vectors is None:
logged = False
while True:
if not self.setup_for_mmap:
self._setup_for_mmap()
try:
if not self.memory_db and self.length > 0:
all_vectors = np.memmap(
self.path_to_mmap, dtype=self.dtype, mode='r',
shape=(self.length, self.dim))
self._all_vectors = all_vectors
else:
all_vectors = np.zeros((0, self.dim))
self._all_vectors = all_vectors
break
except BaseException:
if not logged and log and self.log:
_log("Need to build a memory map. "
"This may take some time...but it only "
"needs to be done once (even between "
"multiple runs of this program). The result"
" will get stashed into a temporary "
"directory on your "
"computer.")
path_to_mmap_temp = self.path_to_mmap + '.tmp'
tlock = self.MMAP_THREAD_LOCK.acquire(False)
plock = self.MMAP_PROCESS_LOCK.acquire(0)
if tlock and plock:
values = imap(
lambda kv: kv[1], self._iter(
put_cache=self.lazy_loading == -1,
downloader=True))
try:
with open(path_to_mmap_temp, "w+b") as mmap_file:
all_vectors = np.memmap(
mmap_file, dtype=self.dtype, mode='w+',
shape=(self.length, self.dim))
last_p = 0
for i, value in enumerate(values):
progress = round((float(i) / float(self.length)) * 100, 2) # noqa
if log and self.log and int(progress) > last_p: # noqa
last_p = int(progress)
_log("Progress: %.2f%%" %
(progress,))
all_vectors[i] = value
all_vectors.flush()
try:
del all_vectors
except BaseException:
pass
if not self.closed:
os.rename(path_to_mmap_temp, self.path_to_mmap)
else:
return
finally:
self.MMAP_THREAD_LOCK.release()
try:
self.MMAP_PROCESS_LOCK.release()
except BaseException:
pass
sleep(1) # Block before trying again
return self._all_vectors
def get_vectors_mmap_batch_generator(self):
"""Gets batches of get_vectors_mmap()."""
all_vectors = self.get_vectors_mmap()
if self.length > self.batch_size:
for i in range(all_vectors.shape[0]):
batch_start = i * self.batch_size
batch_end = min(batch_start + self.batch_size,
all_vectors.shape[0])
if batch_start >= all_vectors.shape[0]:
break
yield (batch_start, batch_end,
all_vectors[batch_start:batch_end])
if batch_end == all_vectors.shape[0]:
break
else:
yield (0, self.length, all_vectors)
def get_approx_index_chunks(self):
"""Gets decompressed chunks of the AnnoyIndex of the vectors from
the database."""
try:
db = self._db(force_new=True, downloader=True)
num_chunks = db.execute(
"""
SELECT COUNT(rowid)
FROM `magnitude_approx`
WHERE trees = ?
""", (self.approx_trees,)).fetchall()[0][0]
with lz4.frame.LZ4FrameDecompressor() as decompressor:
chunks = db.execute(
"""
SELECT rowid,index_file
FROM `magnitude_approx`
WHERE trees = ?
""", (self.approx_trees,))
for chunk in chunks:
yield num_chunks, decompressor.decompress(chunk[1])
if self.closed:
return
except Exception as e:
if self.closed:
pass
else:
raise e
def get_meta_chunks(self, meta_index):
"""Gets decompressed chunks of a meta file embedded in
the database."""
try:
db = self._db(force_new=True, downloader=True)
num_chunks = db.execute(
"""
SELECT COUNT(rowid)
FROM `magnitude_meta_""" + str(meta_index) + """`
""").fetchall()[0][0]
with lz4.frame.LZ4FrameDecompressor() as decompressor:
chunks = db.execute(
"""
SELECT rowid,meta_file
FROM `magnitude_meta_""" + str(meta_index) + """`
""")
for chunk in chunks:
yield num_chunks, decompressor.decompress(chunk[1])
if self.closed:
return
except Exception as e:
if self.closed:
pass
else:
raise e
def get_approx_index(self, log=True):
"""Gets an AnnoyIndex of the vectors from the database."""
chunks = self.get_approx_index_chunks()
if self._approx_index is None:
logged = False
while True:
if not self.setup_for_mmap:
self._setup_for_mmap()
try:
sys.stdout.flush()
sys.stderr.flush()
approx_index = AnnoyIndex(self.emb_dim, metric='angular')
approx_index.load(self.path_to_approx_mmap)
self._approx_index = approx_index
break
except BaseException:
sys.stdout.flush()
sys.stderr.flush()
if not logged and log and self.log:
_log("Need to build the approximate index."
" This may take some time...but it only "
"needs to be done once (even between "
"multiple runs of this program). The result"
" will get stashed into a temporary "
"directory on your "
"computer.")
path_to_approx_mmap_temp = self.path_to_approx_mmap \
+ '.tmp'
tlock = self.APPROX_MMAP_THREAD_LOCK.acquire(False)
plock = self.APPROX_MMAP_PROCESS_LOCK.acquire(0)
if tlock and plock:
try:
with open(path_to_approx_mmap_temp, "w+b") \
as mmap_file:
last_p = 0
for i, (length, chunk) in enumerate(chunks):
progress = round((float(i) / float(length)) * 100, 2) # noqa
if log and self.log and int(progress) > last_p: # noqa
last_p = int(progress)
_log("Progress: %.2f%%" %
(progress,))
mmap_file.write(chunk)
if not self.closed:
os.rename(path_to_approx_mmap_temp,
self.path_to_approx_mmap)
else:
return
finally:
self.APPROX_MMAP_THREAD_LOCK.release()
try:
self.APPROX_MMAP_PROCESS_LOCK.release()
except BaseException:
pass
sleep(1) # Block before trying again
return self._approx_index
def get_elmo_embedder(self, log=True):
"""Gets an ElmoEmbedder of the vectors from the database."""
meta_1_chunks = self.get_meta_chunks(1)
meta_2_chunks = self.get_meta_chunks(2)
if self._elmo_embedder is None:
logged = False
while True:
if not self.setup_for_mmap:
self._setup_for_mmap()
try:
if len(self.devices) > 0:
elmo_embedder = ElmoEmbedder(
self.path_to_elmo_o_mmap, self.path_to_elmo_w_mmap,
cuda_device=self.devices[0])
else:
elmo_embedder = ElmoEmbedder(
self.path_to_elmo_o_mmap, self.path_to_elmo_w_mmap)
self._elmo_embedder = elmo_embedder
break
except BaseException:
if not logged and log and self.log:
_log("Need to build ElmoEmbedder. "
"This may take some time...but it only "
"needs to be done once (even between "
"multiple runs of this program). The result"
" will get stashed into a temporary "
"directory on your "
"computer.")
path_to_elmo_w_mmap_temp = self.path_to_elmo_w_mmap \
+ '.tmp'
path_to_elmo_o_mmap_temp = self.path_to_elmo_o_mmap \
+ '.tmp'
tlock_w = self.ELMO_W_MMAP_THREAD_LOCK.acquire(False)
plock_w = self.ELMO_W_MMAP_PROCESS_LOCK.acquire(0)
tlock_o = self.ELMO_O_MMAP_THREAD_LOCK.acquire(False)
plock_o = self.ELMO_O_MMAP_PROCESS_LOCK.acquire(0)
if tlock_w and plock_w and tlock_o and plock_o:
try:
with open(path_to_elmo_w_mmap_temp, "w+b") \
as mmap_file:
last_p = 0
for i, (length, chunk) \
in enumerate(meta_1_chunks):
progress = round((float(i) / float(length)) * 100, 2) # noqa
if log and self.log and int(progress) > last_p: # noqa
last_p = int(progress)
_log("Progress: %.2f%%" %
(progress,))
mmap_file.write(chunk)
if not self.closed:
os.rename(path_to_elmo_w_mmap_temp,
self.path_to_elmo_w_mmap)
else:
return
with open(path_to_elmo_o_mmap_temp, "w+b") \
as mmap_file:
for _, chunk in meta_2_chunks:
mmap_file.write(chunk)
if not self.closed:
os.rename(path_to_elmo_o_mmap_temp,
self.path_to_elmo_o_mmap)
else:
return
finally:
self.ELMO_W_MMAP_THREAD_LOCK.release()
try:
self.ELMO_W_MMAP_PROCESS_LOCK.release()
except BaseException:
pass
self.ELMO_O_MMAP_THREAD_LOCK.release()
try:
self.ELMO_O_MMAP_PROCESS_LOCK.release()
except BaseException:
pass
sleep(1) # Block before trying again
return self._elmo_embedder
def _iter(self, put_cache, downloader=False):
"""Yields keys and vectors for all vectors in the store."""
try:
db = self._db(force_new=True, downloader=downloader)
results = db.execute(
"""
SELECT *
FROM `magnitude`
""")
for result in results:
yield self._db_full_result_to_vec(result, put_cache=put_cache)
if self.closed:
return
except Exception as e:
if self.closed:
pass
else:
raise e
def __iter__(self):
"""Yields keys and vectors for all vectors in the store."""
return self._iter(put_cache=True)
def __len__(self):
"""Returns the number of vectors."""
return self.length
def __contains__(self, key):
"""Checks whether a key exists in the vectors"""
return self._vector_for_key_cached(key) is not None
def __getitem__(self, q):
"""Performs the index method when indexed."""
if isinstance(q, slice):
return self.index(list(range(*q.indices(self.length))),
return_vector=True)
else:
return self.index(q, return_vector=True)
def close(self):
"""Cleans up the object"""
self.closed = True
while any([t.is_alive() for t in self._threads]):
sleep(.5)
for conn in self._all_conns:
try:
conn.close()
except Exception:
pass
if hasattr(self, 'fd'):
try:
os.close(self.fd)
except BaseException:
pass
try:
self._all_vectors._mmap.close()
except BaseException:
pass
try:
del self._all_vectors
gc.collect()
except BaseException:
pass
try:
self._approx_index.unload()
except BaseException:
pass
if (hasattr(self, 'MMAP_PROCESS_LOCK') and
hasattr(self.MMAP_PROCESS_LOCK, 'lockfile') and
self.MMAP_PROCESS_LOCK.lockfile is not None):
try:
self.MMAP_PROCESS_LOCK.lockfile.close()
except BaseException:
pass
if (hasattr(self, 'APPROX_MMAP_PROCESS_LOCK') and
hasattr(self.APPROX_MMAP_PROCESS_LOCK, 'lockfile') and
self.APPROX_MMAP_PROCESS_LOCK.lockfile is not None):
try:
self.APPROX_MMAP_PROCESS_LOCK.lockfile.close()
except BaseException:
pass
if (hasattr(self, 'ELMO_W_MMAP_PROCESS_LOCK') and
hasattr(self.ELMO_W_MMAP_PROCESS_LOCK, 'lockfile') and
self.ELMO_W_MMAP_PROCESS_LOCK.lockfile is not None):
try:
self.ELMO_W_MMAP_PROCESS_LOCK.lockfile.close()
except BaseException:
pass
if (hasattr(self, 'ELMO_O_MMAP_PROCESS_LOCK') and
hasattr(self.ELMO_O_MMAP_PROCESS_LOCK, 'lockfile') and
self.ELMO_O_MMAP_PROCESS_LOCK.lockfile is not None):
try:
self.ELMO_O_MMAP_PROCESS_LOCK.lockfile.close()
except BaseException:
pass
def __del__(self):
""" Destructor for the class """
try:
self.close()
except BaseException:
pass
class FeaturizerMagnitude(Magnitude):
"""A FeaturizerMagnitude class that subclasses Magnitude and acts as
a way to featurize arbitrary python
Attributes:
number_of_values: number_of_values should be set to the
approximate upper-bound of the number of
feature values that will be looked up with query().
If you don't know the exact number, be conservative
and pick a large number, while keeping in mind the
bigger number_of_values is, the more memory it will
consume
namespace: an optional namespace that will be prepended to each query
if provided
"""
def __init__(self, number_of_values=1000000, namespace=None, **kwargs):
self.namespace = namespace
super(
FeaturizerMagnitude,
self).__init__(
None,
_number_of_values=number_of_values,
_namespace=self.namespace,
**kwargs)
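# Illustrative usage sketch (comment only; the namespace and key below are
# placeholders): a FeaturizerMagnitude maps arbitrary keys to stable vectors,
# so non-textual values can be featurized alongside word vectors.
#   pos_vectors = FeaturizerMagnitude(100, namespace="PartsOfSpeech")
#   pos_vectors.query("NN")  # -> a stable vector for the "NN" tag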
class ConcatenatedMagnitude(object):
"""A ConcatenatedMagnitude class that acts as a concatenated interface
to querying multiple magnitude objects.
Attributes:
*args: each arg should be a Magnitude object
"""
def __init__(self, *args, **kwargs):
if len(args) < 2:
raise RuntimeError(
"Must concatenate at least 2 Magnitude objects.")
self.magnitudes = args
self.dim = sum([m.dim for m in self.magnitudes])
all_use_numpy = [m.use_numpy for m in self.magnitudes]
if not all(use_numpy == all_use_numpy[0]
for use_numpy in all_use_numpy):
raise RuntimeError(
"All magnitude objects must have the same use_numpy value.")
self.use_numpy = all_use_numpy[0]
def _take(self, q, multikey, i):
"""Selects only the i'th element from the inner-most axis and
reduces the dimensions of the tensor q by 1.
"""
if multikey == -1:
return q
else:
cut = np.take(q, [i], axis=multikey)
result = np.reshape(cut, np.shape(cut)[0:-1]).tolist()
return result
def _hstack(self, ls, use_numpy):
"""Horizontally stacks NumPy arrays or Python lists"""
if use_numpy:
return np.concatenate(ls, axis=-1)
else:
return list(chain.from_iterable(ls))
def _dstack(self, ls, use_numpy):
"""Depth stacks NumPy arrays or Python lists"""
if use_numpy:
return np.concatenate(ls, axis=-1)
else:
return [self._hstack((l3[example] for l3 in ls),
use_numpy=use_numpy) for example in xrange(len(ls[0]))] # noqa
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def query(self, q, pad_to_length=None,
pad_left=None, truncate_left=None,
normalized=None):
"""Handles a query of keys which could be a single key, a
1-D list of keys, or a 2-D list of keys.
"""
# Check if keys are specified for each concatenated model
multikey = -1
if isinstance(q, tuple):
multikey = 0
if isinstance(q, list) and isinstance(q[0], tuple):
multikey = 1
if (isinstance(q, list) and isinstance(q[0], list) and
isinstance(q[0][0], tuple)):
multikey = 2
# Define args
pad_to_length = pad_to_length or self.magnitudes[0].pad_to_length
pad_left = pad_left or self.magnitudes[0].pad_left
truncate_left = truncate_left or self.magnitudes[0].truncate_left
# Query each model with the right set of keys
v = [m.query(self._take(q, multikey, i), normalized=(
normalized if normalized is not None else m.normalized
))
for i, m in enumerate(self.magnitudes)]
if not isinstance(q, list): # Single key
return self._hstack(v, self.use_numpy)
elif isinstance(q, list) \
and (len(q) == 0 or not isinstance(q[0], list)): # 1D list
return self._hstack(v, self.use_numpy)
elif isinstance(q, list): # 2D List
return self._dstack(v, self.use_numpy)
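# Illustrative usage sketch (comment only; file names are placeholders): two
# or more Magnitude objects can be concatenated and queried together, and
# passing a tuple routes a separate key to each underlying model.
#   word_vectors = Magnitude("word2vec.magnitude")
#   pos_vectors = FeaturizerMagnitude(100, namespace="PartsOfSpeech")
#   both = ConcatenatedMagnitude(word_vectors, pos_vectors)
#   both.query(("dog", "NN"))  # -> one vector, dim = sum of the models' dims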
class MagnitudeUtils(object):
"""A MagnitudeUtils class that contains static helper utilities."""
@staticmethod
def download_model(
model,
download_dir=os.path.expanduser('~/.magnitude/'),
remote_path='http://magnitude.plasticity.ai/',
log=False,
_download=True,
_local=False):
""" Downloads a remote Magnitude model locally (if it doesn't already
exist) and synchronously returns the local file path once it has
been completed """
# Clean the inputs
orig_model = model
if model.endswith('.magnitude'):
model = model[:-10]
if model.startswith('http://') or model.startswith('https://'):
remote_path = ''
if model.startswith('http://magnitude.plasticity.ai/'):
model = model.replace('http://magnitude.plasticity.ai/', '')
remote_path = 'http://magnitude.plasticity.ai/'
if model.startswith('https://magnitude.plasticity.ai/'):
model = model.replace('https://magnitude.plasticity.ai/', '')
remote_path = 'https://magnitude.plasticity.ai/'
if not remote_path.endswith('/') and len(remote_path) > 0:
remote_path = remote_path + '/'
# Local download
local_file_name = model.replace('/', '_') + '.magnitude'
local_file_name_tmp = model.replace('/', '_') + '.magnitude.tmp'
remote_file_path = remote_path + model + '.magnitude'
if not _download:
return remote_file_path
# Make the download directories
try:
os.makedirs(download_dir)
except OSError:
if not os.path.isdir(download_dir):
raise RuntimeError("The download folder is not a folder.")
if not os.path.isfile(os.path.join(download_dir, local_file_name)):
try:
if log:
_log("Downloading '.magnitude' file..."
"this may take some time. If you want "
"to stream the model, pass stream=True "
"to the Magnitude constructor instead."
"This only needs to happen once.")
urlretrieve(
remote_file_path,
os.path.join(download_dir, local_file_name_tmp)
)
conn = sqlite3.connect(
os.path.join(
download_dir,
local_file_name_tmp))
conn.cursor().execute("SELECT * FROM magnitude_format")
conn.close()
os.rename(
os.path.join(
download_dir,
local_file_name_tmp),
os.path.join(
download_dir,
local_file_name))
except BaseException:
if _local:
raise RuntimeError(
"The path to the Magnitude file at '" + orig_model + "' could not be found. Also failed to find a valid remote model at the following URL: " + # noqa
remote_file_path)
else:
raise RuntimeError(
"The download could not be completed. Are you sure a valid model exists at the following URL: " + # noqa
remote_file_path)
return os.path.join(download_dir, local_file_name)
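    # Illustrative usage sketch (comment only; the model name below is a
    # placeholder for any path hosted under `remote_path`):
    #   path = MagnitudeUtils.download_model('glove/medium/glove.6B.50d',
    #                                        log=True)
    #   vectors = Magnitude(path)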
@staticmethod
def batchify(X, y, batch_size): # noqa: N803
""" Creates an iterator that chunks `X` and `y` into batches
that each contain `batch_size` elements and loops forever"""
X_batch_generator = cycle([X[i: i + batch_size] # noqa: N806
for i in xrange(0, len(X), batch_size)])
y_batch_generator = cycle([y[i: i + batch_size]
for i in xrange(0, len(y), batch_size)])
return izip(X_batch_generator, y_batch_generator)
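    # For example (illustrative only): with X = [1, 2, 3], y = [0, 1, 0] and
    # batch_size=2, the iterator cycles forever over ([1, 2], [0, 1]) and
    # ([3], [0]); it is typically consumed with next() inside a training loop.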
@staticmethod
def class_encoding():
"""Creates a set of functions to add a new class, convert a
class into an integer, and the integer back to a class."""
class_to_int_map = {}
        # A one-element list is used as a mutable holder so the nested
        # closures can reset and rebuild the reverse map without relying on
        # module-level global state.
        int_to_class_map = [None]
        def add_class(c):
            int_to_class_map[0] = None
            return class_to_int_map.setdefault(
                c, len(class_to_int_map))
        def class_to_int(c):
            return class_to_int_map[c]
        def int_to_class(i):
            if int_to_class_map[0] is None:
                int_to_class_map[0] = {v: k
                                       for k, v in (
                                           (
                                               hasattr(class_to_int_map, 'iteritems') and  # noqa
                                               class_to_int_map.iteritems
                                           ) or
                                           class_to_int_map.items
                                       )()}
            return int_to_class_map[0][i]
return add_class, class_to_int, int_to_class
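    # Illustrative usage sketch (comment only):
    #   add_class, class_to_int, int_to_class = MagnitudeUtils.class_encoding()
    #   add_class("cat")     # -> 0
    #   add_class("dog")     # -> 1
    #   class_to_int("dog")  # -> 1
    #   int_to_class(0)      # -> "cat"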
@staticmethod
def to_categorical(y, num_classes=None):
"""Converts a class vector (integers) to binary class matrix.
"""
y = np.array(y, dtype='int')
input_shape = y.shape
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
y = y.ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes), dtype=np.float32)
categorical[np.arange(n), y] = 1
output_shape = input_shape + (num_classes,)
categorical = np.reshape(categorical, output_shape)
return categorical
@staticmethod
def from_categorical(categorical):
"""Converts a binary class matrix to a class vector (integers)"""
return np.argmax(categorical, axis=1)
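    # For example (illustrative only):
    #   MagnitudeUtils.to_categorical([1, 0, 2])
    #     -> [[0., 1., 0.], [1., 0., 0.], [0., 0., 1.]]
    #   MagnitudeUtils.from_categorical([[0., 1., 0.], [1., 0., 0.]])
    #     -> [1, 0]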
if _APSW_LIB == 'internal':
class HTTPVFSFileCache():
""" This cache sort of acts like a predictor for sequential
network reads. It proactively pulls in more data than
requested from the network if it sees a pattern of sequential
        reads. The amount of data predictively pulled is
        adjusted based on the last few true sequential reads.
"""
def __init__(self, vfsfile):
self.vfsfile = vfsfile
self.cache_size = None
self._start_offset = 0
self.running_hit_direction = 0
self.running_hit_last_start = float("inf")
self.running_hit_last_end = 0
self.running_forward_hit_amount = 0
self.running_backward_hit_amount = 0
self.running_hit_amount = 0
self.time = time.time()
self.id = uuid.uuid4().int
self.data = "".encode('utf-8')
def length_of_data(self):
"""Returns the length of the cached data."""
return len(self.data)
def get_data(self):
"""Returns the cached data."""
return self.data
def set_data(self, data):
"""Sets the cached data."""
self.data = data
def add_to_caches(self):
"""Adds self to the caches."""
self.vfsfile.caches.append(self)
def save_cache(self):
"""Saves the cache."""
pass
def delete_caches(self):
"""Deletes old caches."""
current_time = time.time()
self.vfsfile.caches = [
cache for cache in self.vfsfile._get_caches() if (
current_time - cache.time) <= self.vfsfile.cache_ttl]
def get_cache(self, amount, offset):
"""Checks if a cache exists for the data offset, and amount to read,
if so, return the cache, and the start and end range to read
from the cache's data.
Keeps track of forward sequential reads, and backward
sequential reads for the cache.
"""
return_val = [None, None, None, None, None, None, None]
measure_cache_size = self.cache_size is None
if measure_cache_size:
self.cache_size = 0
for c in self.vfsfile._get_caches():
if measure_cache_size:
self.cache_size += c.length_of_data()
start = offset - c._start_offset
end = start + amount
close_to_last_end = (
abs(start - c.running_hit_last_end) <
self.vfsfile.sequential_cache_gap_tolerance)
close_to_last_start = (
abs(c.running_hit_last_start - end) <
self.vfsfile.sequential_cache_gap_tolerance)
small_read = self.vfsfile.sequential_cache_default_read * 2 # noqa
if start >= 0 and c.length_of_data() >= end:
# Cache hit
# Keeps track of the total running
# amount of sequentially read
# bytes on the cache, and the direction
if start >= c.running_hit_last_end:
# Forward sequential
c.running_forward_hit_amount = \
c.running_forward_hit_amount + amount
if (c.running_forward_hit_amount !=
c.running_backward_hit_amount):
c.running_hit_direction = max(
(c.running_forward_hit_amount, 1),
(c.running_backward_hit_amount, -1))[1]
else:
c.running_hit_direction = 1
if end <= c.running_hit_last_start:
# Backward sequential
c.running_backward_hit_amount = \
c.running_backward_hit_amount + amount
if (c.running_forward_hit_amount !=
c.running_backward_hit_amount):
c.running_hit_direction = max(
(c.running_forward_hit_amount, 1),
(c.running_backward_hit_amount, -1))[1]
else:
c.running_hit_direction = -1
c.running_hit_amount = max(
c.running_forward_hit_amount,
c.running_backward_hit_amount)
c.running_hit_last_start = start
c.running_hit_last_end = end
c.time = time.time()
return_val = (
c.running_hit_amount,
c.running_hit_direction,
c.running_forward_hit_amount,
c.running_backward_hit_amount,
start,
end,
c
)
c.save_cache()
elif (
(return_val[0] is None or (isinstance(return_val, list) and
c.running_hit_amount > return_val[0])) and # noqa
start >= c.running_hit_last_end and
close_to_last_end
):
# Complete cache miss, but it is still a close forward
# sequential read of the current cache, return
# the running sequentially read byte information
# so it can be added to the next cache
return_val[1] = 1
if return_val[1] != c.running_hit_direction:
return_val[0] = small_read
return_val[2] = small_read
return_val[3] = small_read
else:
return_val[0] = c.running_hit_amount
return_val[2] = c.running_forward_hit_amount
return_val[3] = c.running_backward_hit_amount
elif (
(return_val[0] is None or (isinstance(return_val, list) and
c.running_hit_amount > return_val[0])) and # noqa
end <= c.running_hit_last_start and
close_to_last_start
):
# Partial cache miss, but it is still a close backward
# sequential read of the current cache, return
# the running sequentially read byte information
# so it can be added to the next cache
return_val[1] = -1
if return_val[1] != c.running_hit_direction:
return_val[0] = small_read
return_val[2] = small_read
return_val[3] = small_read
else:
return_val[0] = c.running_hit_amount
return_val[2] = c.running_forward_hit_amount
return_val[3] = c.running_backward_hit_amount
return return_val
def write_data(self, start_offset, data, amount, offset):
"""Writes data fetched to the network cache and
returns only the amount requested back."""
# Writes the entire data fetched to the cache
if self.vfsfile.should_cache:
# Uses itself as a cache object
self._start_offset = start_offset
self.set_data(data)
if self.vfsfile.trace_log:
print("[HTTPVFS] Cache Size: %d bytes" % (self.cache_size,))
# Purge old caches
current_time = time.time()
if ((current_time -
self.vfsfile.last_cache_purge) >
self.vfsfile.ttl_purge_interval):
if self.vfsfile.trace_log:
print("[HTTPVFS] Purging expired caches...")
self.vfsfile.last_cache_purge = current_time
self.delete_caches()
                # Adds itself to the cache array, so that the next read
                # succeeds
self.add_to_caches()
return data[offset -
start_offset: (offset - start_offset) + amount]
def _prefetch_in_background(
self,
_prefetch_in_background,
amount,
offset,
sequential):
"""Prefetches data from the network to the cache."""
# Store the extra data fetched back in the network cache
if self.vfsfile.trace_log:
print(
"[HTTPVFS] Prefetching in background @ %d + %d" %
(offset, amount))
try:
if sequential:
data = _prefetch_in_background(
self.vfsfile.SEQUENTIAL, amount, offset)
else:
data = _prefetch_in_background(
self.vfsfile.RANDOM_ACCESS, amount, offset)
cache = HTTPVFSFileCache(self.vfsfile)
if data:
cache.write_data(offset, data, 0, offset)
if self.vfsfile.trace_log:
print(
"[HTTPVFS] Finished prefetching @ %d + %d" %
(offset, amount))
else:
if self.vfsfile.trace_log:
print(
"[HTTPVFS] Prefetching terminated early @ %d + %d" %
(offset, amount))
except BaseException:
if self.vfsfile.trace_log:
print(
"[HTTPVFS] Prefetching error @ %d + %d" %
(offset, amount))
pass
def prefetch_in_background(
self,
_prefetch_in_background,
amount,
offset,
sequential=False):
"""Prefetches data from the network to the cache
in the background."""
if self.vfsfile.trace_log:
if sequential:
print(
"[HTTPVFS] Sequential prefetching "
"request @ %d + %d" %
(offset, amount))
else:
print(
"[HTTPVFS] Random access prefetching "
"request @ %d + %d" %
(offset, amount))
self.vfsfile.prefetch_threads = [
t for t in self.vfsfile.prefetch_threads if t.is_alive()]
if (len(self.vfsfile.prefetch_threads) <=
self.vfsfile.prefetch_thread_limit or sequential):
prefetch_thread = threading.Thread(
target=self._prefetch_in_background,
args=(
_prefetch_in_background,
amount,
offset,
sequential),
name='HTTPVFSFileCache' +
(
'Sequential' if sequential else '') +
'PrefetchThread@' +
str(offset) +
'+' +
str(amount))
prefetch_thread.daemon = True
if sequential:
if self.vfsfile.sequential_prefetch_thread:
self.vfsfile.sequential_prefetch_thread.do_run = False
self.vfsfile.sequential_prefetch_thread = prefetch_thread
else:
self.vfsfile.prefetch_threads.append(prefetch_thread)
prefetch_thread.start()
else:
if self.vfsfile.trace_log:
print(
"[HTTPVFS] Ignoring prefetch request @ %d + %d, "
"reached prefetch thread limit" %
(offset, amount))
def read_data(self, amount, offset, _prefetch_in_background=None):
"""Reads data from the network cache and
returns only the amount requested back or
Returns None if there is a cache miss, and prefetches more data
into the cache using _prefetch_in_background(amount, offset)
if it detects a non-sequential access pattern in the
cache misses."""
# Don't do anything if caching is disabled
if not self.vfsfile.should_cache:
return None
# Find the closest cache match
current_time = time.time()
(
running_hit_amount,
running_hit_direction,
running_forward_hit_amount,
running_backward_hit_amount,
start,
end,
cache
) = self.get_cache(amount, offset)
if running_hit_amount is not None:
if (self.vfsfile.sequential_cache_exponential_read_growth and
cache is None):
# Reached a cache miss, but still sequentially reading
# If exponential sequential cache reads are on, double the
# read size
running_hit_amount = min(
running_hit_amount * 2,
self.vfsfile.sequential_cache_max_read)
running_forward_hit_amount = min(
running_forward_hit_amount * 2,
self.vfsfile.sequential_cache_max_read)
running_backward_hit_amount = min(
running_backward_hit_amount * 2,
self.vfsfile.sequential_cache_max_read)
self.running_forward_hit_amount = running_forward_hit_amount
self.running_backward_hit_amount = running_backward_hit_amount
self.running_hit_amount = running_hit_amount
self.running_hit_direction = running_hit_direction
self.vfsfile.running_hit_direction = running_hit_direction
if cache is None:
self.vfsfile.cache_amount = min(
running_hit_amount,
self.vfsfile.sequential_cache_max_read
)
self.save_cache()
else:
if cache is None:
                    # Cache miss, and not a sequential read; only read a
                    # small default amount next time
self.vfsfile.cache_amount = \
self.vfsfile.sequential_cache_default_read
self.save_cache()
if cache:
data = cache.get_data()[start:end]
# Adjust the cache amount for the next read
self.vfsfile.running_hit_direction = cache.running_hit_direction
self.vfsfile.cache_amount = min(
cache.running_hit_amount,
self.vfsfile.sequential_cache_max_read)
return data
elif self.vfsfile.random_access_cache_prefetch:
# Keep track of regions of the file where there are cache
# misses. Each "hit" on a file is analyzed and clustered into
# "groups" of hits, sequential "hits" are ignored.
# Purge old hit patterns
if (current_time - self.vfsfile.last_random_access_hit_tracker_purge) > self.vfsfile.ttl_purge_interval: # noqa
if self.vfsfile.trace_log:
print("[HTTPVFS] Purging expired hit trackers...")
self.vfsfile.last_random_access_hit_tracker_purge = \
current_time
self.vfsfile.hit_pattern = [hit for hit in self.vfsfile.hit_pattern if ((current_time - hit[4]) <= self.vfsfile.random_access_hit_tracker_ttl)] # noqa
# Find the closest cluster of hits for the current miss
hit_index = bisect.bisect_left(
KeyList(
self.vfsfile.hit_pattern,
key=lambda x: x[0]),
offset)
hit_index_area = []
if hit_index - 1 >= 0:
hit_index_area.append(hit_index - 1)
if hit_index < len(self.vfsfile.hit_pattern):
hit_index_area.append(hit_index)
if len(hit_index_area) > 0:
hit_index = min(
hit_index_area, key=lambda x: abs(
self.vfsfile.hit_pattern[x][0] - offset))
# Add the current miss to the closest cluster, and evaluate
# if it should be prefetched
hit = self.vfsfile.hit_pattern[hit_index]
dist = abs(hit[0] - offset)
if dist <= self.vfsfile.random_access_cache_range:
self.vfsfile.hit_pattern[hit_index] = [
(offset + hit[0]) / 2.0,
(dist + hit[1]) / 2.0 if dist > hit[1] else hit[1],
hit[2] + 1 if offset > hit[0] else hit[2],
hit[3] + 1 if offset < hit[0] else hit[3],
current_time]
hit = self.vfsfile.hit_pattern[hit_index]
if hit[2] >= hit[3] * 2 and (hit[2] + hit[3]) > 8:
# Looks like a forward sequential read pattern,
# ignore
del self.vfsfile.hit_pattern[hit_index]
elif hit[3] >= hit[2] * 2 and (hit[2] + hit[3]) > 8:
# Looks like a backward sequential read pattern,
# ignore
del self.vfsfile.hit_pattern[hit_index]
elif (_prefetch_in_background and (hit[2] > 2) and
(hit[3] > 2) and (hit[2] + hit[3]) > 30):
                            # If a certain region of the file is being "hit"
                            # frequently for small chunks of data within a
# larger range, prefetch that region of the file
# and data surrounding it to prevent future
# cache misses
self.prefetch_in_background(
_prefetch_in_background, int(
hit[1] * 2), max(int(hit[0] - hit[1]), 0)
)
return None
# mean, range, positive direction, negative direction, time
self.vfsfile.hit_pattern.insert(
hit_index, [offset, 0, 0, 0, current_time])
class HTTPVFSFileMemoryMappedCache(HTTPVFSFileCache):
""" This cache is like HTTPVFSFileCache
except all cache data is memory mapped
"""
def __init__(self, vfsfile, cache_dir_path, cache_key=None):
self.cache_dir_path = cache_dir_path
self.cache_key = cache_key
HTTPVFSFileCache.__init__(self, vfsfile)
if self.cache_key and self.cache_key != '.DS_Store':
cache_key_split = cache_key.split('.')[0].split('_')
self._start_offset = int(cache_key_split[0])
self.running_hit_direction = int(cache_key_split[1])
self.running_hit_last_start = (
float(
cache_key_split[2])
if cache_key_split[2] == 'inf' else int(
cache_key_split[2]))
self.running_hit_last_end = int(cache_key_split[3])
self.running_forward_hit_amount = int(cache_key_split[4])
self.running_backward_hit_amount = int(cache_key_split[5])
self.running_hit_amount = int(cache_key_split[6])
self.time = float(cache_key_split[7])
self.id = int(cache_key_split[8])
else:
self.cache_key = self.create_key()
def length_of_data(self):
"""Returns the length of the cached data."""
try:
return os.path.getsize(os.path.join(self.cache_dir_path,
self.cache_key))
except BaseException:
return 0
def add_to_mmaps(self, new, mm):
"""Adds a new mmap, evicting old mmaps if the maximum has been
reached."""
while (len(self.vfsfile.cache_mmaps_heap) >=
self.vfsfile.mmap_max_files):
_, evict = heapq.heappop(self.vfsfile.cache_mmaps_heap)
try:
evict_mm = self.vfsfile.cache_mmaps[evict]
except BaseException:
pass
try:
evict_mm.close()
except BaseException:
pass
try:
del self.vfsfile.cache_mmaps[evict]
except BaseException:
pass
heapq.heappush(self.vfsfile.cache_mmaps_heap,
(time.time(), new))
self.vfsfile.cache_mmaps[new] = mm
def get_mmap(self, create=True):
"""Gets the mmap for a key, opening a mmap to the file
if a mmap doesn't exist, creating a file, then opening a mmap
to it if the file doesn't exist."""
if (self.cache_key not in self.vfsfile.cache_mmaps and create):
joined = os.path.join(self.cache_dir_path,
self.cache_key)
if os.path.exists(os.path.join(self.cache_dir_path,
self.cache_key)):
f = open(joined, "r+b")
mm = mmap.mmap(f.fileno(), self.length_of_data())
f.close()
else:
f = open(joined, "w+b")
f.write("\0".encode('utf-8'))
f.flush()
os.fsync(f.fileno())
mm = mmap.mmap(f.fileno(), 1)
f.close()
self.add_to_mmaps(self.cache_key, mm)
try:
return self.vfsfile.cache_mmaps[self.cache_key]
except BaseException as e:
if create:
return e
else:
return None
def get_data(self):
"""Returns the cached data."""
return self.get_mmap()
def set_data(self, data):
"""Sets the cached data."""
self.save_cache()
mm = self.get_mmap(create=False)
try:
del self.vfsfile.cache_mmaps[self.cache_key]
except BaseException:
pass
try:
mm.close()
except BaseException:
pass
f = open(os.path.join(self.cache_dir_path,
self.cache_key), "w+b")
f.write(data)
f.flush()
os.fsync(f.fileno())
mm = None
mm = mmap.mmap(f.fileno(), len(data))
f.close()
self.vfsfile.cache_mmaps[self.cache_key] = mm
def create_key(self):
"""Serializes instance variables into a key."""
return '_'.join([
str(self._start_offset),
str(self.running_hit_direction),
str(self.running_hit_last_start),
str(self.running_hit_last_end),
str(self.running_forward_hit_amount),
str(self.running_backward_hit_amount),
str(self.running_hit_amount),
str(int(self.time)),
str(self.id),
]) + '.supersqlmmap'
def add_to_caches(self):
"""Adds self to the caches."""
pass
def save_cache(self):
"""Saves the cache."""
new_key = self.create_key()
old = os.path.join(self.cache_dir_path,
self.cache_key)
new = os.path.join(self.cache_dir_path, new_key)
try:
os.rename(old, new)
except BaseException:
pass
try:
mm = self.vfsfile.cache_mmaps[self.cache_key]
del self.vfsfile.cache_mmaps[self.cache_key]
self.add_to_mmaps(new_key, mm)
except BaseException:
pass
self.cache_key = new_key
def delete_caches(self):
"""Deletes old caches."""
current_time = time.time()
for cache in self.vfsfile._get_caches():
if cache.id == self.id:
continue
if (current_time - cache.time) > self.vfsfile.cache_ttl:
                    try:
                        # Use a local name that does not shadow the mmap module
                        mm = cache.get_mmap(create=False)
                    except BaseException:
                        pass
                    try:
                        # Evict the expired cache's mmap, not this instance's
                        del self.vfsfile.cache_mmaps[cache.cache_key]
                    except BaseException:
                        pass
                    try:
                        mm.close()
                    except BaseException:
                        pass
try:
os.remove(os.path.join(cache.cache_dir_path,
cache.cache_key))
except BaseException:
pass
class HTTPVFSFile(apsw.VFSFile):
""" This acts as the representation of a single file on
the HTTP virtual file system.
"""
def __init__(self, inheritfromvfsname, name, flags, vfs, options=None):
# Constants
self.RANDOM_ACCESS = 0
self.SEQUENTIAL = 1
# Cache + Network configuration
defaults = {
'should_cache': True,
'network_retry_delay': 10,
'max_network_retries': 10,
'sequential_cache_default_read': 4096 * 2,
'sequential_cache_gap_tolerance': 10 * (1024 ** 2),
'sequential_cache_max_read': 20 * (1024 ** 2),
'sequential_cache_exponential_read_growth': True,
'prefetch_thread_limit': 3,
'sequential_cache_prefetch': True,
'random_access_cache_prefetch': True,
'random_access_cache_range': 100 * (1024 ** 2),
'random_access_hit_tracker_ttl': 60,
'cache_ttl': 60,
'ttl_purge_interval': 5,
'use_mmap': False,
'mmap_max_files': 10,
'temp_dir': tempfile.gettempdir(),
'trace_log': False,
}
defaults.update(options or {})
for k, v in defaults.items():
setattr(self, k, v)
self.max_network_retries = max(self.max_network_retries, 4)
if not self.should_cache:
self.sequential_cache_prefetch = False
self.random_access_cache_prefetch = False
self.sequential_cache_default_read = 0
self.cache_amount = 0
# Cache initialization
self.caches = []
self.cache_mmaps_heap = []
self.cache_mmaps = {}
self.cache_amount = self.sequential_cache_default_read
self.last_cache_purge = 0
self.last_random_access_hit_tracker_purge = 0
# Prefetch Connections
self.pconn_terminated = {}
self.pconn_count = {}
self.pconn = {}
# Connection lock
self.conn_lock = threading.RLock()
# State to keep tracking adjusting the predictive network cache
# window
self.running_hit_direction = 0
self.hit_pattern = []
# Keep track of threads
self.prefetch_threads = []
self.sequential_prefetch_thread = None
# Initialization
self.vfs = vfs
self.length = 99999999999999999
self.name = name
self.tries = 1
self.url = self.name.filename()
url_cis = self.url.lower()
try:
self.url = self.url[url_cis.index('http://'):]
self.parsed_url = urlparse(self.url)
self._prepare_connection()
if self.random_access_cache_prefetch:
self._prepare_prefetch_connection(self.RANDOM_ACCESS)
if self.sequential_cache_prefetch:
self._prepare_prefetch_connection(self.SEQUENTIAL)
except BaseException:
try:
self.url = self.url[url_cis.index('https://'):]
self.parsed_url = urlparse(self.url)
self._prepare_connection()
if self.random_access_cache_prefetch:
self._prepare_prefetch_connection(self.RANDOM_ACCESS)
if self.sequential_cache_prefetch:
self._prepare_prefetch_connection(self.SEQUENTIAL)
except BaseException:
raise RuntimeError("Invalid URL.")
self.cache_dir = (
hashlib.md5(
self.url.encode('utf-8')).hexdigest() +
'_supersqlmmap')
self.cache_dir_path = os.path.join(self.temp_dir, self.cache_dir)
try:
os.makedirs(self.cache_dir_path + '/')
except OSError:
pass
# Prepare the VFS
apsw.VFSFile.__init__(self, inheritfromvfsname, os.devnull, flags)
def _new_connection(self):
"""Creates an HTTP connection"""
if self.parsed_url.scheme.lower() == 'http':
return http.client.HTTPConnection(
self.parsed_url.netloc, timeout=60)
else:
return http.client.HTTPSConnection(
self.parsed_url.netloc, timeout=60)
def _prepare_connection(self, new=True):
"""Prepares a new HTTP connection"""
try:
self.conn.close()
except BaseException:
pass
if new:
self.conn = self._new_connection()
def _prepare_prefetch_connection(self, n, new=True):
"""Prepares a new HTTP connection"""
try:
self.pconn_terminated[n] = True
while self.pconn_count[n] > 0:
sleep(1)
self.pconn[n].close()
except BaseException:
pass
if new:
self.pconn[n] = self._new_connection()
self.pconn_count[n] = 0
self.pconn_terminated[n] = False
def _wait_on_prefetch_connection(self, n):
self.pconn_count[n] += 1
def _unwait_on_prefetch_connection(self, n):
self.pconn_count[n] -= 1
def _network_error(self, e, i):
"""Handles an network error"""
if self.trace_log:
print("[HTTPVFS] Network Error: %s" % (str(e),))
if i + 1 >= self.tries:
raise RuntimeError(
"Could not reach the server at: '" + self.url + "'")
else:
if self.trace_log:
print("[HTTPVFS] Refreshing network connection...")
self.conn_lock.acquire()
self._prepare_connection()
self.conn_lock.release()
if i > 2:
if self.trace_log:
print("[HTTPVFS] Waiting before retrying...")
sleep(self.network_retry_delay)
if self.trace_log:
print("[HTTPVFS] Retrying...")
def _prefetch_in_background(self, n, amount, offset):
headers = {
'Range': "bytes=" + str(max(offset, 0)) + "-" + str(
min((offset + amount) - 1, self.length) # noqa
),
}
self._wait_on_prefetch_connection(n)
while not self.pconn_terminated[n]:
try:
self.pconn[n].request(
"GET", self.parsed_url.path, headers=headers)
break
except CannotSendRequest:
sleep(1)
while not self.pconn_terminated[n]:
try:
res = self.pconn[n].getresponse()
break
except ResponseNotReady:
# Since we are sharing the connection wait for this to be
# ready
sleep(1)
if self.pconn_terminated[n]:
self._unwait_on_prefetch_connection(n)
return
else:
self._unwait_on_prefetch_connection(n)
if not(res.status >= 200 and res.status <= 299):
# Check for a valid status from the server
return
data = bytearray(res.length)
i = 0
            # b'' is the end-of-stream sentinel for res.read()
            for piece in iter(lambda: res.read(1024), b''):
if not getattr(threading.currentThread(), "do_run", True):
break
data[i:i + len(piece)] = piece
i = i + len(piece)
else:
return bytes(data)
            # Leaving the thread early without reading all of the data
            # will make the connection unusable, so refresh it
self._prepare_prefetch_connection(n)
def _get_caches(self):
"""Gets all of the caches."""
if self.use_mmap:
return [
HTTPVFSFileMemoryMappedCache(
self,
self.cache_dir_path,
cache_key) for cache_key in os.listdir(
self.cache_dir_path)]
else:
return self.caches
def xRead(self, amount, offset): # noqa: N802
"""Intercepts SQLite's file read command"""
if self.trace_log:
print("[HTTPVFS] Read request @ %d + %d" % (offset, amount))
for i in range(self.tries):
try:
# Try to see if we have already read the data
# and cached it
if self.use_mmap:
cache = HTTPVFSFileMemoryMappedCache(
self, self.cache_dir_path)
else:
cache = HTTPVFSFileCache(self)
data = cache.read_data(
amount, offset, self._prefetch_in_background)
if data is None:
if self.trace_log and self.should_cache:
print(
"[HTTPVFS] Cache miss for request @ %d + %d" %
(offset, amount))
# Fire off a network request with the range of bytes
                        # (potentially predictively reading a larger amount
# and storing it in the network cache)
if self.running_hit_direction >= 0:
# Read the amount requested + extra
# in the forward sequential direction
# to save in the cache
start = max(offset, 0)
end = min(
(offset + max(self.cache_amount, amount)) - 1,
self.length)
else:
# Read the amount requested + extra
# in the backward sequential direction
# to save in the cache
start = max(offset - self.cache_amount, 0)
end = min((offset + amount) - 1, self.length)
                        # Cancel any previous sequential prefetches; the
                        # current chunk of data was requested too fast for any
                        # background prefetch to load the cache, so it must be
                        # requested synchronously
if self.sequential_prefetch_thread:
self.sequential_prefetch_thread.do_run = False
# Synchronously request the current chunk from the
# network
headers = {
'Range': "bytes=" + str(start) + "-" + str(end),
}
self.conn_lock.acquire()
self.conn.request(
"GET", self.parsed_url.path, headers=headers)
res = self.conn.getresponse()
if not(res.status >= 200 and res.status <= 299):
# Check for a valid status from the server
raise RuntimeError(
"HTTP Status Code Error from Server")
if self.trace_log:
print(
"[HTTPVFS] Fetching @ %d + %d for "
"request @ %d + %d" %
(start, 1 + end - start, offset, amount))
data = res.read()
self.conn_lock.release()
if self.trace_log:
print(
"[HTTPVFS] Done fetching @ %d + %d for "
"request @ %d + %d" %
(start, 1 + end - start, offset, amount))
# Store the extra data fetched back in the network cache
data = cache.write_data(start, data, amount, offset)
# Prefetch the next sequential chunk of data in the
# background
if self.sequential_cache_prefetch and self.should_cache:
if self.running_hit_direction >= 0:
cache.prefetch_in_background(
self._prefetch_in_background,
self.cache_amount,
start + self.cache_amount * 1,
sequential=True)
else:
cache.prefetch_in_background(
self._prefetch_in_background,
self.cache_amount,
start - self.cache_amount * 1,
sequential=True)
else:
if self.trace_log:
print(
"[HTTPVFS] Cache hit for request @ %d + %d" %
(offset, amount))
# Return the data to SQLite
return data
except BaseException as e:
try:
self.conn_lock.release()
except BaseException:
pass
# Handle a network error
self._network_error(e, i)
def xWrite(self, data, offset): # noqa: N802
"""Intercepts SQLite's file write command"""
# Can't write to an HTTP server, ignore
pass
def xFileSize(self): # noqa: N802
"""Intercepts SQLite's file size command"""
for i in range(self.tries):
try:
                # Fire off a content-length request to the server
self.conn_lock.acquire()
self.conn.request("GET", self.parsed_url.path)
res = self.conn.getresponse()
self.tries = self.max_network_retries
self.length = res.length
self._prepare_connection()
self.conn_lock.release()
return self.length
except BaseException as e:
try:
self.conn_lock.release()
except BaseException:
pass
# Handle a network error
self._network_error(e, i)
def xClose(self): # noqa: N802
"""Intercepts SQLite's file close command"""
ident = self.name.filename()
with self.vfs.files_lock:
if ident in self.vfs.files:
if self.vfs.files[ident][0] <= 1:
for t in self.prefetch_threads:
t.do_run = False
if self.sequential_prefetch_thread:
self.sequential_prefetch_thread.do_run = False
self._prepare_prefetch_connection(
self.RANDOM_ACCESS, new=False)
self._prepare_prefetch_connection(
self.SEQUENTIAL, new=False)
self._prepare_connection(new=False)
del self.vfs.files[ident]
                        while len(self.cache_mmaps_heap) > 0:
_, evict = heapq.heappop(self.cache_mmaps_heap)
try:
evict_mm = self.cache_mmaps[evict]
except BaseException:
pass
try:
evict_mm.close()
except BaseException:
pass
try:
del self.cache_mmaps[evict]
except BaseException:
pass
else:
self.vfs.files[ident] = (
self.vfs.files[ident][0] - 1,
self.vfs.files[ident][1])
class HTTPVFS(apsw.VFS):
""" This acts as the representation of a filesystem that
proxies to HTTP requests so that SQLite can connect
to HTTP URLs.
"""
def __init__(self, vfsname="http", basevfs="", options=None):
self.vfsname = vfsname
self.basevfs = basevfs
self.options = options or {}
apsw.VFS.__init__(self, self.vfsname, self.basevfs)
self.files = {}
self.files_lock = threading.RLock()
def xOpen(self, name, flags=apsw.SQLITE_OPEN_MAIN_DB): # noqa: N802
"""Intercepts SQLite's file open command"""
flags[1] = flags[1] | apsw.SQLITE_OPEN_READONLY
if flags[0] & apsw.SQLITE_OPEN_MAIN_DB:
ident = name.filename()
with self.files_lock:
if ident not in self.files:
self.files[ident] = (1, HTTPVFSFile(
self.basevfs, name, flags, self, self.options))
else:
self.files[ident] = (
self.files[ident][0] + 1, self.files[ident][1])
return self.files[ident][1]
else:
return None
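    # Illustrative usage sketch (comment only), assuming the apsw module is
    # available: register the VFS once, and SQLite can then read a database
    # directly over HTTP (the URL below is a placeholder).
    #   vfs = HTTPVFS(vfsname="http")
    #   conn = apsw.Connection("http://example.com/model.magnitude",
    #                          flags=apsw.SQLITE_OPEN_READONLY, vfs="http")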
load_ui.py
from tkinter import *
import tkinter as tk
from tkinter import messagebox
import time
from tkinter import filedialog
import main
import ntpath
from tkinter.ttk import Progressbar
import threading
from tkinter import HORIZONTAL
import subprocess
import os
class Splash(tk.Toplevel):
def __init__(self, parent):
tk.Toplevel.__init__(self, parent)
self.title("Splash")
self.configure(bg="#CDDDFD")
# img = ImageTk.PhotoImage(file = "logo.png")
# panel = tk.Label(self, image = img)
# # panel.place(side = "bottom", fill = "both", expand = "yes")
# panel.pack()
self.geometry("300x300")
label = tk.Label(self, text="Bar Chart Race", bg="#CDDDFD")
label.place(relx=0.5, rely=0.5, anchor=CENTER)
self.overrideredirect(True)
## required to make window show before the program gets to the mainloop
self.update()
'''
Advanced Options
'''
class PopupWindow(tk.Tk):
def __init__(self, data):
tk.Tk.__init__(self)
self.data = data
self.withdraw()
self.title("Advanced Options")
self.geometry("550x550")
label = tk.Label(self,text="Select Advanced Options",)
label.place(relx=0.3, rely=0.1)
self.bg_color = "#CDDDFD"
self.btn_color = "#0556F3"
self.hint_color = "#464646"
self.configure(bg=self.bg_color)
self.input_frame = tk.LabelFrame(self, text="Get More Details", bd=2, bg=self.bg_color, height=80)
self.input_frame.pack(fill="both", expand="yes")
'''
Input Fields
'''
# get bar size
self.bar_size = tk.Entry(self.input_frame, textvariable=StringVar(self, value=self.data.get('bar_thickness')))
self.bar_size.place(relx=0.6, rely=0.05)
# get text_type_bar_label
optionList = ["Decimal","Integer"]
self.dropVar=StringVar()
self.dropVar.set(optionList[0])
self.text_type_bar_label = tk.OptionMenu(self.input_frame, self.dropVar, *optionList,)
self.text_type_bar_label.place(relx=0.6, rely=0.3)
# get text_after_bar_label
self.text_after_bar_label = tk.Entry(self.input_frame, textvariable=StringVar(self, value=self.data.get('text_after_bar_label')))
self.text_after_bar_label.place(relx=0.6, rely=0.5)
'''
Functions
'''
self.okButton()
self.barSizeEntry()
self.barLabelTextType()
self.textAfterBarLabelEntry()
# bar_size
def barSizeEntry(self):
label_1 = tk.Label(self.input_frame, text="Thickness of the Bar", bg=self.bg_color)
label_1.place(relx=0.05, rely=0.05)
label_2 = tk.Label(self.input_frame, text="*The value should be a decimal between 0 and 1. eg: 0.95", bg=self.bg_color, fg=self.hint_color)
label_2.place(relx=0.05, rely=0.15)
# bar_label_text_type
def barLabelTextType(self):
label_1 = tk.Label(self.input_frame, text="Bar label type", bg=self.bg_color)
label_1.place(relx=0.05, rely=0.3)
label_2 = tk.Label(self.input_frame, text="*The type of text for the bar label", bg=self.bg_color, fg=self.hint_color)
label_2.place(relx=0.05, rely=0.4)
# text_after_bar_label
def textAfterBarLabelEntry(self):
label_1 = tk.Label(self.input_frame, text="Text after the bar label", bg=self.bg_color)
label_1.place(relx=0.05, rely=0.5)
label_2 = tk.Label(self.input_frame, text="*Any symbol or text after the bar value. In other words, it is the unit of the data. eg: %", bg=self.bg_color, fg=self.hint_color)
label_2.place(relx=0.05, rely=0.6)
# ok button
def okButton(self):
btn = tk.Button(self.input_frame, text="Apply Changes", command=self.closeWindow,)
btn.place(relx=0.45, rely=0.8)
def closeWindow(self):
self.withdraw()
class BCR_UI(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self.withdraw()
splash = Splash(self)
## setup stuff goes here
self.title("Bar Chart Race")
## simulate a delay while loading
time.sleep(3)
## finished loading so destroy splash
splash.destroy()
## show window again
self.deiconify()
self.geometry("800x800")
self.bg_color = "#CDDDFD"
self.btn_color = "#0556F3"
self.hint_color = "#464646"
self.details_frame = tk.LabelFrame(self, text="Get Details", bg=self.bg_color, bd=2)
self.details_frame.pack(fill="both", expand="yes")
self.parameter_frame = tk.LabelFrame(self, text="Enter Parameters", bg=self.bg_color, bd=2)
self.parameter_frame.pack(fill="both", expand="yes")
self.advanced_frame = tk.LabelFrame(self, text="Advanced Options", bg=self.bg_color, bd=2, height=65)
self.advanced_frame.pack(fill="both", )
self.run_frame = tk.LabelFrame(self, text="Create Video", bg=self.bg_color, bd=2, height=65)
self.run_frame.pack(fill="both", )
self.configure(bg=self.bg_color)
self.i_flag = False
self.s_flag = False
self.c_flag = False
self.d_flag = False
'''
Adjustable parameters
'''
# get title
self.title_entry = tk.Entry(self.parameter_frame, textvariable=StringVar(self, value=''))
self.title_entry.place(relx=0.6, rely=0.1)
# get colors
self.color_entry = tk.Entry(self.parameter_frame, textvariable=StringVar(self, value="#6ECBCE,#FF2243,#FFC33D,#CE9673"))
self.color_entry.place(relx=0.6, rely=0.3)
# get fps
optionListFps = [24,30,60]
self.dropVarFps=StringVar()
self.dropVarFps.set(optionListFps[0])
self.fps_option = tk.OptionMenu(self.parameter_frame, self.dropVarFps, *optionListFps,)
self.fps_option.place(relx=0.6, rely=0.5)
# call main.py
def createVideo(self):
def creation():
# check if data is uploaded
if self.d_flag == False:
messagebox.showwarning("showwarning", "Data is not uploaded")
else:
main.BCR_Main(file_path=self.path, image_path=self.i_path, save_location=self.location, title_name=self.title_entry.get(),
bar_size=self.advanced_data.get('bar_thickness'), color_palette=self.color_entry.get().split(","),
bar_label_text_type=self.advanced_data.get('bar_label_text_type'),
text_after_bar_label=self.advanced_data.get('text_after_bar_label'),
fps = self.dropVarFps.get())
# self.progress['value'] = 100
msg = messagebox.askquestion("Done", "Video created!!! Do you want to open the video location?",icon='info')
if msg=='yes':
if self.s_flag == False:
subprocess.Popen(["open", os.path.abspath(os.getcwd())])
else:
subprocess.Popen(["open", self.location])
self.progress.destroy()
def progress_bar():
self.progress = Progressbar(self.run_frame, orient=HORIZONTAL,length=200, mode='indeterminate')
self.progress.place(relx=0.6, rely=0.3)
self.progress.start(interval=300)
# self.progress['value'] = 20
time.sleep(5)
t1 = threading.Thread(target=progress_bar)
t1.start()
t2 = threading.Thread(target=creation)
t2.start()
@property
def advanced_data(self):
options = {}
try:
options['bar_thickness'] = float(self.PopupWindow.bar_size.get())
options['text_after_bar_label'] = self.PopupWindow.text_after_bar_label.get()
options['bar_label_text_type'] = self.PopupWindow.dropVar.get()
        except Exception:
            # Popup has not been opened yet; fall back to sensible defaults.
            options['bar_thickness'] = 0.95
            options['text_after_bar_label'] = ''
            options['bar_label_text_type'] = 'Decimal'
return options
# browser button: upload data
def uploadData(self):
btn = tk.Button(self.details_frame, text="Upload Data", command=self.browseData,)
btn.place(relx=0.1, rely=0.1)
label = tk.Label(self.details_frame, text="*The data should be in csv format. eg. data.csv", bg=self.bg_color, fg=self.hint_color)
label.place(relx=0.1, rely=0.21)
def browseData(self):
self.d_flag = True
file = filedialog.askopenfilename(filetypes = (("CSV Files","*.csv"),))
if(file):
label = tk.Label(self.details_frame, text=ntpath.basename(file), bg=self.bg_color)
label.place(relx=0.6, rely=0.1)
self.path = file
else:
self.d_flag = False
label = tk.Label(self.details_frame, text="You have not selected any file.", bg=self.bg_color)
label.place(relx=0.6, rely=0.1)
# button: select image folder
def uploadImages(self):
btn = tk.Button(self.details_frame, text="Upload Image Folder", command=self.browseImages,)
btn.place(relx=0.1, rely=0.3)
label = tk.Label(self.details_frame, text="*The name of each image should match the column name in the data.\neg. If column name is 'Python', the image name must be 'Python.png'", bg=self.bg_color, fg=self.hint_color)
label.place(relx=0.1, rely=0.41)
def browseImages(self):
        self.i_flag = True
directory = filedialog.askdirectory()
if(directory):
label = tk.Label(self.details_frame, text=ntpath.basename(directory), bg=self.bg_color)
label.place(relx=0.6, rely=0.3)
self.i_path = directory
else:
self.i_path = None
label = tk.Label(self.details_frame, text="You have not selected any folder.", bg=self.bg_color)
label.place(relx=0.6, rely=0.3)
# button: select location to save the video
def saveLocation(self):
btn = tk.Button(self.details_frame, text="Choose Video Destination", command=self.browseLocation,)
btn.place(relx=0.1, rely=0.55)
label = tk.Label(self.details_frame, text="*Choose a folder to save the video.", bg=self.bg_color, fg=self.hint_color)
label.place(relx=0.1, rely=0.65)
def browseLocation(self):
        self.s_flag = True
directory = filedialog.askdirectory()
if(directory):
label = tk.Label(self.details_frame, text=ntpath.basename(directory), bg=self.bg_color)
label.place(relx=0.6, rely=0.55)
self.location = directory
else:
self.location = None
label = tk.Label(self.details_frame, text="You have not selected any location.", bg=self.bg_color)
label.place(relx=0.6, rely=0.55)
'''
Parameter labels
'''
# title
def titleEntry(self):
label_1 = tk.Label(self.parameter_frame, text="Title", bg=self.bg_color)
label_1.place(relx=0.1, rely=0.1)
label_2 = tk.Label(self.parameter_frame, text="*Text that appears at the top of the Video as a heading.", bg=self.bg_color, fg=self.hint_color)
label_2.place(relx=0.1, rely=0.2)
# color entry
def colorsEntry(self):
label_1 = tk.Label(self.parameter_frame, text="Color palette", bg=self.bg_color)
label_1.place(relx=0.1, rely=0.3)
label_2 = tk.Label(self.parameter_frame, text="*Enter the hex code of colors separated by a comma. eg: #6ECBCE,#FF2243", bg=self.bg_color, fg=self.hint_color)
label_2.place(relx=0.1, rely=0.4)
# get fps
def fpsEntry(self):
label_1 = tk.Label(self.parameter_frame, text="FPS", bg=self.bg_color)
label_1.place(relx=0.1, rely=0.5)
label_2 = tk.Label(self.parameter_frame, text="*Fps of the video. Default is 30", bg=self.bg_color, fg=self.hint_color)
label_2.place(relx=0.1, rely=0.6)
# button: advance options
def advancedButton(self):
btn = tk.Button(self.advanced_frame, text="Select Advanced Options", command=self.popup, bg=self.btn_color)
# btn = tk.Button(self, text="Create Video", command=self.createVideo, highlightbackground=self.btn_color) # for mac
btn.place(relx=0.35, rely=0.2)
def popup(self):
self.PopupWindow = PopupWindow(self.advanced_data)
self.PopupWindow.deiconify()
# button: run button
def runButton(self):
btn = tk.Button(self.run_frame, text="Create Video", command=self.createVideo, bg=self.btn_color)
# btn = tk.Button(self, text="Create Video", command=self.createVideo, highlightbackground=self.btn_color) # for mac
btn.place(relx=0.4, rely=0.2)
def execution(self):
# check if browse image button is clicked
if self.i_flag == False:
self.i_path = None
# check if browse saving button is clicked
if self.s_flag == False:
self.location = None
self.uploadData()
self.uploadImages()
self.saveLocation()
self.titleEntry()
self.colorsEntry()
self.advancedButton()
self.fpsEntry()
self.runButton()
self.mainloop()
if __name__ == "__main__":
app = BCR_UI()
app.execution()
|
socket_wait.py
|
from . import log
import socket
import threading
import time
HOST = '127.0.0.1'  # standard loopback address (localhost)
PORT = 65432        # port to listen on (non-privileged ports are > 1023)
logger = log.logger
def wait_signal_and_run(events_to_run):
    logger.info('Checking whether any signal flag is True...')
while True:
time.sleep(1)
for func in events_to_run:
if events_to_run[func][1] is True:
                logger.info(f'About to run: {func}')
func_th = threading.Thread(target=func)
func_th.start()
events_to_run[func][1] = False
def socket_wait(events_to_run):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind((HOST, PORT))
        s.listen()
        logger.info('Signal Socket is Listening...')
        while True:
            conn, addr = s.accept()
with conn:
                logger.info(f'Connected by {addr}')
data = conn.recv(1024)
for key, value in events_to_run.items():
if data.decode() == value[0]:
events_to_run[key][1] = True
logger.info(f'Received: {data}.')
                conn.sendall(b'Data received! Thanks!')
# if __name__ == '__main__':
#     show_sig_th = threading.Thread(target=wait_signal_and_run, args=(events_to_run,))
#     show_sig_th.start()
#     socket_wait(events_to_run)
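# Note (assumption, not part of the original module): events_to_run is expected
# to map a callable to a mutable [signal_string, triggered] pair shared by both
# threads, e.g.
#
#     def backup():
#         logger.info('running backup')
#     events_to_run = {backup: ['run_backup', False]}
#
# socket_wait() sets the flag to True when the matching signal string arrives,
# and wait_signal_and_run() then starts the callable in a thread and resets the flag.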
|
test_read_only_job_plugin.py
|
# Copyright 2022 The AI Flow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import unittest
from threading import Thread
from unittest import mock
from ai_flow.workflow.job import Job
from ai_flow.workflow.status import Status
from ai_flow import DatasetMeta
from ai_flow.ai_graph.ai_node import AINode, ReadDatasetNode, WriteDatasetNode
from ai_flow.workflow.job_config import JobConfig
from ai_flow.ai_graph.ai_graph import AISubGraph
from ai_flow_plugins.job_plugins.bash import BashProcessor
from ai_flow_plugins.job_plugins.read_only import ReadOnlyProcessor, ReadOnlyJobGenerator, ReadOnlyJob, \
ReadOnlyJobController, ReadOnlyJobHandle
class TestReadOnlyJobGenerator(unittest.TestCase):
def test_generate_throw_unknown_type_exception(self):
sub_graph = AISubGraph(JobConfig())
ai_node = AINode(processor=BashProcessor('hello'))
sub_graph.add_node(ai_node)
sub_graph.add_node(AINode(processor=ReadOnlyProcessor()))
job_generator = ReadOnlyJobGenerator()
with self.assertRaises(TypeError):
job_generator.generate(sub_graph)
def test_generate(self):
sub_graph = AISubGraph(JobConfig())
sub_graph.add_node(ReadDatasetNode(dataset=DatasetMeta("test"), processor=ReadOnlyProcessor()))
sub_graph.add_node(AINode(processor=ReadOnlyProcessor()))
sub_graph.add_node(WriteDatasetNode(dataset=DatasetMeta("test"), processor=ReadOnlyProcessor()))
job_generator = ReadOnlyJobGenerator()
job = job_generator.generate(sub_graph)
self.assertIsInstance(job, ReadOnlyJob)
def test_generate_with_required_configs(self):
job_config = JobConfig()
sub_graph = AISubGraph(job_config)
sub_graph.add_node(ReadDatasetNode(dataset=DatasetMeta("test"), processor=ReadOnlyProcessor()))
sub_graph.add_node(AINode(processor=ReadOnlyProcessor()))
sub_graph.add_node(WriteDatasetNode(dataset=DatasetMeta("test"), processor=ReadOnlyProcessor()))
job_generator = ReadOnlyJobGenerator(required_properties={'required_key'})
with self.assertRaises(RuntimeError):
job_generator.generate(sub_graph)
job_config.properties['required_key'] = 'value'
job = job_generator.generate(sub_graph)
self.assertIsInstance(job, ReadOnlyJob)
class TestReadOnlyJobController(unittest.TestCase):
def setUp(self) -> None:
self.job_controller = ReadOnlyJobController()
self.job = ReadOnlyJob(JobConfig("test_job"))
def test_submit_job(self):
job_runtime_env = mock.Mock()
job_execution_info = mock.Mock()
job_runtime_env.job_execution_info = job_execution_info
handle = self.job_controller.submit_job(self.job, job_runtime_env)
self.assertIsInstance(handle, ReadOnlyJobHandle)
self.assertEqual(self.job, handle.job)
self.assertEqual(job_execution_info, handle.job_execution)
def test_stop_job(self):
job_runtime_env = mock.Mock()
job_execution_info = mock.Mock()
job_runtime_env.job_execution_info = job_execution_info
with self.assertRaises(RuntimeError):
self.job_controller.stop_job(ReadOnlyJobHandle(mock.Mock(), job_execution_info), job_runtime_env)
handle = self.job_controller.submit_job(self.job, job_runtime_env)
self.assertFalse(self.job_controller._job_stop_events[handle.job].is_set())
self.job_controller.stop_job(handle, job_runtime_env)
self.assertTrue(self.job_controller._job_stop_events[handle.job].is_set())
def test_get_result(self):
job_runtime_env = mock.Mock()
job_execution_info = mock.Mock()
job_runtime_env.job_execution_info = job_execution_info
with self.assertRaises(RuntimeError):
self.job_controller.get_result(ReadOnlyJobHandle(mock.Mock(), job_execution_info), job_runtime_env)
handle = self.job_controller.submit_job(self.job, job_runtime_env)
self.assertIsNone(self.job_controller.get_result(handle, False))
def get_result():
result = self.job_controller.get_result(handle, True)
self.assertIsNone(result)
self.assertTrue(self.job_controller._job_stop_events[handle.job].is_set())
t = Thread(target=get_result)
t.start()
time.sleep(0.5)
self.job_controller.stop_job(handle, job_runtime_env)
t.join()
def test_get_job_status(self):
job_runtime_env = mock.Mock()
job_execution_info = mock.Mock()
job_runtime_env.job_execution_info = job_execution_info
with self.assertRaises(RuntimeError):
self.job_controller.get_job_status(ReadOnlyJobHandle(mock.Mock(), job_execution_info))
handle = self.job_controller.submit_job(self.job, job_runtime_env)
self.assertEqual(Status.RUNNING, self.job_controller.get_job_status(handle))
def test_obtain_job_label(self):
job_runtime_env = mock.Mock()
job_execution_info = mock.Mock()
job_runtime_env.job_execution_info = job_execution_info
with self.assertRaises(RuntimeError):
self.job_controller.obtain_job_label(ReadOnlyJobHandle(mock.Mock(), job_execution_info))
handle = self.job_controller.submit_job(self.job, job_runtime_env)
self.assertEqual("", self.job_controller.obtain_job_label(handle))
def test_obtain_job_label_check_job_type(self):
job_runtime_env = mock.Mock()
job_execution_info = mock.Mock()
job_runtime_env.job_execution_info = job_execution_info
job = Job(mock.Mock())
handle = self.job_controller.submit_job(job, job_runtime_env)
with self.assertRaises(TypeError):
self.job_controller.obtain_job_label(handle)
|
base.py
|
import subprocess
import json
from threading import Lock, Thread
import io
import os
import sys
import locale
try:
from json import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
executable_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'executable',
'kaleido'
)
class BaseScope(object):
# Subclasses may override to specify a custom JSON encoder for input data
_json_encoder = None
# Tuple of class properties that will be passed as
# command-line flags to configure chromium
_chromium_flags = ("disable_gpu",)
# Tuple of class properties that will be passed as command-line
# flags to configure scope
_scope_flags = ()
def __init__(self, disable_gpu=True):
# Collect chromium flag properties
self._disable_gpu = disable_gpu
# Internal Properties
self._std_error = io.BytesIO()
self._std_error_thread = None
self._proc = None
self._proc_lock = Lock()
def __del__(self):
self._shutdown_kaleido()
def _build_proc_args(self):
"""
Build list of kaleido command-line arguments based on current values of
the properties specified by self._chromium_flags and self._scope_flags
:return: list of flags
"""
proc_args = [executable_path, self.scope_name]
for k in self._chromium_flags + self._scope_flags:
v = getattr(self, k)
if v is True:
flag = '--' + k.replace("_", "-")
elif v is False or v is None:
# Logical flag set to False, don't include flag or argument
continue
else:
# Flag with associated value
flag = '--' + k.replace("_", "-") + "=" + repr(str(v))
proc_args.append(flag)
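        # Worked example (hypothetical scope named "demo"): with the default
        # disable_gpu=True and no scope flags, this yields roughly
        # [executable_path, "demo", "--disable-gpu"]; a non-boolean value such
        # as 2 would be appended as "--some-flag='2'" because of repr(str(v)).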
return proc_args
def _collect_standard_error(self):
"""
Write standard-error of subprocess to the _std_error StringIO buffer.
Intended to be called once in a background thread
"""
while True:
if self._proc is not None:
val = self._proc.stderr.readline()
self._std_error.write(val)
def _ensure_kaleido(self):
"""
Launch the kaleido subprocess if it is not already running and in a good state
"""
# Use double-check locking to make sure we only initialize the process
# from a single thread
if self._proc is None or self._proc.poll() is not None:
with self._proc_lock:
if self._proc is None or self._proc.poll() is not None:
# Wait on process if crashed to prevent zombies
if self._proc is not None:
self._proc.wait()
# Reset _std_error buffer
self._std_error = io.BytesIO()
# Launch kaleido subprocess
# Note: shell=True seems to be needed on Windows to handle executable path with
# spaces. The subprocess.Popen docs makes it sound like this shouldn't be
# necessary.
proc_args = self._build_proc_args()
self._proc = subprocess.Popen(
proc_args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=sys.platform == "win32"
)
# Set up thread to asynchronously collect standard error stream
if self._std_error_thread is None:
self._std_error_thread = Thread(target=self._collect_standard_error)
self._std_error_thread.setDaemon(True)
self._std_error_thread.start()
# Read startup message and check for errors
startup_response_string = self._proc.stdout.readline().decode('utf-8')
if not startup_response_string:
message = (
"Failed to start Kaleido subprocess. Error stream:\n\n" +
self._get_decoded_std_error()
)
raise ValueError(message)
else:
startup_response = json.loads(startup_response_string)
if startup_response.get("code", 0) != 0:
self._proc.wait()
raise ValueError(startup_response.get("message", "Failed to start Kaleido subprocess"))
def _get_decoded_std_error(self):
"""
Attempt to decode standard error bytes stream to a string
"""
std_err_str = None
try:
encoding = sys.stderr.encoding
std_err_str = self._std_error.getvalue().decode(encoding)
except Exception:
pass
if std_err_str is None:
try:
encoding = locale.getpreferredencoding(False)
std_err_str = self._std_error.getvalue().decode(encoding)
except Exception:
pass
if std_err_str is None:
std_err_str = "Failed to decode Chromium's standard error stream"
return std_err_str
def _shutdown_kaleido(self):
"""
        Shut down the kaleido subprocess, if any, and set the _proc property to None
"""
# Use double-check locking to make sure we only shut down the process
# a single time when used across threads.
if self._proc is not None:
with self._proc_lock:
if self._proc is not None:
if self._proc.poll() is None:
# Process still running, close stdin to tell kaleido
# to shut down gracefully
self._proc.stdin.close()
# wait for process to terminate if it was running.
# Also prevent zombie process if process crashed
                    # on its own
try:
self._proc.wait(timeout=2.0)
except:
# We tried to wait! Moving on...
pass
# Clear _proc property
self._proc = None
@property
def scope_name(self):
raise NotImplementedError
# Flag property methods
@property
def disable_gpu(self):
""" If True, asks chromium to disable GPU hardware acceleration with --disable-gpu flag"""
return self._disable_gpu
@disable_gpu.setter
def disable_gpu(self, val):
self._disable_gpu = val
self._shutdown_kaleido()
def _perform_transform(self, data, **kwargs):
"""
Transform input data using the current scope, returning dict response with error code
whether successful or not.
:param data: JSON-serializable object to be converted
:param kwargs: Transform arguments for scope
:return: Dict of response from Kaleido executable, whether successful or not
"""
# Ensure that kaleido subprocess is running
self._ensure_kaleido()
# Perform export
export_spec = json.dumps(
dict(kwargs, data=data),
cls=self._json_encoder).encode('utf-8')
# Write to process and read result within a lock so that can be
# sure we're reading the response to our request
with self._proc_lock:
# Reset _std_error buffer
self._std_error = io.BytesIO()
# Write and flush spec
self._proc.stdin.write(export_spec)
self._proc.stdin.write("\n".encode('utf-8'))
self._proc.stdin.flush()
response = self._proc.stdout.readline()
response_string = response.decode('utf-8')
if not response_string:
message = (
"Transform failed. Error stream:\n\n" +
self._get_decoded_std_error()
)
raise ValueError(message)
try:
response = json.loads(response_string)
except JSONDecodeError:
print("Invalid JSON: " + repr(response_string))
raise
return response
def transform(self, data, **kwargs):
"""
Transform input data using the current scope
Subclasses should provide a more helpful docstring
:param data: JSON-serializable object to be converted
:param kwargs: Transform arguments for scope
:return: Transformed value as bytes
"""
response = self._perform_transform(data, **kwargs)
# Check for export error
code = response.pop("code", 0)
if code != 0:
message = response.get("message", None)
raise ValueError(
"Transform failed with error code {code}: {message}".format(
code=code, message=message
)
)
img_string = response.pop("result", None)
return img_string.encode()
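# A minimal usage sketch (assumption, not part of the original module): concrete
# scopes only need to supply scope_name, which names the scope the bundled
# kaleido executable is launched with. The "demo" scope below is hypothetical.
#
# class DemoScope(BaseScope):
#     @property
#     def scope_name(self):
#         return "demo"
#
# scope = DemoScope()
# result_bytes = scope.transform({"some": "data"})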
|
ytdl.py
|
import os
if "downloads" not in os.listdir():
os.mkdir("downloads")
import threading
import queue
import youtube_dl
import player
ydl_opts = {
"format": "bestaudio/best"
}
ydl = youtube_dl.YoutubeDL(ydl_opts)
q = queue.Queue()
def worker():
while True:
item = q.get()
item["on_start"][0](
*item["on_start"][1],
quote=True
)
file_name = ""
info = ydl.extract_info(
item["video"],
download=False
)
if info["is_live"]:
item["on_live_err"][0](
*item["on_live_err"][1],
quote=True
)
q.task_done()
else:
file_name = info["id"] + "." + info["ext"]
if file_name in os.listdir("downloads"):
args = item["play_func"][1]
args[0] = "downloads/" + file_name
args[3] = info["title"]
args[4] = "https://youtu.be/" + info["id"]
item["play_func"][0](
*args
)
else:
ydl.download(
[
item["video"]
]
)
os.rename(
[
i
for i in os.listdir()
if i.endswith(info["ext"])
][0],
"downloads/" + file_name
)
args = item["play_func"][1]
args[0] = "downloads/" + file_name
args[3] = info["title"]
args[4] = "https://youtu.be/" + info["id"]
item["play_func"][0](
*args
)
if player.q.qsize() != 0:
item["on_end"][0](
*item["on_end"][1],
quote=True
)
q.task_done()
threading.Thread(target=worker, daemon=True).start()
def download(on_start, on_end, play_func, on_is_live_err, video):
q.put(
{
"on_start": on_start,
"on_end": on_end,
"play_func": play_func,
"on_is_live_err": on_is_live_err,
"video": video
}
)
return q.qsize()
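# Usage sketch (assumption, not part of the original module): every hook is a
# (callable, mutable_args_list) pair; the worker calls the notification hooks
# with quote=True and rewrites play_func's args[0] (file path), args[3] (title)
# and args[4] (URL) before invoking it.
#
# def notify(*args, quote=False):
#     print(*args)
#
# download(
#     on_start=(notify, ["downloading..."]),
#     on_end=(notify, ["download finished, queued for playback"]),
#     play_func=(notify, ["", None, None, "", ""]),
#     on_is_live_err=(notify, ["live streams are not supported"]),
#     video="<youtube video url or id>",
# )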
|
tello.py
|
import threading
import socket
import time
import datetime
import struct
import sys
import os
from . import crc
from . import logger
from . import event
from . import state
from . import error
from . import video_stream
from . utils import *
from . protocol import *
from . import dispatcher
log = logger.Logger('Tello')
class Tello(object):
EVENT_CONNECTED = event.Event('connected')
EVENT_WIFI = event.Event('wifi')
EVENT_LIGHT = event.Event('light')
EVENT_FLIGHT_DATA = event.Event('fligt_data')
EVENT_LOG_HEADER = event.Event('log_header')
EVENT_LOG = EVENT_LOG_HEADER
EVENT_LOG_RAWDATA = event.Event('log_rawdata')
EVENT_LOG_DATA = event.Event('log_data')
EVENT_LOG_CONFIG = event.Event('log_config')
EVENT_TIME = event.Event('time')
EVENT_VIDEO_FRAME = event.Event('video frame')
EVENT_VIDEO_DATA = event.Event('video data')
EVENT_DISCONNECTED = event.Event('disconnected')
EVENT_FILE_RECEIVED = event.Event('file received')
# internal events
__EVENT_CONN_REQ = event.Event('conn_req')
__EVENT_CONN_ACK = event.Event('conn_ack')
__EVENT_TIMEOUT = event.Event('timeout')
__EVENT_QUIT_REQ = event.Event('quit_req')
    # for backward compatibility
CONNECTED_EVENT = EVENT_CONNECTED
WIFI_EVENT = EVENT_WIFI
LIGHT_EVENT = EVENT_LIGHT
FLIGHT_EVENT = EVENT_FLIGHT_DATA
LOG_EVENT = EVENT_LOG
TIME_EVENT = EVENT_TIME
VIDEO_FRAME_EVENT = EVENT_VIDEO_FRAME
STATE_DISCONNECTED = state.State('disconnected')
STATE_CONNECTING = state.State('connecting')
STATE_CONNECTED = state.State('connected')
STATE_QUIT = state.State('quit')
LOG_ERROR = logger.LOG_ERROR
LOG_WARN = logger.LOG_WARN
LOG_INFO = logger.LOG_INFO
LOG_DEBUG = logger.LOG_DEBUG
LOG_ALL = logger.LOG_ALL
def __init__(self, port=9000, sockfd=None, no_video_thread=False, no_recv_thread=False):
self.tello_addr = ('192.168.10.1', 8889)
self.debug = False
self.pkt_seq_num = 0x01e4
self.port = port
self.udpsize = 2000
self.left_x = 0.0
self.left_y = 0.0
self.right_x = 0.0
self.right_y = 0.0
self.sock = None
self.state = self.STATE_DISCONNECTED
self.lock = threading.Lock()
self.connected = threading.Event()
self.video_enabled = False
self.prev_video_data_time = None
self.video_data_size = 0
self.video_data_loss = 0
self.log = log
self.exposure = 0
self.video_encoder_rate = 4
self.video_stream = None
self.wifi_strength = 0
self.log_data = LogData(log)
self.log_data_file = None
self.log_data_header_recorded = False
# video zoom state
self.zoom = False
# fast mode state
self.fast_mode = False
        # File receive state.
self.file_recv = {} # Map filenum -> protocol.DownloadedFile
# Create a UDP socket
if not sockfd:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(('', self.port))
else:
self.sock = socket.fromfd(sockfd, socket.AF_INET, socket.SOCK_DGRAM)
self.sock.settimeout(2.0)
dispatcher.connect(self.__state_machine, dispatcher.signal.All)
if not no_recv_thread:
threading.Thread(target=self.__recv_thread).start()
if not no_video_thread:
threading.Thread(target=self.__video_thread).start()
def set_loglevel(self, level):
"""
Set_loglevel controls the output messages. Valid levels are
LOG_ERROR, LOG_WARN, LOG_INFO, LOG_DEBUG and LOG_ALL.
"""
log.set_level(level)
def get_video_stream(self):
"""
        Get_video_stream is used to prepare a buffer object that receives video data from the drone.
"""
newly_created = False
self.lock.acquire()
log.info('get video stream')
try:
if self.video_stream is None:
self.video_stream = video_stream.VideoStream(self)
newly_created = True
res = self.video_stream
finally:
self.lock.release()
if newly_created:
self.__send_exposure()
self.__send_video_encoder_rate()
self.start_video()
return res
def connect(self):
"""Connect is used to send the initial connection request to the drone."""
self.__publish(event=self.__EVENT_CONN_REQ)
def wait_for_connection(self, timeout=None):
"""Wait_for_connection will block until the connection is established."""
if not self.connected.wait(timeout):
raise error.TelloError('timeout')
def __send_conn_req(self):
port = 9617
port0 = (int(port/1000) % 10) << 4 | (int(port/100) % 10)
port1 = (int(port/10) % 10) << 4 | (int(port/1) % 10)
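        # Worked example: for port 9617 the digits are packed two per byte
        # (BCD-style), so port0 = (9 << 4) | 6 = 0x96 and
        # port1 = (1 << 4) | 7 = 0x17.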
buf = 'conn_req:%c%c' % (chr(port0), chr(port1))
log.info('send connection request (cmd="%s%02x%02x")' % (str(buf[:-2]), port0, port1))
return self.send_packet(Packet(buf))
def subscribe(self, signal, handler):
"""Subscribe a event such as EVENT_CONNECTED, EVENT_FLIGHT_DATA, EVENT_VIDEO_FRAME and so on."""
dispatcher.connect(handler, signal)
def __publish(self, event, data=None, **args):
args.update({'data': data})
if 'signal' in args:
del args['signal']
if 'sender' in args:
del args['sender']
log.debug('publish signal=%s, args=%s' % (event, args))
dispatcher.send(event, sender=self, **args)
def takeoff(self):
"""Takeoff tells the drones to liftoff and start flying."""
log.info('set altitude limit 30m')
pkt = Packet(SET_ALT_LIMIT_CMD)
pkt.add_byte(0x1e) # 30m
pkt.add_byte(0x00)
self.send_packet(pkt)
log.info('takeoff (cmd=0x%02x seq=0x%04x)' % (TAKEOFF_CMD, self.pkt_seq_num))
pkt = Packet(TAKEOFF_CMD)
pkt.fixup()
return self.send_packet(pkt)
def throw_and_go(self):
"""Throw_and_go starts a throw and go sequence"""
log.info('throw_and_go (cmd=0x%02x seq=0x%04x)' % (THROW_AND_GO_CMD, self.pkt_seq_num))
pkt = Packet(THROW_AND_GO_CMD, 0x48)
pkt.add_byte(0x00)
pkt.fixup()
return self.send_packet(pkt)
def land(self):
"""Land tells the drone to come in for landing."""
log.info('land (cmd=0x%02x seq=0x%04x)' % (LAND_CMD, self.pkt_seq_num))
pkt = Packet(LAND_CMD)
pkt.add_byte(0x00)
pkt.fixup()
return self.send_packet(pkt)
def palm_land(self):
"""Tells the drone to wait for a hand underneath it and then land."""
log.info('palmland (cmd=0x%02x seq=0x%04x)' % (PALM_LAND_CMD, self.pkt_seq_num))
pkt = Packet(PALM_LAND_CMD)
pkt.add_byte(0x00)
pkt.fixup()
return self.send_packet(pkt)
def quit(self):
"""Quit stops the internal threads."""
log.info('quit')
self.__publish(event=self.__EVENT_QUIT_REQ)
def get_alt_limit(self):
''' ... '''
self.log.debug('get altitude limit (cmd=0x%02x seq=0x%04x)' % (
ALT_LIMIT_MSG, self.pkt_seq_num))
pkt = Packet(ALT_LIMIT_MSG)
pkt.fixup()
return self.send_packet(pkt)
def set_alt_limit(self, limit):
self.log.info('set altitude limit=%s (cmd=0x%02x seq=0x%04x)' % (
int(limit), SET_ALT_LIMIT_CMD, self.pkt_seq_num))
pkt = Packet(SET_ALT_LIMIT_CMD)
pkt.add_byte(int(limit))
pkt.add_byte(0x00)
pkt.fixup()
self.send_packet(pkt)
self.get_alt_limit()
def get_att_limit(self):
''' ... '''
self.log.debug('get attitude limit (cmd=0x%02x seq=0x%04x)' % (
ATT_LIMIT_MSG, self.pkt_seq_num))
pkt = Packet(ATT_LIMIT_MSG)
pkt.fixup()
return self.send_packet(pkt)
def set_att_limit(self, limit):
self.log.info('set attitude limit=%s (cmd=0x%02x seq=0x%04x)' % (
int(limit), ATT_LIMIT_CMD, self.pkt_seq_num))
pkt = Packet(ATT_LIMIT_CMD)
pkt.add_byte(0x00)
pkt.add_byte(0x00)
pkt.add_byte( int(float_to_hex(float(limit))[4:6], 16) ) # 'attitude limit' formatted in float of 4 bytes
pkt.add_byte(0x41)
pkt.fixup()
self.send_packet(pkt)
self.get_att_limit()
def get_low_bat_threshold(self):
''' ... '''
self.log.debug('get low battery threshold (cmd=0x%02x seq=0x%04x)' % (
LOW_BAT_THRESHOLD_MSG, self.pkt_seq_num))
pkt = Packet(LOW_BAT_THRESHOLD_MSG)
pkt.fixup()
return self.send_packet(pkt)
def set_low_bat_threshold(self, threshold):
self.log.info('set low battery threshold=%s (cmd=0x%02x seq=0x%04x)' % (
int(threshold), LOW_BAT_THRESHOLD_CMD, self.pkt_seq_num))
pkt = Packet(LOW_BAT_THRESHOLD_CMD)
pkt.add_byte(int(threshold))
pkt.fixup()
self.send_packet(pkt)
self.get_low_bat_threshold()
def __send_time_command(self):
log.info('send_time (cmd=0x%02x seq=0x%04x)' % (TIME_CMD, self.pkt_seq_num))
pkt = Packet(TIME_CMD, 0x50)
pkt.add_byte(0)
pkt.add_time()
pkt.fixup()
return self.send_packet(pkt)
def __send_start_video(self):
pkt = Packet(VIDEO_START_CMD, 0x60)
pkt.fixup()
return self.send_packet(pkt)
def __send_video_mode(self, mode):
pkt = Packet(VIDEO_MODE_CMD)
pkt.add_byte(mode)
pkt.fixup()
return self.send_packet(pkt)
def set_video_mode(self, zoom=False):
"""Tell the drone whether to capture 960x720 4:3 video, or 1280x720 16:9 zoomed video.
4:3 has a wider field of view (both vertically and horizontally), 16:9 is crisper."""
log.info('set video mode zoom=%s (cmd=0x%02x seq=0x%04x)' % (
zoom, VIDEO_START_CMD, self.pkt_seq_num))
self.zoom = zoom
return self.__send_video_mode(int(zoom))
def start_video(self):
"""Start_video tells the drone to send start info (SPS/PPS) for video stream."""
log.info('start video (cmd=0x%02x seq=0x%04x)' % (VIDEO_START_CMD, self.pkt_seq_num))
self.video_enabled = True
self.__send_exposure()
self.__send_video_encoder_rate()
return self.__send_start_video()
def set_exposure(self, level):
"""Set_exposure sets the drone camera exposure level. Valid levels are 0, 1, and 2."""
if level < 0 or 2 < level:
raise error.TelloError('Invalid exposure level')
log.info('set exposure (cmd=0x%02x seq=0x%04x)' % (EXPOSURE_CMD, self.pkt_seq_num))
self.exposure = level
return self.__send_exposure()
def __send_exposure(self):
pkt = Packet(EXPOSURE_CMD, 0x48)
pkt.add_byte(self.exposure)
pkt.fixup()
return self.send_packet(pkt)
def set_video_encoder_rate(self, rate):
"""Set_video_encoder_rate sets the drone video encoder rate."""
log.info('set video encoder rate (cmd=0x%02x seq=%04x)' %
(VIDEO_ENCODER_RATE_CMD, self.pkt_seq_num))
self.video_encoder_rate = rate
return self.__send_video_encoder_rate()
def __send_video_encoder_rate(self):
pkt = Packet(VIDEO_ENCODER_RATE_CMD, 0x68)
pkt.add_byte(self.video_encoder_rate)
pkt.fixup()
return self.send_packet(pkt)
def take_picture(self):
log.info('take picture')
return self.send_packet_data(TAKE_PICTURE_COMMAND, type=0x68)
def up(self, val):
"""Up tells the drone to ascend. Pass in an int from 0-100."""
log.info('up(val=%d)' % val)
self.left_y = val / 100.0
def down(self, val):
"""Down tells the drone to descend. Pass in an int from 0-100."""
log.info('down(val=%d)' % val)
self.left_y = val / 100.0 * -1
def forward(self, val):
"""Forward tells the drone to go forward. Pass in an int from 0-100."""
log.info('forward(val=%d)' % val)
self.right_y = val / 100.0
def backward(self, val):
"""Backward tells the drone to go in reverse. Pass in an int from 0-100."""
log.info('backward(val=%d)' % val)
self.right_y = val / 100.0 * -1
def right(self, val):
"""Right tells the drone to go right. Pass in an int from 0-100."""
log.info('right(val=%d)' % val)
self.right_x = val / 100.0
def left(self, val):
"""Left tells the drone to go left. Pass in an int from 0-100."""
log.info('left(val=%d)' % val)
self.right_x = val / 100.0 * -1
def clockwise(self, val):
"""
Clockwise tells the drone to rotate in a clockwise direction.
Pass in an int from 0-100.
"""
log.info('clockwise(val=%d)' % val)
self.left_x = val / 100.0
def counter_clockwise(self, val):
"""
CounterClockwise tells the drone to rotate in a counter-clockwise direction.
Pass in an int from 0-100.
"""
log.info('counter_clockwise(val=%d)' % val)
self.left_x = val / 100.0 * -1
def flip_forward(self):
"""flip_forward tells the drone to perform a forwards flip"""
log.info('flip_forward (cmd=0x%02x seq=0x%04x)' % (FLIP_CMD, self.pkt_seq_num))
pkt = Packet(FLIP_CMD, 0x70)
pkt.add_byte(FlipFront)
pkt.fixup()
return self.send_packet(pkt)
def flip_back(self):
"""flip_back tells the drone to perform a backwards flip"""
log.info('flip_back (cmd=0x%02x seq=0x%04x)' % (FLIP_CMD, self.pkt_seq_num))
pkt = Packet(FLIP_CMD, 0x70)
pkt.add_byte(FlipBack)
pkt.fixup()
return self.send_packet(pkt)
def flip_right(self):
"""flip_right tells the drone to perform a right flip"""
log.info('flip_right (cmd=0x%02x seq=0x%04x)' % (FLIP_CMD, self.pkt_seq_num))
pkt = Packet(FLIP_CMD, 0x70)
pkt.add_byte(FlipRight)
pkt.fixup()
return self.send_packet(pkt)
def flip_left(self):
"""flip_left tells the drone to perform a left flip"""
log.info('flip_left (cmd=0x%02x seq=0x%04x)' % (FLIP_CMD, self.pkt_seq_num))
pkt = Packet(FLIP_CMD, 0x70)
pkt.add_byte(FlipLeft)
pkt.fixup()
return self.send_packet(pkt)
def flip_forwardleft(self):
"""flip_forwardleft tells the drone to perform a forwards left flip"""
log.info('flip_forwardleft (cmd=0x%02x seq=0x%04x)' % (FLIP_CMD, self.pkt_seq_num))
pkt = Packet(FLIP_CMD, 0x70)
pkt.add_byte(FlipForwardLeft)
pkt.fixup()
return self.send_packet(pkt)
def flip_backleft(self):
"""flip_backleft tells the drone to perform a backwards left flip"""
log.info('flip_backleft (cmd=0x%02x seq=0x%04x)' % (FLIP_CMD, self.pkt_seq_num))
pkt = Packet(FLIP_CMD, 0x70)
pkt.add_byte(FlipBackLeft)
pkt.fixup()
return self.send_packet(pkt)
def flip_forwardright(self):
"""flip_forwardright tells the drone to perform a forwards right flip"""
log.info('flip_forwardright (cmd=0x%02x seq=0x%04x)' % (FLIP_CMD, self.pkt_seq_num))
pkt = Packet(FLIP_CMD, 0x70)
pkt.add_byte(FlipForwardRight)
pkt.fixup()
return self.send_packet(pkt)
def flip_backright(self):
"""flip_backleft tells the drone to perform a backwards right flip"""
log.info('flip_backright (cmd=0x%02x seq=0x%04x)' % (FLIP_CMD, self.pkt_seq_num))
pkt = Packet(FLIP_CMD, 0x70)
pkt.add_byte(FlipBackRight)
pkt.fixup()
return self.send_packet(pkt)
def __fix_range(self, val, min=-1.0, max=1.0):
if val < min:
val = min
elif val > max:
val = max
return val
def set_throttle(self, throttle):
"""
Set_throttle controls the vertical up and down motion of the drone.
        Pass in a float from -1.0 to 1.0. (positive value means upward)
"""
if self.left_y != self.__fix_range(throttle):
log.info('set_throttle(val=%4.2f)' % throttle)
self.left_y = self.__fix_range(throttle)
def set_yaw(self, yaw):
"""
Set_yaw controls the left and right rotation of the drone.
        Pass in a float from -1.0 to 1.0. (positive value will make the drone turn to the right)
"""
if self.left_x != self.__fix_range(yaw):
log.info('set_yaw(val=%4.2f)' % yaw)
self.left_x = self.__fix_range(yaw)
def set_pitch(self, pitch):
"""
Set_pitch controls the forward and backward tilt of the drone.
        Pass in a float from -1.0 to 1.0. (positive value will make the drone move forward)
"""
if self.right_y != self.__fix_range(pitch):
log.info('set_pitch(val=%4.2f)' % pitch)
self.right_y = self.__fix_range(pitch)
def set_roll(self, roll):
"""
        Set_roll controls the side to side tilt of the drone.
        Pass in a float from -1.0 to 1.0. (positive value will make the drone move to the right)
"""
if self.right_x != self.__fix_range(roll):
log.info('set_roll(val=%4.2f)' % roll)
self.right_x = self.__fix_range(roll)
    def toggle_fast_mode(self):
        self.fast_mode = not self.fast_mode
def manual_takeoff(self):
# Hold max 'yaw' and min 'pitch', 'roll', 'throttle' for several seconds
self.set_pitch(-1)
self.set_roll(-1)
self.set_yaw(1)
self.set_throttle(-1)
self.fast_mode = False
def __send_stick_command(self):
pkt = Packet(STICK_CMD, 0x60)
axis1 = int(1024 + 660.0 * self.right_x) & 0x7ff
axis2 = int(1024 + 660.0 * self.right_y) & 0x7ff
axis3 = int(1024 + 660.0 * self.left_y) & 0x7ff
axis4 = int(1024 + 660.0 * self.left_x) & 0x7ff
axis5 = int(self.fast_mode) & 0x01
'''
11 bits (-1024 ~ +1023) x 4 axis = 44 bits
fast_mode takes 1 bit
44 bits will be packed in to 6 bytes (48 bits)
axis4 axis3 axis2 axis1
| | | | |
4 3 2 1 0
98765432109876543210987654321098765432109876543210
| | | | | | |
byte5 byte4 byte3 byte2 byte1 byte0
'''
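        # Worked example (neutral sticks, fast_mode off): each axis is
        # int(1024 + 660.0 * 0.0) = 1024 = 0x400, so
        # packed = 0x400 | 0x400 << 11 | 0x400 << 22 | 0x400 << 33 | 0 << 44
        #        = 0x80100200400, and the six bytes sent (little-endian) are
        # 00 04 20 00 01 08.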
log.debug("stick command: fast=%d yaw=%4d thr=%4d pit=%4d rol=%4d" %
(axis5, axis4, axis3, axis2, axis1))
log.debug("stick command: fast=%04x yaw=%04x thr=%04x pit=%04x rol=%04x" %
(axis5, axis4, axis3, axis2, axis1))
packed = axis1 | (axis2 << 11) | (
axis3 << 22) | (axis4 << 33) | (axis5 << 44)
packed_bytes = struct.pack('<Q', packed)
pkt.add_byte(byte(packed_bytes[0]))
pkt.add_byte(byte(packed_bytes[1]))
pkt.add_byte(byte(packed_bytes[2]))
pkt.add_byte(byte(packed_bytes[3]))
pkt.add_byte(byte(packed_bytes[4]))
pkt.add_byte(byte(packed_bytes[5]))
pkt.add_time()
pkt.fixup()
log.debug("stick command: %s" % byte_to_hexstring(pkt.get_buffer()))
return self.send_packet(pkt)
def __send_ack_log(self, id):
pkt = Packet(LOG_HEADER_MSG, 0x50)
pkt.add_byte(0x00)
b0, b1 = le16(id)
pkt.add_byte(b0)
pkt.add_byte(b1)
pkt.fixup()
return self.send_packet(pkt)
def send_packet(self, pkt):
"""Send_packet is used to send a command packet to the drone."""
try:
cmd = pkt.get_buffer()
self.sock.sendto(cmd, self.tello_addr)
log.debug("send_packet: %s" % byte_to_hexstring(cmd))
except socket.error as err:
if self.state == self.STATE_CONNECTED:
log.error("send_packet: %s" % str(err))
else:
log.info("send_packet: %s" % str(err))
return False
return True
def send_packet_data(self, command, type=0x68, payload=[]):
pkt = Packet(command, type, payload)
pkt.fixup()
return self.send_packet(pkt)
def __process_packet(self, data):
if isinstance(data, str):
data = bytearray([x for x in data])
if str(data[0:9]) == 'conn_ack:' or data[0:9] == b'conn_ack:':
log.info('connected. (port=%2x%2x)' % (data[9], data[10]))
log.debug(' %s' % byte_to_hexstring(data))
if self.video_enabled:
self.__send_exposure()
self.__send_video_encoder_rate()
self.__send_start_video()
self.__publish(self.__EVENT_CONN_ACK, data)
return True
if data[0] != START_OF_PACKET:
log.info('start of packet != %02x (%02x) (ignored)' % (START_OF_PACKET, data[0]))
log.info(' %s' % byte_to_hexstring(data))
log.info(' %s' % str(map(chr, data))[1:-1])
return False
pkt = Packet(data)
cmd = uint16(data[5], data[6])
if cmd == LOG_HEADER_MSG:
id = uint16(data[9], data[10])
log.info("recv: log_header: id=%04x, '%s'" % (id, str(data[28:54])))
log.debug("recv: log_header: %s" % byte_to_hexstring(data[9:]))
self.__send_ack_log(id)
self.__publish(event=self.EVENT_LOG_HEADER, data=data[9:])
if self.log_data_file and not self.log_data_header_recorded:
self.log_data_file.write(data[12:-2])
self.log_data_header_recorded = True
elif cmd == LOG_DATA_MSG:
log.debug("recv: log_data: length=%d, %s" % (len(data[9:]), byte_to_hexstring(data[9:])))
self.__publish(event=self.EVENT_LOG_RAWDATA, data=data[9:])
try:
self.log_data.update(data[10:])
if self.log_data_file:
self.log_data_file.write(data[10:-2])
except Exception as ex:
log.error('%s' % str(ex))
self.__publish(event=self.EVENT_LOG_DATA, data=self.log_data)
elif cmd == LOG_CONFIG_MSG:
log.debug("recv: log_config: length=%d, %s" % (len(data[9:]), byte_to_hexstring(data[9:])))
self.__publish(event=self.EVENT_LOG_CONFIG, data=data[9:])
elif cmd == WIFI_MSG:
log.debug("recv: wifi: %s" % byte_to_hexstring(data[9:]))
self.wifi_strength = data[9]
self.__publish(event=self.EVENT_WIFI, data=data[9:])
elif cmd == ALT_LIMIT_MSG:
log.info("recv: altitude limit: %s" % byte_to_hexstring(data[9:-2]))
elif cmd == ATT_LIMIT_MSG:
log.info("recv: attitude limit: %s" % byte_to_hexstring(data[9:-2]))
elif cmd == LOW_BAT_THRESHOLD_MSG:
log.info("recv: low battery threshold: %s" % byte_to_hexstring(data[9:-2]))
elif cmd == LIGHT_MSG:
log.debug("recv: light: %s" % byte_to_hexstring(data[9:-2]))
self.__publish(event=self.EVENT_LIGHT, data=data[9:])
elif cmd == FLIGHT_MSG:
flight_data = FlightData(data[9:])
flight_data.wifi_strength = self.wifi_strength
log.debug("recv: flight data: %s" % str(flight_data))
self.__publish(event=self.EVENT_FLIGHT_DATA, data=flight_data)
elif cmd == TIME_CMD:
log.debug("recv: time data: %s" % byte_to_hexstring(data))
self.__publish(event=self.EVENT_TIME, data=data[7:9])
elif cmd in (SET_ALT_LIMIT_CMD, ATT_LIMIT_CMD, LOW_BAT_THRESHOLD_CMD, TAKEOFF_CMD, LAND_CMD, VIDEO_START_CMD, VIDEO_ENCODER_RATE_CMD, PALM_LAND_CMD,
EXPOSURE_CMD, THROW_AND_GO_CMD, EMERGENCY_CMD):
log.debug("recv: ack: cmd=0x%02x seq=0x%04x %s" %
(uint16(data[5], data[6]), uint16(data[7], data[8]), byte_to_hexstring(data)))
elif cmd == TELLO_CMD_FILE_SIZE:
# Drone is about to send us a file. Get ready.
# N.b. one of the fields in the packet is a file ID; by demuxing
# based on file ID we can receive multiple files at once. This
# code doesn't support that yet, though, so don't take one photo
# while another is still being received.
log.info("recv: file size: %s" % byte_to_hexstring(data))
if len(pkt.get_data()) >= 7:
(size, filenum) = struct.unpack('<xLH', pkt.get_data())
log.info(' file size: num=%d bytes=%d' % (filenum, size))
# Initialize file download state.
self.file_recv[filenum] = DownloadedFile(filenum, size)
else:
# We always seem to get two files, one with most of the payload missing.
# Not sure what the second one is for.
log.warn(' file size: payload too small: %s' % byte_to_hexstring(pkt.get_data()))
# Ack the packet.
self.send_packet(pkt)
elif cmd == TELLO_CMD_FILE_DATA:
# log.info("recv: file data: %s" % byte_to_hexstring(data[9:21]))
# Drone is sending us a fragment of a file it told us to prepare
# for earlier.
self.recv_file_data(pkt.get_data())
else:
log.info('unknown packet: %04x %s' % (cmd, byte_to_hexstring(data)))
return False
return True
def recv_file_data(self, data):
(filenum,chunk,fragment,size) = struct.unpack('<HLLH', data[0:12])
file = self.file_recv.get(filenum, None)
# Preconditions.
if file is None:
return
if file.recvFragment(chunk, fragment, size, data[12:12+size]):
# Did this complete a chunk? Ack the chunk so the drone won't
# re-send it.
self.send_packet_data(TELLO_CMD_FILE_DATA, type=0x50,
payload=struct.pack('<BHL', 0, filenum, chunk))
if file.done():
# We have the whole file! First, send a normal ack with the first
# byte set to 1 to indicate file completion.
self.send_packet_data(TELLO_CMD_FILE_DATA, type=0x50,
payload=struct.pack('<BHL', 1, filenum, chunk))
# Then send the FILE_COMPLETE packed separately telling it how
# large we thought the file was.
self.send_packet_data(TELLO_CMD_FILE_COMPLETE, type=0x48,
payload=struct.pack('<HL', filenum, file.size))
# Inform subscribers that we have a file and clean up.
self.__publish(event=self.EVENT_FILE_RECEIVED, data=file.data())
del self.file_recv[filenum]
def record_log_data(self, path = None):
if path == None:
path = '%s/Documents/tello-%s.dat' % (
os.getenv('HOME'),
datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S'))
log.info('record log data in %s' % path)
self.log_data_file = open(path, 'wb')
def __state_machine(self, event, sender, data, **args):
self.lock.acquire()
cur_state = self.state
event_connected = False
event_disconnected = False
log.debug('event %s in state %s' % (str(event), str(self.state)))
if self.state == self.STATE_DISCONNECTED:
if event == self.__EVENT_CONN_REQ:
self.__send_conn_req()
self.state = self.STATE_CONNECTING
elif event == self.__EVENT_QUIT_REQ:
self.state = self.STATE_QUIT
event_disconnected = True
self.video_enabled = False
elif self.state == self.STATE_CONNECTING:
if event == self.__EVENT_CONN_ACK:
self.state = self.STATE_CONNECTED
event_connected = True
# send time
self.__send_time_command()
elif event == self.__EVENT_TIMEOUT:
self.__send_conn_req()
elif event == self.__EVENT_QUIT_REQ:
self.state = self.STATE_QUIT
elif self.state == self.STATE_CONNECTED:
if event == self.__EVENT_TIMEOUT:
self.__send_conn_req()
self.state = self.STATE_CONNECTING
event_disconnected = True
self.video_enabled = False
elif event == self.__EVENT_QUIT_REQ:
self.state = self.STATE_QUIT
event_disconnected = True
self.video_enabled = False
elif self.state == self.STATE_QUIT:
pass
if cur_state != self.state:
log.info('state transit %s -> %s' % (cur_state, self.state))
self.lock.release()
if event_connected:
self.__publish(event=self.EVENT_CONNECTED, **args)
self.connected.set()
if event_disconnected:
self.__publish(event=self.EVENT_DISCONNECTED, **args)
self.connected.clear()
def __recv_thread(self):
sock = self.sock
while self.state != self.STATE_QUIT:
if self.state == self.STATE_CONNECTED:
self.__send_stick_command() # ignore errors
try:
data, server = sock.recvfrom(self.udpsize)
log.debug("recv: %s" % byte_to_hexstring(data))
self.__process_packet(data)
except socket.timeout as ex:
if self.state == self.STATE_CONNECTED:
log.error('recv: timeout')
self.__publish(event=self.__EVENT_TIMEOUT)
except Exception as ex:
log.error('recv: %s' % str(ex))
show_exception(ex)
log.info('exit from the recv thread.')
def __video_thread(self):
log.info('start video thread')
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
port = 6038
sock.bind(('', port))
sock.settimeout(1.0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 512 * 1024)
log.info('video receive buffer size = %d' %
sock.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF))
prev_video_data = None
prev_ts = None
history = []
while self.state != self.STATE_QUIT:
if not self.video_enabled:
time.sleep(1.0)
continue
try:
data, server = sock.recvfrom(self.udpsize)
now = datetime.datetime.now()
log.debug("video recv: %s %d bytes" % (byte_to_hexstring(data[0:2]), len(data)))
show_history = False
# check video data loss
video_data = VideoData(data)
loss = video_data.gap(prev_video_data)
if loss != 0:
self.video_data_loss += loss
# enable this line to see packet history
# show_history = True
prev_video_data = video_data
# check video data interval
if prev_ts is not None and 0.1 < (now - prev_ts).total_seconds():
log.info('video recv: %d bytes %02x%02x +%03d' %
(len(data), byte(data[0]), byte(data[1]),
(now - prev_ts).total_seconds() * 1000))
prev_ts = now
# save video data history
history.append([now, len(data), byte(data[0])*256 + byte(data[1])])
if 100 < len(history):
history = history[1:]
# show video data history
if show_history:
prev_ts = history[0][0]
for i in range(1, len(history)):
[ ts, sz, sn ] = history[i]
print(' %02d:%02d:%02d.%03d %4d bytes %04x +%03d%s' %
(ts.hour, ts.minute, ts.second, ts.microsecond/1000,
sz, sn, (ts - prev_ts).total_seconds()*1000,
(' *' if i == len(history) - 1 else '')))
prev_ts = ts
history = history[-1:]
# deliver video frame to subscribers
self.__publish(event=self.EVENT_VIDEO_FRAME, data=data[2:])
self.__publish(event=self.EVENT_VIDEO_DATA, data=data)
# show video frame statistics
if self.prev_video_data_time is None:
self.prev_video_data_time = now
self.video_data_size += len(data)
dur = (now - self.prev_video_data_time).total_seconds()
if 2.0 < dur:
log.info(('video data %d bytes %5.1fKB/sec' %
(self.video_data_size, self.video_data_size / dur / 1024)) +
((' loss=%d' % self.video_data_loss) if self.video_data_loss != 0 else ''))
self.video_data_size = 0
self.prev_video_data_time = now
self.video_data_loss = 0
# keep sending start video command
self.__send_start_video()
except socket.timeout as ex:
log.error('video recv: timeout')
self.start_video()
data = None
except Exception as ex:
log.error('video recv: %s' % str(ex))
show_exception(ex)
log.info('exit from the video thread.')
if __name__ == '__main__':
print('You can use test.py for testing.')
|
path.py
|
from __future__ import absolute_import, unicode_literals
import logging
import os
import stat
import string
import threading
from mopidy import compat, exceptions
from mopidy.compat import queue, urllib
from mopidy.internal import encoding, xdg
logger = logging.getLogger(__name__)
XDG_DIRS = xdg.get_dirs()
def get_or_create_dir(dir_path):
if not isinstance(dir_path, bytes):
raise ValueError('Path is not a bytestring.')
dir_path = expand_path(dir_path)
if os.path.isfile(dir_path):
raise OSError(
'A file with the same name as the desired dir, '
'"%s", already exists.' % dir_path)
elif not os.path.isdir(dir_path):
logger.info('Creating dir %s', dir_path)
os.makedirs(dir_path, 0o755)
return dir_path
def get_or_create_file(file_path, mkdir=True, content=None):
if not isinstance(file_path, bytes):
raise ValueError('Path is not a bytestring.')
file_path = expand_path(file_path)
if isinstance(content, compat.text_type):
content = content.encode('utf-8')
if mkdir:
get_or_create_dir(os.path.dirname(file_path))
if not os.path.isfile(file_path):
logger.info('Creating file %s', file_path)
with open(file_path, 'wb') as fh:
if content is not None:
fh.write(content)
return file_path
def path_to_uri(path):
"""
Convert OS specific path to file:// URI.
Accepts either unicode strings or bytestrings. The encoding of any
bytestring will be maintained so that :func:`uri_to_path` can return the
same bytestring.
Returns a file:// URI as an unicode string.
"""
if isinstance(path, compat.text_type):
path = path.encode('utf-8')
path = urllib.parse.quote(path)
return urllib.parse.urlunsplit((b'file', b'', path, b'', b''))
def uri_to_path(uri):
"""
Convert an URI to a OS specific path.
Returns a bytestring, since the file path can contain chars with other
encoding than UTF-8.
If we had returned these paths as unicode strings, you wouldn't be able to
look up the matching dir or file on your file system because the exact path
would be lost by ignoring its encoding.
"""
if isinstance(uri, compat.text_type):
uri = uri.encode('utf-8')
return urllib.parse.unquote(urllib.parse.urlsplit(uri).path)
def split_path(path):
parts = []
while True:
path, part = os.path.split(path)
if part:
parts.insert(0, part)
if not path or path == b'/':
break
return parts
def expand_path(path):
# TODO: document as we want people to use this.
if not isinstance(path, bytes):
raise ValueError('Path is not a bytestring.')
try:
path = string.Template(path).substitute(XDG_DIRS)
except KeyError:
return None
path = os.path.expanduser(path)
path = os.path.abspath(path)
return path
def _find_worker(relative, follow, done, work, results, errors):
"""Worker thread for collecting stat() results.
:param str relative: directory to make results relative to
:param bool follow: if symlinks should be followed
:param threading.Event done: event indicating that all work has been done
:param queue.Queue work: queue of paths to process
:param dict results: shared dictionary for storing all the stat() results
:param dict errors: shared dictionary for storing any per path errors
"""
while not done.is_set():
try:
entry, parents = work.get(block=False)
except queue.Empty:
continue
if relative:
path = os.path.relpath(entry, relative)
else:
path = entry
try:
if follow:
st = os.stat(entry)
else:
st = os.lstat(entry)
if (st.st_dev, st.st_ino) in parents:
errors[path] = exceptions.FindError('Sym/hardlink loop found.')
continue
parents = parents + [(st.st_dev, st.st_ino)]
if stat.S_ISDIR(st.st_mode):
for e in os.listdir(entry):
work.put((os.path.join(entry, e), parents))
elif stat.S_ISREG(st.st_mode):
results[path] = st
elif stat.S_ISLNK(st.st_mode):
errors[path] = exceptions.FindError('Not following symlinks.')
else:
errors[path] = exceptions.FindError('Not a file or directory.')
except OSError as e:
errors[path] = exceptions.FindError(
encoding.locale_decode(e.strerror), e.errno)
finally:
work.task_done()
def _find(root, thread_count=10, relative=False, follow=False):
"""Threaded find implementation that provides stat results for files.
Tries to protect against sym/hardlink loops by keeping an eye on parent
(st_dev, st_ino) pairs.
:param str root: root directory to search from, may not be a file
:param int thread_count: number of workers to use, mainly useful to
mitigate network lag when scanning on NFS etc.
:param bool relative: if results should be relative to root or absolute
:param bool follow: if symlinks should be followed
"""
threads = []
results = {}
errors = {}
done = threading.Event()
work = queue.Queue()
work.put((os.path.abspath(root), []))
if not relative:
root = None
args = (root, follow, done, work, results, errors)
for i in range(thread_count):
t = threading.Thread(target=_find_worker, args=args)
t.daemon = True
t.start()
threads.append(t)
work.join()
done.set()
for t in threads:
t.join()
return results, errors
def find_mtimes(root, follow=False):
results, errors = _find(root, relative=False, follow=follow)
# return the mtimes as integer milliseconds
mtimes = {f: int(st.st_mtime * 1000) for f, st in results.items()}
return mtimes, errors
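# Usage sketch (assumption, not part of the original module): paths are handled
# as bytestrings, so scanning a (hypothetical) media directory looks roughly like
#
#     mtimes, errors = find_mtimes(b'/home/user/Music')
#     for file_path, mtime_ms in mtimes.items():
#         print(file_path, mtime_ms)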
def is_path_inside_base_dir(path, base_path):
if not isinstance(path, bytes):
raise ValueError('path is not a bytestring')
if not isinstance(base_path, bytes):
raise ValueError('base_path is not a bytestring')
if path.endswith(os.sep):
raise ValueError('Path %s cannot end with a path separator'
% path)
# Expand symlinks
real_base_path = os.path.realpath(base_path)
real_path = os.path.realpath(path)
if os.path.isfile(path):
        # Use dir of file for prefix comparison, so we don't accept
# /tmp/foo.m3u as being inside /tmp/foo, simply because they have a
# common prefix, /tmp/foo, which matches the base path, /tmp/foo.
real_path = os.path.dirname(real_path)
# Check if dir of file is the base path or a subdir
common_prefix = os.path.commonprefix([real_base_path, real_path])
return common_prefix == real_base_path
# FIXME replace with mock usage in tests.
class Mtime(object):
def __init__(self):
self.fake = None
def __call__(self, path):
if self.fake is not None:
return self.fake
return int(os.stat(path).st_mtime)
def set_fake_time(self, time):
self.fake = time
def undo_fake(self):
self.fake = None
mtime = Mtime()
|
test_timeoutqueue.py
|
# Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for timeoutqueue module.
"""
import time
from twisted.python import timeoutqueue
from twisted.trial import unittest, util
from twisted.internet import reactor, interfaces
timeoutqueueSuppression = util.suppress(
message="timeoutqueue is deprecated since Twisted 8.0",
category=DeprecationWarning)
class TimeoutQueueTest(unittest.TestCase):
"""
Test L{timeoutqueue.TimeoutQueue} class.
"""
def tearDown(self):
del self.q
def put(self):
time.sleep(1)
self.q.put(1)
def test_timeout(self):
q = self.q = timeoutqueue.TimeoutQueue()
try:
q.wait(1)
except timeoutqueue.TimedOut:
pass
else:
self.fail("Didn't time out")
test_timeout.suppress = [timeoutqueueSuppression]
def test_get(self):
q = self.q = timeoutqueue.TimeoutQueue()
start = time.time()
threading.Thread(target=self.put).start()
q.wait(1.5)
assert time.time() - start < 2
result = q.get(0)
if result != 1:
self.fail("Didn't get item we put in")
test_get.suppress = [timeoutqueueSuppression]
def test_deprecation(self):
"""
Test that L{timeoutqueue.TimeoutQueue} prints a warning message.
"""
def createQueue():
return timeoutqueue.TimeoutQueue()
self.q = self.assertWarns(
DeprecationWarning,
"timeoutqueue is deprecated since Twisted 8.0",
__file__,
createQueue)
if interfaces.IReactorThreads(reactor, None) is None:
test_get.skip = "No thread support, no way to test putting during a blocked get"
else:
global threading
import threading
|
SerialClient.py
|
#####################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "mferguson@willowgarage.com (Michael Ferguson)"
import array
import errno
import imp
import io
import multiprocessing
try:
    import queue
except ImportError:  # Python 2 fallback
    import Queue as queue
import socket
import struct
import sys
import threading
import time
from serial import Serial, SerialException, SerialTimeoutException
import roslib
import rospy
from std_msgs.msg import Time
from rosserial_msgs.msg import TopicInfo, Log
from rosserial_msgs.srv import RequestParamRequest, RequestParamResponse
import diagnostic_msgs.msg
ERROR_MISMATCHED_PROTOCOL = "Mismatched protocol version in packet: lost sync or rosserial_python is from different ros release than the rosserial client"
ERROR_NO_SYNC = "no sync with device"
ERROR_PACKET_FAILED = "Packet Failed : Failed to read msg data"
def load_pkg_module(package, directory):
# check if it's in the Python path
path = sys.path
try:
imp.find_module(package)
except ImportError:
roslib.load_manifest(package)
try:
m = __import__( package + '.' + directory )
except ImportError:
rospy.logerr( "Cannot import package : %s"% package )
rospy.logerr( "sys.path was " + str(path) )
return None
return m
def load_message(package, message):
m = load_pkg_module(package, 'msg')
m2 = getattr(m, 'msg')
return getattr(m2, message)
def load_service(package,service):
s = load_pkg_module(package, 'srv')
s = getattr(s, 'srv')
srv = getattr(s, service)
mreq = getattr(s, service+"Request")
mres = getattr(s, service+"Response")
return srv,mreq,mres
class Publisher:
"""
Publisher forwards messages from the serial device to ROS.
"""
def __init__(self, topic_info):
""" Create a new publisher. """
self.topic = topic_info.topic_name
# find message type
package, message = topic_info.message_type.split('/')
self.message = load_message(package, message)
if self.message._md5sum == topic_info.md5sum:
self.publisher = rospy.Publisher(self.topic, self.message, queue_size=10)
else:
raise Exception('Checksum does not match: ' + self.message._md5sum + ',' + topic_info.md5sum)
def handlePacket(self, data):
""" Forward message to ROS network. """
m = self.message()
m.deserialize(data)
self.publisher.publish(m)
class Subscriber:
"""
Subscriber forwards messages from ROS to the serial device.
"""
def __init__(self, topic_info, parent):
self.topic = topic_info.topic_name
self.id = topic_info.topic_id
self.parent = parent
# find message type
package, message = topic_info.message_type.split('/')
self.message = load_message(package, message)
if self.message._md5sum == topic_info.md5sum:
self.subscriber = rospy.Subscriber(self.topic, self.message, self.callback)
else:
raise Exception('Checksum does not match: ' + self.message._md5sum + ',' + topic_info.md5sum)
def callback(self, msg):
""" Forward message to serial device. """
data_buffer = io.BytesIO()
msg.serialize(data_buffer)
self.parent.send(self.id, data_buffer.getvalue())
def unregister(self):
rospy.loginfo("Removing subscriber: %s", self.topic)
self.subscriber.unregister()
class ServiceServer:
"""
ServiceServer responds to requests from ROS.
"""
def __init__(self, topic_info, parent):
self.topic = topic_info.topic_name
self.parent = parent
# find message type
package, service = topic_info.message_type.split('/')
s = load_pkg_module(package, 'srv')
s = getattr(s, 'srv')
self.mreq = getattr(s, service+"Request")
self.mres = getattr(s, service+"Response")
srv = getattr(s, service)
self.service = rospy.Service(self.topic, srv, self.callback)
# response message
self.data = None
def unregister(self):
rospy.loginfo("Removing service: %s", self.topic)
self.service.shutdown()
def callback(self, req):
""" Forward request to serial device. """
data_buffer = io.BytesIO()
req.serialize(data_buffer)
self.response = None
self.parent.send(self.id, data_buffer.getvalue())
while self.response is None:
pass
return self.response
def handlePacket(self, data):
""" Forward response to ROS network. """
r = self.mres()
r.deserialize(data)
self.response = r
class ServiceClient:
"""
ServiceClient forwards requests from the serial device to a ROS service and returns the response.
"""
def __init__(self, topic_info, parent):
self.topic = topic_info.topic_name
self.parent = parent
# find message type
package, service = topic_info.message_type.split('/')
s = load_pkg_module(package, 'srv')
s = getattr(s, 'srv')
self.mreq = getattr(s, service+"Request")
self.mres = getattr(s, service+"Response")
srv = getattr(s, service)
rospy.loginfo("Starting service client, waiting for service '" + self.topic + "'")
rospy.wait_for_service(self.topic)
self.proxy = rospy.ServiceProxy(self.topic, srv)
def handlePacket(self, data):
""" Forward request to ROS network. """
req = self.mreq()
req.deserialize(data)
# call service proxy
resp = self.proxy(req)
# serialize and publish
data_buffer = io.BytesIO()
resp.serialize(data_buffer)
self.parent.send(self.id, data_buffer.getvalue())
class RosSerialServer:
"""
RosSerialServer waits for a socket connection then passes itself, forked as a
new process, to SerialClient which uses it as a serial port. It continues to listen
for additional connections. Each forked process is a new ros node, and proxies ros
operations (e.g. publish/subscribe) from its connection to the rest of ros.
"""
def __init__(self, tcp_portnum, fork_server=False):
rospy.loginfo("Fork_server is: %s" % fork_server)
self.tcp_portnum = tcp_portnum
self.fork_server = fork_server
def listen(self):
self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#bind the socket to a public host, and a well-known port
self.serversocket.bind(("", self.tcp_portnum)) #become a server socket
self.serversocket.listen(1)
self.serversocket.settimeout(1)
#accept connections
rospy.loginfo("Waiting for socket connection")
while not rospy.is_shutdown():
try:
clientsocket, address = self.serversocket.accept()
except socket.timeout:
continue
#now do something with the clientsocket
rospy.loginfo("Established a socket connection from %s on port %s" % address)
self.socket = clientsocket
self.isConnected = True
if self.fork_server: # if configured to launch server in a separate process
rospy.loginfo("Forking a socket server process")
process = multiprocessing.Process(target=self.startSocketServer, args=address)
process.daemon = True
process.start()
rospy.loginfo("launched startSocketServer")
else:
rospy.loginfo("calling startSerialClient")
self.startSerialClient()
rospy.loginfo("startSerialClient() exited")
def startSerialClient(self):
client = SerialClient(self)
try:
client.run()
except KeyboardInterrupt:
pass
except RuntimeError:
rospy.loginfo("RuntimeError exception caught")
self.isConnected = False
except socket.error:
rospy.loginfo("socket.error exception caught")
self.isConnected = False
finally:
rospy.loginfo("Client has exited, closing socket.")
self.socket.close()
for sub in client.subscribers.values():
sub.unregister()
for srv in client.services.values():
srv.unregister()
def startSocketServer(self, port, address):
rospy.loginfo("starting ROS Serial Python Node serial_node-%r" % address)
rospy.init_node("serial_node_%r" % address)
self.startSerialClient()
def flushInput(self):
pass
def write(self, data):
if not self.isConnected:
return
length = len(data)
totalsent = 0
while totalsent < length:
try:
totalsent += self.socket.send(data[totalsent:])
except BrokenPipeError:
raise RuntimeError("RosSerialServer.write() socket connection broken")
def read(self, rqsted_length):
self.msg = b''
if not self.isConnected:
return self.msg
while len(self.msg) < rqsted_length:
chunk = self.socket.recv(rqsted_length - len(self.msg))
if chunk == b'':
raise RuntimeError("RosSerialServer.read() socket connection broken")
self.msg = self.msg + chunk
return self.msg
def inWaiting(self):
try: # the caller checks just for <1, so we'll peek at just one byte
chunk = self.socket.recv(1, socket.MSG_DONTWAIT|socket.MSG_PEEK)
if chunk == b'':
raise RuntimeError("RosSerialServer.inWaiting() socket connection broken")
return len(chunk)
except BlockingIOError:
return 0
class SerialClient(object):
"""
SerialClient bridges a rosserial device and ROS: it speaks the serial protocol and forwards messages, services, and logs in both directions.
"""
header = b'\xff'
# hydro introduces protocol ver2 which must match node_handle.h
# The protocol version is sent as the 2nd sync byte emitted by each end
protocol_ver1 = b'\xff'
protocol_ver2 = b'\xfe'
protocol_ver = protocol_ver2
def __init__(self, port=None, baud=57600, timeout=5.0, fix_pyserial_for_test=False):
""" Initialize node, connect to bus, attempt to negotiate topics. """
self.read_lock = threading.RLock()
self.write_lock = threading.RLock()
self.write_queue = queue.Queue()
self.write_thread = None
self.lastsync = rospy.Time(0)
self.lastsync_lost = rospy.Time(0)
self.lastsync_success = rospy.Time(0)
self.last_read = rospy.Time(0)
self.last_write = rospy.Time(0)
self.timeout = timeout
self.synced = False
self.fix_pyserial_for_test = fix_pyserial_for_test
self.publishers = dict() # id:Publishers
self.subscribers = dict() # topic:Subscriber
self.services = dict() # topic:Service
def shutdown():
self.txStopRequest()
rospy.loginfo('shutdown hook activated')
rospy.on_shutdown(shutdown)
self.pub_diagnostics = rospy.Publisher('/diagnostics', diagnostic_msgs.msg.DiagnosticArray, queue_size=10)
if port is None:
# no port specified, listen for any new port?
pass
elif hasattr(port, 'read'):
# assume it's a file-like object
self.port=port
else:
# open a specific port
while not rospy.is_shutdown():
try:
if self.fix_pyserial_for_test:
# see https://github.com/pyserial/pyserial/issues/59
self.port = Serial(port, baud, timeout=self.timeout, write_timeout=10, rtscts=True, dsrdtr=True)
else:
self.port = Serial(port, baud, timeout=self.timeout, write_timeout=10)
break
except SerialException as e:
rospy.logerr("Error opening serial: %s", e)
time.sleep(3)
if rospy.is_shutdown():
return
time.sleep(0.1) # Wait for ready (patch for Uno)
self.buffer_out = -1
self.buffer_in = -1
self.callbacks = dict()
# endpoints for creating new pubs/subs
self.callbacks[TopicInfo.ID_PUBLISHER] = self.setupPublisher
self.callbacks[TopicInfo.ID_SUBSCRIBER] = self.setupSubscriber
# service client/servers have 2 creation endpoints (a publisher and a subscriber)
self.callbacks[TopicInfo.ID_SERVICE_SERVER+TopicInfo.ID_PUBLISHER] = self.setupServiceServerPublisher
self.callbacks[TopicInfo.ID_SERVICE_SERVER+TopicInfo.ID_SUBSCRIBER] = self.setupServiceServerSubscriber
self.callbacks[TopicInfo.ID_SERVICE_CLIENT+TopicInfo.ID_PUBLISHER] = self.setupServiceClientPublisher
self.callbacks[TopicInfo.ID_SERVICE_CLIENT+TopicInfo.ID_SUBSCRIBER] = self.setupServiceClientSubscriber
# custom endpoints
self.callbacks[TopicInfo.ID_PARAMETER_REQUEST] = self.handleParameterRequest
self.callbacks[TopicInfo.ID_LOG] = self.handleLoggingRequest
self.callbacks[TopicInfo.ID_TIME] = self.handleTimeRequest
rospy.sleep(2.0)
self.requestTopics()
self.lastsync = rospy.Time.now()
def requestTopics(self):
""" Determine topics to subscribe/publish. """
rospy.loginfo('Requesting topics...')
# TODO remove if possible
if not self.fix_pyserial_for_test:
with self.read_lock:
self.port.flushInput()
# request topic sync
self.write_queue.put(self.header + self.protocol_ver + b"\x00\x00\xff\x00\x00\xff")
def txStopRequest(self):
""" Send stop tx request to client before the node exits. """
if not self.fix_pyserial_for_test:
with self.read_lock:
self.port.flushInput()
self.write_queue.put(self.header + self.protocol_ver + b"\x00\x00\xff\x0b\x00\xf4")
rospy.loginfo("Sending tx stop request")
def tryRead(self, length):
try:
read_start = time.time()
bytes_remaining = length
result = bytearray()
while bytes_remaining != 0 and time.time() - read_start < self.timeout:
with self.read_lock:
received = self.port.read(bytes_remaining)
if len(received) != 0:
self.last_read = rospy.Time.now()
result.extend(received)
bytes_remaining -= len(received)
if bytes_remaining != 0:
raise IOError("Returned short (expected %d bytes, received %d instead)." % (length, length - bytes_remaining))
return bytes(result)
except Exception as e:
raise IOError("Serial Port read failure: %s" % e)
def run(self):
""" Forward recieved messages to appropriate publisher. """
# Launch write thread.
if self.write_thread is None:
self.write_thread = threading.Thread(target=self.processWriteQueue)
self.write_thread.daemon = True
self.write_thread.start()
# Handle reading.
data = ''
read_step = None
while self.write_thread.is_alive() and not rospy.is_shutdown():
if (rospy.Time.now() - self.lastsync).to_sec() > (self.timeout * 3):
if self.synced:
rospy.logerr("Lost sync with device, restarting...")
else:
rospy.logerr("Unable to sync with device; possible link problem or link software version mismatch such as hydro rosserial_python with groovy Arduino")
self.lastsync_lost = rospy.Time.now()
self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, ERROR_NO_SYNC)
self.requestTopics()
self.lastsync = rospy.Time.now()
# This try-block is here because we make multiple calls to read(). Any one of them can throw
# an IOError if there's a serial problem or timeout. In that scenario, a single handler at the
# bottom attempts to reconfigure the topics.
try:
with self.read_lock:
if self.port.inWaiting() < 1:
time.sleep(0.001)
continue
# Find sync flag.
flag = [0, 0]
read_step = 'syncflag'
flag[0] = self.tryRead(1)
if (flag[0] != self.header):
continue
# Find protocol version.
read_step = 'protocol'
flag[1] = self.tryRead(1)
if flag[1] != self.protocol_ver:
self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, ERROR_MISMATCHED_PROTOCOL)
rospy.logerr("Mismatched protocol version in packet (%s): lost sync or rosserial_python is from different ros release than the rosserial client" % repr(flag[1]))
protocol_ver_msgs = {
self.protocol_ver1: 'Rev 0 (rosserial 0.4 and earlier)',
self.protocol_ver2: 'Rev 1 (rosserial 0.5+)',
b'\xfd': 'Some future rosserial version'
}
if flag[1] in protocol_ver_msgs:
found_ver_msg = 'Protocol version of client is ' + protocol_ver_msgs[flag[1]]
else:
found_ver_msg = "Protocol version of client is unrecognized"
rospy.loginfo("%s, expected %s" % (found_ver_msg, protocol_ver_msgs[self.protocol_ver]))
continue
# Read message length, checksum (3 bytes)
read_step = 'message length'
msg_len_bytes = self.tryRead(3)
msg_length, _ = struct.unpack("<hB", msg_len_bytes)
# Validate message length checksum.
if sum(array.array("B", msg_len_bytes)) % 256 != 255:
rospy.loginfo("Wrong checksum for msg length, length %d, dropping message." % (msg_length))
continue
# Read topic id (2 bytes)
read_step = 'topic id'
topic_id_header = self.tryRead(2)
topic_id, = struct.unpack("<H", topic_id_header)
# Read serialized message data.
read_step = 'data'
try:
msg = self.tryRead(msg_length)
except IOError:
self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, ERROR_PACKET_FAILED)
rospy.loginfo("Packet Failed : Failed to read msg data")
rospy.loginfo("expected msg length is %d", msg_length)
raise
# Read checksum for topic id and msg
read_step = 'data checksum'
chk = self.tryRead(1)
checksum = sum(array.array('B', topic_id_header + msg + chk))
# Validate checksum.
if checksum % 256 == 255:
self.synced = True
self.lastsync_success = rospy.Time.now()
try:
self.callbacks[topic_id](msg)
except KeyError:
rospy.logerr("Tried to publish before configured, topic id %d" % topic_id)
self.requestTopics()
time.sleep(0.001)
else:
rospy.loginfo("wrong checksum for topic id and msg")
except IOError as exc:
rospy.logwarn('Last read step: %s' % read_step)
rospy.logwarn('Run loop error: %s' % exc)
# One of the read calls had an issue. Just to be safe, request that the client
# reinitialize their topics.
with self.read_lock:
self.port.flushInput()
with self.write_lock:
self.port.flushOutput()
self.requestTopics()
self.write_thread.join()
def setPublishSize(self, size):
if self.buffer_out < 0:
self.buffer_out = size
rospy.loginfo("Note: publish buffer size is %d bytes" % self.buffer_out)
def setSubscribeSize(self, size):
if self.buffer_in < 0:
self.buffer_in = size
rospy.loginfo("Note: subscribe buffer size is %d bytes" % self.buffer_in)
def setupPublisher(self, data):
""" Register a new publisher. """
try:
msg = TopicInfo()
msg.deserialize(data)
pub = Publisher(msg)
self.publishers[msg.topic_id] = pub
self.callbacks[msg.topic_id] = pub.handlePacket
self.setPublishSize(msg.buffer_size)
rospy.loginfo("Setup publisher on %s [%s]" % (msg.topic_name, msg.message_type) )
except Exception as e:
rospy.logerr("Creation of publisher failed: %s", e)
def setupSubscriber(self, data):
""" Register a new subscriber. """
try:
msg = TopicInfo()
msg.deserialize(data)
if not msg.topic_name in list(self.subscribers.keys()):
sub = Subscriber(msg, self)
self.subscribers[msg.topic_name] = sub
self.setSubscribeSize(msg.buffer_size)
rospy.loginfo("Setup subscriber on %s [%s]" % (msg.topic_name, msg.message_type) )
elif msg.message_type != self.subscribers[msg.topic_name].message._type:
old_message_type = self.subscribers[msg.topic_name].message._type
self.subscribers[msg.topic_name].unregister()
sub = Subscriber(msg, self)
self.subscribers[msg.topic_name] = sub
self.setSubscribeSize(msg.buffer_size)
rospy.loginfo("Change the message type of subscriber on %s from [%s] to [%s]" % (msg.topic_name, old_message_type, msg.message_type) )
except Exception as e:
rospy.logerr("Creation of subscriber failed: %s", e)
def setupServiceServerPublisher(self, data):
""" Register a new service server. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setPublishSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except KeyError:
srv = ServiceServer(msg, self)
rospy.loginfo("Setup service server on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mres._md5sum == msg.md5sum:
self.callbacks[msg.topic_id] = srv.handlePacket
else:
raise Exception('Checksum does not match: ' + srv.mres._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service server failed: %s", e)
def setupServiceServerSubscriber(self, data):
""" Register a new service server. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setSubscribeSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except KeyError:
srv = ServiceServer(msg, self)
rospy.loginfo("Setup service server on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mreq._md5sum == msg.md5sum:
srv.id = msg.topic_id
else:
raise Exception('Checksum does not match: ' + srv.mreq._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service server failed: %s", e)
def setupServiceClientPublisher(self, data):
""" Register a new service client. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setPublishSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except KeyError:
srv = ServiceClient(msg, self)
rospy.loginfo("Setup service client on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mreq._md5sum == msg.md5sum:
self.callbacks[msg.topic_id] = srv.handlePacket
else:
raise Exception('Checksum does not match: ' + srv.mreq._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service client failed: %s", e)
def setupServiceClientSubscriber(self, data):
""" Register a new service client. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setSubscribeSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except KeyError:
srv = ServiceClient(msg, self)
rospy.loginfo("Setup service client on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mres._md5sum == msg.md5sum:
srv.id = msg.topic_id
else:
raise Exception('Checksum does not match: ' + srv.mres._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service client failed: %s", e)
def handleTimeRequest(self, data):
""" Respond to device with system time. """
t = Time()
t.data = rospy.Time.now()
data_buffer = io.BytesIO()
t.serialize(data_buffer)
self.send( TopicInfo.ID_TIME, data_buffer.getvalue() )
self.lastsync = rospy.Time.now()
def handleParameterRequest(self, data):
""" Send parameters to device. Supports only simple datatypes and arrays of such. """
req = RequestParamRequest()
req.deserialize(data)
resp = RequestParamResponse()
try:
param = rospy.get_param(req.name)
except KeyError:
rospy.logerr("Parameter %s does not exist"%req.name)
return
if param is None:
rospy.logerr("Parameter %s does not exist"%req.name)
return
if isinstance(param, dict):
rospy.logerr("Cannot send param %s because it is a dictionary"%req.name)
return
if not isinstance(param, list):
param = [param]
#check to make sure that all parameters in list are same type
t = type(param[0])
for p in param:
if t != type(p):
rospy.logerr('All parameters in the list %s must be of the same type'%req.name)
return
if t == int or t == bool:
resp.ints = param
if t == float:
resp.floats = param
if t == str:
resp.strings = param
data_buffer = io.BytesIO()
resp.serialize(data_buffer)
self.send(TopicInfo.ID_PARAMETER_REQUEST, data_buffer.getvalue())
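# Illustrative only (values assumed, not from the original source): a param
# set with `rosparam set /foo "[1, 2, 3]"` is returned to the device as
# resp.ints == [1, 2, 3]; dict-valued params are rejected above.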
def handleLoggingRequest(self, data):
""" Forward logging information from serial device into ROS. """
msg = Log()
msg.deserialize(data)
if msg.level == Log.ROSDEBUG:
rospy.logdebug(msg.msg)
elif msg.level == Log.INFO:
rospy.loginfo(msg.msg)
elif msg.level == Log.WARN:
rospy.logwarn(msg.msg)
elif msg.level == Log.ERROR:
rospy.logerr(msg.msg)
elif msg.level == Log.FATAL:
rospy.logfatal(msg.msg)
def send(self, topic, msg):
"""
Queues data to be written to the serial port.
"""
self.write_queue.put((topic, msg))
def _write(self, data):
"""
Writes raw data over the serial port. Assumes the data is formatted as a packet. http://wiki.ros.org/rosserial/Overview/Protocol
"""
with self.write_lock:
self.port.write(data)
self.last_write = rospy.Time.now()
def _send(self, topic, msg_bytes):
"""
Send a message on a particular topic to the device.
"""
length = len(msg_bytes)
if self.buffer_in > 0 and length > self.buffer_in:
rospy.logerr("Message from ROS network dropped: message larger than buffer.\n%s" % msg)
return -1
else:
# frame : header (1b) + version (1b) + msg_len(2b) + msg_len_chk(1b) + topic_id(2b) + msg(nb) + msg_topic_id_chk(1b)
length_bytes = struct.pack('<h', length)
length_checksum = 255 - (sum(array.array('B', length_bytes)) % 256)
length_checksum_bytes = struct.pack('B', length_checksum)
topic_bytes = struct.pack('<h', topic)
msg_checksum = 255 - (sum(array.array('B', topic_bytes + msg_bytes)) % 256)
msg_checksum_bytes = struct.pack('B', msg_checksum)
self._write(self.header + self.protocol_ver + length_bytes + length_checksum_bytes + topic_bytes + msg_bytes + msg_checksum_bytes)
return length
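# Worked example of the framing above (illustrative only, values assumed):
# a 2-byte payload b'\x01\x02' on topic id 125 is written as
#   header    b'\xff'
#   version   b'\xfe'
#   length    b'\x02\x00'   little-endian 2
#   len chk   b'\xfd'       255 - ((0x02 + 0x00) % 256)
#   topic     b'\x7d\x00'   little-endian 125
#   payload   b'\x01\x02'
#   msg chk   b'\x7f'       255 - ((0x7d + 0x00 + 0x01 + 0x02) % 256)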
def processWriteQueue(self):
"""
Main loop for the thread that processes outgoing data to write to the serial port.
"""
while not rospy.is_shutdown():
if self.write_queue.empty():
time.sleep(0.01)
else:
data = self.write_queue.get()
while True:
try:
if isinstance(data, tuple):
topic, msg = data
self._send(topic, msg)
elif isinstance(data, bytes):
self._write(data)
else:
rospy.logerr("Trying to write invalid data type: %s" % type(data))
break
except SerialTimeoutException as exc:
rospy.logerr('Write timeout: %s' % exc)
time.sleep(1)
except RuntimeError as exc:
rospy.logerr('Write thread exception: %s' % exc)
break
def sendDiagnostics(self, level, msg_text):
msg = diagnostic_msgs.msg.DiagnosticArray()
status = diagnostic_msgs.msg.DiagnosticStatus()
status.name = "rosserial_python"
msg.header.stamp = rospy.Time.now()
msg.status.append(status)
status.message = msg_text
status.level = level
status.values.append(diagnostic_msgs.msg.KeyValue())
status.values[0].key="last sync"
if self.lastsync.to_sec()>0:
status.values[0].value=time.ctime(self.lastsync.to_sec())
else:
status.values[0].value="never"
status.values.append(diagnostic_msgs.msg.KeyValue())
status.values[1].key="last sync lost"
status.values[1].value=time.ctime(self.lastsync_lost.to_sec())
self.pub_diagnostics.publish(msg)
|
producer_workload_burst.py
|
import requests
import time
from kafka import KafkaProducer
from function_chains_pb2 import LambdaFunction, ChainNode, ChainState
import numpy as np
import json
import datetime
from threading import Thread
concurrency = 1
def main():
# NOTE: you can set invoke count of each function below the chain definitions (the concurrencies variable)
# Long Running Function Chain
# Chain id: 1
# codePublish
publish_init = LambdaFunction(url="publishinit.com", id=200)
publish_compile = LambdaFunction(url="publishcompile.com", id=201)
publish_compress = LambdaFunction(url="publishcompress.com", id=202)
node8_publish_fan7 = ChainNode(function=publish_compile, nodeID=8, children=[], lastNodeIDs=[2, 3, 4, 5, 6, 7, 8], chainFunctionIDs=[200, 201], args = {
"s3_input": "repos/fzf.tar.gz",
"s3_output": "releases/fzf_test/fzf_386",
"s3_bucket": "code-publish-bucket",
"arch": "386",
})
node7_publish_fan7 = ChainNode(function=publish_compile, nodeID=7, children=[], lastNodeIDs=[2, 3, 4, 5, 6, 7, 8], chainFunctionIDs=[200, 201], args = {
"s3_input": "repos/fzf.tar.gz",
"s3_output": "releases/fzf_test/fzf_amd64",
"s3_bucket": "code-publish-bucket",
"arch": "amd64",
})
node6_publish_fan7 = ChainNode(function=publish_compile, nodeID=6, children=[], lastNodeIDs=[2, 3, 4, 5, 6, 7, 8], chainFunctionIDs=[200, 201], args = {
"s3_input": "repos/fzf.tar.gz",
"s3_output": "releases/fzf_test/fzf_arm_5",
"s3_bucket": "code-publish-bucket",
"arch": "arm_5",
})
node5_publish_fan7 = ChainNode(function=publish_compile, nodeID=5, children=[], lastNodeIDs=[2, 3, 4, 5, 6, 7, 8], chainFunctionIDs=[200, 201], args = {
"s3_input": "repos/fzf.tar.gz",
"s3_output": "releases/fzf_test/fzf_arm_6",
"s3_bucket": "code-publish-bucket",
"arch": "arm_6",
})
node4_publish_fan7 = ChainNode(function=publish_compile, nodeID=4, children=[], lastNodeIDs=[2, 3, 4, 5, 6, 7, 8], chainFunctionIDs=[200, 201], args = {
"s3_input": "repos/fzf.tar.gz",
"s3_output": "releases/fzf_test/fzf_arm_7",
"s3_bucket": "code-publish-bucket",
"arch": "arm_7",
})
node3_publish_fan7 = ChainNode(function=publish_compile, nodeID=3, children=[], lastNodeIDs=[2, 3, 4, 5, 6, 7, 8], chainFunctionIDs=[200, 201], args = {
"s3_input": "repos/fzf.tar.gz",
"s3_output": "releases/fzf_test/fzf_arm64",
"s3_bucket": "code-publish-bucket",
"arch": "arm64",
})
node2_publish_fan7 = ChainNode(function=publish_compile, nodeID=2, children=[], lastNodeIDs=[2, 3, 4, 5, 6, 7, 8], chainFunctionIDs=[200, 201], args = {
"s3_input": "repos/fzf.tar.gz",
"s3_output": "releases/fzf_test/fzf_ppc64le",
"s3_bucket": "code-publish-bucket",
"arch": "ppc64le",
})
node1_publish_fan7 = ChainNode(function=publish_init, nodeID=1, children=[node2_publish_fan7, node3_publish_fan7, node4_publish_fan7, node5_publish_fan7, node6_publish_fan7, node7_publish_fan7, node8_publish_fan7], lastNodeIDs=[2, 3, 4, 5, 6, 7, 8], chainFunctionIDs=[200, 201], args = {
"s3_input": "repos/fzf.tar.gz",
"arches": json.dumps([
"386",
"amd64",
"arm_5",
"arm_6",
"arm_7",
"arm64",
"ppc64le"]),
})
chain1 = node1_publish_fan7
# Medium Function Chain 1
# Chain id: 2
vidpipe_init = LambdaFunction(url="vidpipeinit.com", id=100)
vidpipe_preview = LambdaFunction(url="vidpipepreview.com", id=101)
vidpipe_grayscale = LambdaFunction(url="vidpipegrayscale.com", id=102)
vidpipe_reverse = LambdaFunction(url="vidpipereverse.com", id=103)
node3_vidpipe_linear_short = ChainNode(function=vidpipe_reverse, nodeID=3, children=[], lastNodeIDs=[3], chainFunctionIDs=[101, 102, 103], args = {
"s3_input": "linear/grayscale-preview-smaller_buck_bunny-1.mp4",
"s3_bucket": "video-pipeline-io",
"s3_output_root": "linear/"})
node2_vidpipe_linear_short = ChainNode(function=vidpipe_grayscale, nodeID=2, children=[node3_vidpipe_linear_short], lastNodeIDs=[3], chainFunctionIDs=[101, 102, 103], args = {
"s3_input": "linear/preview-smaller_buck_bunny-1.mp4",
"s3_bucket": "video-pipeline-io",
"s3_output_root": "linear/"})
node1_vidpipe_linear_short = ChainNode(function=vidpipe_preview, nodeID=1, children=[node2_vidpipe_linear_short], lastNodeIDs=[3], chainFunctionIDs=[101, 102, 103], args = {
"s3_input": "big_buck_bunny/smaller_buck_bunny-1.mp4",
"s3_bucket": "video-pipeline-io",
"s3_output_root": "linear/"})
chain2 = node1_vidpipe_linear_short
# Medium Function Chain 2
# Chain id: 3
## ObjectDetection using openCV
# L2
# /
# L1
# \
# L3
objectDetection1 = LambdaFunction(url="objectdetection1.com", id=1)
objectDetection2 = LambdaFunction(url="objectdetection2.com", id=2)
objectDetection3 = LambdaFunction(url="objectdetection3.com", id=3)
node3_c6 = ChainNode(function=objectDetection3, nodeID=3, children=[], lastNodeIDs=[2,3], chainFunctionIDs=[1,2,3], args={})
node2_c6 = ChainNode(function=objectDetection2, nodeID=2, children=[], lastNodeIDs=[2,3], chainFunctionIDs=[1,2,3], args={})
node1_c6 = ChainNode(function=objectDetection1, nodeID=1, children=[node2_c6, node3_c6], lastNodeIDs=[2,3], chainFunctionIDs=[1,2,3], args={})
chain3 = node1_c6
# Short Function Chain
# Chain id: 4
bagOfWords = LambdaFunction(url="bagofwords.com", id=300)
node1_c4 = ChainNode(function=bagOfWords, nodeID=1, children=[], lastNodeIDs=[1], chainFunctionIDs=[1], args={})
chain4 = node1_c4
concurrencies = [
{ "chain_id": 1, "chain": chain1, "count": 1 },
{ "chain_id": 2, "chain": chain2, "count": 1 },
{ "chain_id": 3, "chain": chain3, "count": 1 },
{ "chain_id": 4, "chain": chain4, "count": 1 },
]
no_of_invocations_long = 2
no_of_invocations_medium_1 = 4
no_of_invocations_medium_2 = 3
no_of_invocations_short = 300
# chain_long = Workload("Fan2", no_of_invocations_long, concurrency, chain1, 1, 1000) // update these
# chain_medium_1 = Workload("Linear", no_of_invocations_medium_1, concurrency, chain2, 2, 1000) // update these
# chain_medium_2 = Workload("FanAndLinear", no_of_invocations_medium_2, concurrency, chain5, 5, 1000) // update these
# chain_short = Workload("FanAndLinear", no_of_invocations_short, concurrency, chain5, 5, 1000) // update these
chain_long_sample = [1000]  # generateSample(no_of_invocations_long, concurrency)
chain_medium_1_sample = [1000]  # generateSample(no_of_invocations_medium_1, concurrency)
chain_medium_2_sample = [1000]  # generateSample(no_of_invocations_medium_2, concurrency)
chain_short_sample = [1000]  # generateSample(no_of_invocations_short, concurrency)
instance_id = 0
# Kafka connections
kafka_url = "localhost:9092"
producer_PQ = connect_kafka_producer(kafka_url)
producer_PSQ = connect_kafka_producer(kafka_url)
for x in range(concurrency):
print('here')
start = time.time()
counter_long = chain_long_sample[x]
counter_medium_1 = chain_medium_1_sample[x]
counter_medium_2 = chain_medium_2_sample[x]
counter_short = chain_short_sample[x]
while counter_long > 0 or counter_medium_1 >0 or counter_medium_2 >0 or counter_short > 0:
if counter_long > 0:
t1 = Thread(target=startWorkload, args=(concurrencies[0]['chain'],concurrencies[0]['chain_id'], instance_id,1, producer_PQ, producer_PSQ))
t1.start()
counter_long-=1
if counter_medium_1 > 0:
t2 = Thread(target=startWorkload, args=(concurrencies[1]['chain'],concurrencies[1]['chain_id'], instance_id,1, producer_PQ, producer_PSQ))
t2.start()
counter_medium_1-=1
if counter_medium_2 > 0:
t3 = Thread(target=startWorkload, args=(concurrencies[2]['chain'],concurrencies[2]['chain_id'], instance_id,1, producer_PQ, producer_PSQ))
t3.start()
counter_medium_2-=1
if counter_short > 0:
t4 = Thread(target=startWorkload, args=(concurrencies[3]['chain'], concurrencies[3]['chain_id'], instance_id,1, producer_PQ, producer_PSQ))
t4.start()
counter_short-=1
instance_id += 1
elapsed = time.time() - start
# print(1-elapsed)
if 1-elapsed>0:
time.sleep(1-elapsed)
def startWorkload(chain, chain_id, instance_id, requests, producer_PQ, producer_PSQ):
invocations = list()
for k in range(requests):
invocations.append(ChainState(currentNode=chain,invokeTime=str(datetime.datetime.now().isoformat('T')), instanceID=instance_id, chainID=chain_id, flags={"hybrid": ""}))
pushEvents(invocations, producer_PQ, producer_PSQ)
return
def pushEvents(invocations, producer_PQ, producer_PSQ):
kafka_pq_topic = "pending_queue"
kafka_psq_topic = "producer_side_logging_queue"
if producer_PQ is not None:
for invocation in invocations:
PARAMS = {
"instanceId": invocation.instanceID,
"chainId": invocation.chainID,
"chainFunctionIdList": invocation.currentNode.chainFunctionIDs,
"extras": [{"functionPriorities":{"1":1,"2":2,"3":3}} ]
}
tempLog = "producerSide-start-"+str(time.time())+"-"+str(PARAMS)
publish_message(producer_PSQ, kafka_psq_topic, 'raw', bytes(tempLog, encoding = 'utf-8'))
publish_message(producer_PQ, kafka_pq_topic, 'raw', invocation.SerializeToString())
def publish_message(producer_instance, topic_name, key, value):
try:
key_bytes = bytes(key, encoding = 'utf-8')
value_bytes = value
producer_instance.send(topic_name, key=key_bytes, value=value_bytes)
producer_instance.flush()
if (topic_name == "pending_queue"):
print('Message published successfully.')
except Exception as ex:
print('Exception in publishing message')
print(str(ex))
def generateSample(arrival_time, sample_size):
return np.random.poisson(arrival_time, sample_size)
def connect_kafka_producer(url):
_producer = None
try:
_producer = KafkaProducer(bootstrap_servers=[url], api_version=(0, 10))
except Exception as ex:
print('Exception while connecting Kafka')
print(str(ex))
finally:
return _producer
if __name__ == '__main__':
main()
|
fib_process.py
|
import time
import multiprocessing
from multiprocessing import Process
def fib(number):
if number <= 1: return 1
return fib(number - 2) + fib(number - 1)
if __name__ == '__main__':
processes = []
for _ in range(5):
p = Process(target=fib, args=(30,))
processes.append(p)
start = time.perf_counter()
for process in processes:
process.start()
for process in processes:
process.join()
end = time.perf_counter()
print('\nprocesses time', end - start)
print('cores', multiprocessing.cpu_count())
|
daemon_thread.py
|
#!/usr/bin/env python3
import logging
import threading
import time
def thread_function(name):
logging.info("Thread %s: starting", name)
time.sleep(2)
logging.info("Thread %s: finishing", name)
if __name__ == "__main__":
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO, datefmt="%H:%M:%S")
logging.info("Main : before creating thread")
x = threading.Thread(target=thread_function, args=(1,), daemon=True)
logging.info("Main : before running thread")
x.start()
logging.info("Main : wait for the thread to finish")
x.join()
logging.info("Main : all done")
|
pycroservices.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2016 Satoyuki Tsukano
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import boto3
import json
import logging
import uuid
from functools import wraps
logger = logging.getLogger(__name__)
pycro_context_repositry = dict()
config_repositry = dict()
class LocalContext:
def __init__(self, function_name):
self.function_name = function_name
RUN_MODE_AWS = 'AWS'
RUN_MODE_LOCAL = 'local'
class PycroContext:
def __init__(self, name, mode):
logger.info("init PycroContext name={}".format(name))
self.mode = mode
self.name = name
if self.name not in config_repositry:
_load_config(self.name)
config = config_repositry[self.name]
self.my_prop = _create_my_prop(config, self.mode)
if self.mode == RUN_MODE_AWS:
# AWS mode
self.client = boto3.client('lambda')
def emit(self, out_payload, context):
out_event = {"payload": out_payload}
in_event = context.in_event
# set trace_id to out_event
if 'trace_id' in in_event:
trace_id = in_event['trace_id']
else:
trace_id = str(uuid.uuid4())
out_event['trace_id'] = trace_id
# check 'call depth limit'
limit = self.my_prop['call_depth_limit']
roots = in_event.get('roots', [])
if len(roots) >= limit:
logger.warn("limit over! limit is {}, roots is {}".format(limit, roots))
return
roots.append(self.name)
out_event['roots'] = roots
# invoke following functions
if self.mode == RUN_MODE_AWS:
# AWS mode
for following in self.my_prop['followings']:
out_json = json.JSONEncoder().encode(out_event)
logger.info("invoke_async following=" + following + " event=" + out_json)
self.client.invoke_async(FunctionName=following, InvokeArgs=out_json)
else:
# local mode
from multiprocessing import Process
for (function_name, target_method) in self.my_prop['followings']:
context = LocalContext(function_name)
logger.info("invoke_async following=" + function_name + " event=" + str(out_event))
process = Process(target=target_method, args=(out_event, context))
process.start()
def _load_config(name):
with open(name + '.json', mode='r') as f:
config_repositry[name] = json.load(f)
def _create_my_prop(config_json, mode):
call_depth_limit = config_json.get('call_depth_limit', 10)
if mode == RUN_MODE_AWS:
# AWS mode
return {
'name': config_json['name'],
'call_depth_limit': call_depth_limit,
'followings': config_json.get('followings', [])
}
else:
# local mode
if 'followings' not in config_json:
return {
'name': config_json['name'],
'call_depth_limit': call_depth_limit,
'followings': []
}
followings = config_json['followings']
my_prop_followings = []
for following in followings:
if following not in config_repositry:
_load_config(following)
handler = config_repositry[following]['handler']
[module_name, attr_name] = handler.split('.')
mod = __import__(module_name)
target_method = getattr(mod, attr_name)
my_prop_followings.append((following, target_method))
return {
'name': config_json['name'],
'call_depth_limit': call_depth_limit,
'followings': my_prop_followings
}
def _get_pc(context):
name = context.function_name
if name not in pycro_context_repositry:
if hasattr(context, 'aws_request_id'):
# AWS mode
pycro_context_repositry[name] = PycroContext(name, RUN_MODE_AWS)
else:
# local mode
pycro_context_repositry[name] = PycroContext(name, RUN_MODE_LOCAL)
return pycro_context_repositry[name]
def function(auto_emit=True):
def _function(func):
@wraps(func)
def wrapper(*args):
in_event = args[0]
context = args[1]
logger.info("receive={}".format(str(in_event)))
# add attributes to context
pc = _get_pc(context)
context.pycro_context = pc
context.in_event = in_event
# parse in_event
in_payload = in_event.get('payload', in_event)
# call original function
out_payload = func(in_payload, context)
# call following functions
if auto_emit and out_payload is not None:
pc.emit(out_payload, context)
return out_payload
return wrapper
return _function
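# A hedged usage sketch (assumed, not part of the original module). A handler
# decorated with @function() receives the incoming event's "payload"; any
# non-None return value is emitted to the functions listed as "followings" in
# the <function_name>.json config. Module and handler names are placeholders.
#
#     import pycroservices
#
#     @pycroservices.function()
#     def handler(payload, context):
#         # transform and forward to whatever followings the config declares
#         return {"greeting": "hello " + payload.get("name", "world")}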
|
test.py
|
import argparse
import atexit
import crc16
import serial
import sys
import time
import threading
import traceback
magicpacket = [0xde, 0xad, 0xbe, 0xef]
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--port", help="Serial port name")
parser.add_argument("-b", "--baud", help="Baud rate for serial port", type=int)
args = parser.parse_args()
portname = args.port
baud = args.baud
serial_reader_run = True
def serial_reader():
flush = True
while (serial_reader_run):
try:
#print(port.inWaiting())
if port.inWaiting() > 0:
flush = True
inbytes = port.read()
for b in inbytes:
if b >= 0x20 and b <= 0xff:
sys.stdout.write(chr(b))
elif b == 0x00:
sys.stdout.write("{NUL}")
elif b == 0x01:
sys.stdout.write("{SOH}")
elif b == 0x02:
sys.stdout.write("{STX}")
elif b == 0x03:
sys.stdout.write("{ETX}")
elif b == 0x04:
sys.stdout.write("{EOT}")
elif b == 0x05:
sys.stdout.write("{ENQ}")
elif b == 0x06:
sys.stdout.write("{ACK}")
elif b == 0x07:
sys.stdout.write("{BEL}")
elif b == 0x08:
sys.stdout.write("{BS}")
elif b == 0x09:
sys.stdout.write("\t")
elif b == 0x0a:
sys.stdout.write("\n")
elif b == 0x0b:
sys.stdout.write("\v")
elif b == 0x0c:
sys.stdout.write("\f")
elif b == 0x0d:
sys.stdout.write("\r")
elif b == 0x0e:
sys.stdout.write("{SO}")
elif b == 0x0f:
sys.stdout.write("{SI}")
elif b == 0x10:
sys.stdout.write("{DLE}")
elif b == 0x11:
sys.stdout.write("{DC1}")
elif b == 0x12:
sys.stdout.write("{DC2}")
elif b == 0x13:
sys.stdout.write("{DC3}")
elif b == 0x14:
sys.stdout.write("{DC4}")
elif b == 0x15:
sys.stdout.write("{NAK}")
elif b == 0x16:
sys.stdout.write("{SYN}")
elif b == 0x17:
sys.stdout.write("{ETB}")
elif b == 0x18:
sys.stdout.write("{CAN}")
elif b == 0x19:
sys.stdout.write("{EM}")
elif b == 0x1a:
sys.stdout.write("{SUB}")
elif b == 0x1b:
sys.stdout.write("{ESC}")
elif b == 0x1c:
sys.stdout.write("{FS}")
elif b == 0x1d:
sys.stdout.write("{GS}")
elif b == 0x1e:
sys.stdout.write("{RS}")
elif b == 0x1f:
sys.stdout.write("{US}")
else:
sys.stdout.write("{WTF}")
else:
if flush:
sys.stdout.flush()
flush = False
#time.sleep(0.001)
except SystemExit:
sys.stdout.write('SERIAL exiting due to SystemExit\n')
break
except OSError:
e = sys.exc_info()
sys.stderr.write('SERIAL: {0}: {1}\n'.format(e[0],e[1]))
traceback.print_tb(e[2])
break
except:
e = sys.exc_info()
sys.stderr.write('SERIAL: {0}: {1}\n'.format(e[0],e[1]))
traceback.print_tb(e[2])
port = serial.Serial(port=portname, baudrate=baud, rtscts=True, timeout=3)
sys.stdout.write("SERIAL: opened {0} @ {1}\n".format(portname, baud))
serial_thread = threading.Thread(target=serial_reader, args=[])
serial_thread.start()
def allstop():
global serial_reader_run
sys.stdout.write("Allstop called\n")
serial_reader_run = False
serial_thread.join()
port.close()
def ehook(type, value, traceback):
sys.stderr.write("I found an ehook!\n")
if type is SystemExit:
allstop()
sys.__excepthook__(type, value, traceback)
else:
sys.__excepthook__(type, value, traceback)
sys.excepthook = ehook
def highlow16(val):
h = (val>>8)&0xff
l = val & 0xff
return (h,l)
def sendmagic():
if port.is_open:
port.write(bytearray(magicpacket))
else:
sys.stderr.write("SERIAL: Port cannot write as it is closed\n")
def senddata(data):
dl = len(data)
bad = bytes(data)
crc = crc16.crc16xmodem(bad)
crchl = highlow16(crc)
if dl > 4096:
raise IndexError("Data length must be <=4096 ({1} found)".format(dl))
dlhl = highlow16(dl)
d2s = magicpacket+[dlhl[0],dlhl[1]]+data+[crchl[0],crchl[1]]
port.write(bytearray(d2s))
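# Illustrative packet layout for senddata() (values assumed, not from the
# original script): for data = [0x41, 0x42] the bytes written are
#   de ad be ef   magic
#   00 02         length, high byte then low byte (2)
#   41 42         payload
#   hi lo         crc16.crc16xmodem(bytes([0x41, 0x42])), high byte first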
|
onosnet.py
|
#!/usr/bin/python
import itertools
import os
import signal
import sys
from argparse import ArgumentParser
from subprocess import call
from threading import Thread
from time import sleep
import gratuitousArp
from mininet.cli import CLI
from mininet.examples.controlnet import MininetFacade
from mininet.link import TCLink
from mininet.log import info, output, error
from mininet.log import setLogLevel
from mininet.net import Mininet
from mininet.node import RemoteController, Node
ARP_PATH = gratuitousArp.__file__.replace('.pyc', '.py')
class ONOSMininet( Mininet ):
def __init__( self, controllers=[], gratuitousArp=True, build=True, *args, **kwargs ):
"""Create Mininet object for ONOS.
controllers: List of controller IP addresses
gratuitousArp: Send an ARP from each host to aid controller's host discovery"""
# delay building for a second
kwargs[ 'build' ] = False
Mininet.__init__(self, *args, **kwargs )
self.gratArp = gratuitousArp
# If a controller is not provided, use list of remote controller IPs instead.
if 'controller' not in kwargs or not kwargs['controller']:
info ( '*** Adding controllers\n' )
ctrl_count = 0
for controllerIP in controllers:
self.addController( 'c%d' % ctrl_count, RemoteController, ip=controllerIP )
info( ' c%d (%s)\n' % ( ctrl_count, controllerIP ) )
ctrl_count = ctrl_count + 1
if self.topo and build:
self.build()
def start( self ):
Mininet.start( self )
if self.gratArp:
self.waitConnected( timeout=5 )
info ( '*** Sending a gratuitous ARP from each host\n' )
self.gratuitousArp()
def verifyHosts( self, hosts ):
for i in range( len( hosts ) ):
if isinstance( hosts[i], str):
if hosts[i] in self:
hosts[i] = self[ hosts[i] ]
else:
info( '*** ERROR: %s is not a host\n' % hosts[i] )
del hosts[i]
elif not isinstance( hosts[i], Node):
del hosts[i]
def gratuitousArp( self, hosts=[] ):
"Send an ARP from each host to aid controller's host discovery; fallback to ping if necessary"
if not hosts:
hosts = self.hosts
self.verifyHosts( hosts )
for host in hosts:
info( '%s ' % host.name )
info( host.cmd( ARP_PATH ) )
info ( '\n' )
def pingloop( self ):
"Loop forever pinging the full mesh of hosts"
setLogLevel( 'error' )
try:
while True:
self.ping()
finally:
setLogLevel( 'info' )
def bgIperf( self, hosts=[], seconds=10 ):
self.verifyHosts( hosts )
servers = [ host.popen("iperf -s") for host in hosts ]
clients = []
for s, d in itertools.combinations(hosts, 2):
info ( '%s <--> %s\n' % ( s.name, d.name ))
cmd = 'iperf -c %s -t %s -y csv' % (d.IP(), seconds)
p = s.popen(cmd)
p.s = s.name
p.d = d.name
clients.append(p)
def handler (_signum, _frame):
raise BackgroundException()
oldSignal = signal.getsignal(signal.SIGTSTP)
signal.signal(signal.SIGTSTP, handler)
def finish( verbose=True ):
for c in clients:
out, err = c.communicate()
if verbose:
if err:
info( err )
else:
bw = out.split( ',' )[8]
info( '%s <--> %s: %s\n' % ( c.s, c.d, formatBw(bw) ) )
for s in servers:
s.terminate()
try:
info ( 'Press ^Z to continue in background or ^C to abort\n')
progress( seconds )
finish()
except KeyboardInterrupt:
for c in clients:
c.terminate()
for s in servers:
s.terminate()
except BackgroundException:
info( '\n*** Continuing in background...\n' )
t = Thread( target=finish, args=[ False ] )
t.start()
finally:
#Disable custom background signal
signal.signal(signal.SIGTSTP, oldSignal)
def progress(t):
while t > 0:
sys.stdout.write( '.' )
t -= 1
sys.stdout.flush()
sleep(1)
print
def formatBw( bw ):
bw = float(bw)
if bw > 1000:
bw /= 1000
if bw > 1000:
bw /= 1000
if bw > 1000:
bw /= 1000
return '%.2f Gbps' % bw
return '%.2f Mbps' % bw
return '%.2f Kbps' % bw
return '%.2f bps' % bw
class BackgroundException( Exception ):
pass
def get_mn(mn):
if isinstance(mn, ONOSMininet):
return mn
elif isinstance(mn, MininetFacade):
# There's more Mininet objects instantiated (e.g. one for the control network in onos.py).
for net in mn.nets:
if isinstance(net, ONOSMininet):
return net
return None
def do_bgIperf( self, line ):
args = line.split()
if not args:
output( 'Provide a list of hosts.\n' )
return
#Try to parse the '-t' argument as the number of seconds
seconds = 10
for i, arg in enumerate(args):
if arg == '-t':
if i + 1 < len(args):
try:
seconds = int(args[i + 1])
except ValueError:
error( 'Could not parse number of seconds: %s', args[i+1] )
del(args[i+1])
del args[i]
hosts = []
err = False
for arg in args:
if arg not in self.mn:
err = True
error( "node '%s' not in network\n" % arg )
else:
hosts.append( self.mn[ arg ] )
mn = get_mn( self.mn )
if "bgIperf" in dir( mn ) and not err:
mn.bgIperf( hosts, seconds=seconds )
else:
output('Background Iperf is not supported.\n')
def do_gratuitousArp( self, line ):
args = line.split()
mn = get_mn(self.mn)
if "gratuitousArp" in dir( mn ):
mn.gratuitousArp( args )
else:
output( 'Gratuitous ARP is not supported.\n' )
CLI.do_bgIperf = do_bgIperf
CLI.do_gratuitousArp = do_gratuitousArp
def parse_args():
parser = ArgumentParser(description='ONOS Mininet')
parser.add_argument('--cluster-size', help='Starts an ONOS cluster with the given number of instances',
type=int, action='store', dest='clusterSize', required=False, default=0)
parser.add_argument('--netcfg', help='Relative path of the JSON file to be used with netcfg',
type=str, action='store', dest='netcfgJson', required=False, default='')
parser.add_argument('ipAddrs', metavar='IP', type=str, nargs='*',
help='List of controller IP addresses', default=[])
return parser.parse_args()
def run( topo, controllers=None, link=TCLink, autoSetMacs=True):
if not topo:
print 'Need to provide a topology'
exit(1)
args = parse_args()
if not controllers and len(args.ipAddrs) > 0:
controllers = args.ipAddrs
if not controllers and args.clusterSize < 1:
print 'Need to provide a list of controller IPs, or define a cluster size.'
exit( 1 )
setLogLevel( 'info' )
if args.clusterSize > 0:
if 'ONOS_ROOT' not in os.environ:
print "Environment var $ONOS_ROOT not set (needed to import onos.py)"
exit( 1 )
sys.path.append(os.environ["ONOS_ROOT"] + "/tools/dev/mininet")
from onos import ONOSCluster, ONOSOVSSwitch, ONOSCLI
controller = ONOSCluster('c0', args.clusterSize)
onosAddr = controller.nodes()[0].IP()
net = ONOSMininet( topo=topo, controller=controller, switch=ONOSOVSSwitch, link=link,
autoSetMacs=autoSetMacs )
cli = ONOSCLI
else:
onosAddr = controllers[0]
net = ONOSMininet(topo=topo, controllers=controllers, link=link, autoSetMacs=autoSetMacs)
cli = CLI
net.start()
if len(args.netcfgJson) > 0:
if not os.path.isfile(args.netcfgJson):
error('*** WARNING no such netcfg file: %s\n' % args.netcfgJson)
else:
info('*** Setting netcfg: %s\n' % args.netcfgJson)
call(("onos-netcfg", onosAddr, args.netcfgJson))
cli( net )
net.stop()
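# A hedged usage sketch (not part of the original module): a topology script
# would typically hand run() a Mininet Topo instance and let parse_args()
# pick up controller IPs or --cluster-size from the command line, e.g.
#
#     from mininet.topo import SingleSwitchTopo
#     from onosnet import run
#
#     if __name__ == '__main__':
#         run(SingleSwitchTopo(3))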
|
map_stage_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import errors
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
TIMEOUT = 1
class MapStageTest(test.TestCase):
def testSimple(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea([dtypes.float32])
stage = stager.put(pi, [v], [0])
k, y = stager.get(gi)
y = math_ops.reduce_max(math_ops.matmul(y, y))
G.finalize()
with self.test_session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1, pi: 0})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i, pi: i+1, gi:i})
self.assertAllClose(4 * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
def testMultiple(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea([dtypes.float32, dtypes.float32])
stage = stager.put(pi, [x, v], [0, 1])
k, (z, y) = stager.get(gi)
y = math_ops.reduce_max(z * math_ops.matmul(y, y))
G.finalize()
with self.test_session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1, pi: 0})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i, pi: i+1, gi:i})
self.assertAllClose(
4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
def testDictionary(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea(
[dtypes.float32, dtypes.float32],
shapes=[[], [128, 128]],
names=['x', 'v'])
stage = stager.put(pi,{'x': x, 'v': v})
key, ret = stager.get(gi)
z = ret['x']
y = ret['v']
y = math_ops.reduce_max(z * math_ops.matmul(y, y))
G.finalize()
with self.test_session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1, pi: 0})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i, pi: i+1, gi:i})
self.assertAllClose(
4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
def testColocation(self):
gpu_dev = test.gpu_device_name()
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(gpu_dev):
stager = data_flow_ops.MapStagingArea([dtypes.float32])
y = stager.put(1, [v], [0])
expected_name = gpu_dev if 'gpu' not in gpu_dev else '/device:GPU:0'
self.assertEqual(y.device, expected_name)
with ops.device('/cpu:0'):
_, x = stager.get(1)
y = stager.peek(1)
_, z = stager.get()
self.assertEqual(x.device, '/device:CPU:0')
self.assertEqual(y.device, '/device:CPU:0')
self.assertEqual(z.device, '/device:CPU:0')
G.finalize()
def testPeek(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.int32, name='x')
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
p = array_ops.placeholder(dtypes.int32, name='p')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea([dtypes.int32, ], shapes=[[]])
stage = stager.put(pi,[x], [0])
peek = stager.peek(gi)
size = stager.size()
G.finalize()
n = 10
with self.test_session(use_gpu=True, graph=G) as sess:
for i in range(n):
sess.run(stage, feed_dict={x:i, pi:i})
for i in range(n):
self.assertTrue(sess.run(peek, feed_dict={gi: i}) == i)
self.assertTrue(sess.run(size) == 10)
def testSizeAndClear(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32, name='x')
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea(
[dtypes.float32, dtypes.float32],
shapes=[[], [128, 128]],
names=['x', 'v'])
stage = stager.put(pi,{'x': x, 'v': v})
size = stager.size()
clear = stager.clear()
G.finalize()
with self.test_session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1, pi: 3})
self.assertEqual(sess.run(size), 1)
sess.run(stage, feed_dict={x: -1, pi: 1})
self.assertEqual(sess.run(size), 2)
sess.run(clear)
self.assertEqual(sess.run(size), 0)
def testCapacity(self):
capacity = 3
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.int32, name='x')
pi = array_ops.placeholder(dtypes.int64, name='pi')
gi = array_ops.placeholder(dtypes.int64, name='gi')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea([dtypes.int32, ],
capacity=capacity, shapes=[[]])
stage = stager.put(pi, [x], [0])
get = stager.get()
size = stager.size()
G.finalize()
from six.moves import queue as Queue
import threading
queue = Queue.Queue()
n = 8
with self.test_session(use_gpu=True, graph=G) as sess:
# Stage data in a separate thread which will block
# when it hits the staging area's capacity and thus
# not fill the queue with n tokens
def thread_run():
for i in range(n):
sess.run(stage, feed_dict={x: i, pi: i})
queue.put(0)
t = threading.Thread(target=thread_run)
t.daemon = True
t.start()
# Get tokens from the queue until a timeout occurs
try:
for i in range(n):
queue.get(timeout=TIMEOUT)
except Queue.Empty:
pass
# Should've timed out on the iteration 'capacity'
if not i == capacity:
self.fail("Expected to timeout on iteration '{}' "
"but instead timed out on iteration '{}' "
"Staging Area size is '{}' and configured "
"capacity is '{}'.".format(capacity, i,
sess.run(size),
capacity))
# Should have capacity elements in the staging area
self.assertTrue(sess.run(size) == capacity)
# Clear the staging area completely
for i in range(n):
sess.run(get)
self.assertTrue(sess.run(size) == 0)
def testMemoryLimit(self):
memory_limit = 512*1024 # 512K
chunk = 200*1024 # 200K
capacity = memory_limit // chunk
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.uint8, name='x')
pi = array_ops.placeholder(dtypes.int64, name='pi')
gi = array_ops.placeholder(dtypes.int64, name='gi')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea([dtypes.uint8],
memory_limit=memory_limit, shapes=[[]])
stage = stager.put(pi, [x], [0])
get = stager.get()
size = stager.size()
G.finalize()
from six.moves import queue as Queue
import threading
import numpy as np
queue = Queue.Queue()
n = 8
with self.test_session(use_gpu=True, graph=G) as sess:
# Stage data in a separate thread which will block
# when it hits the staging area's capacity and thus
# not fill the queue with n tokens
def thread_run():
for i in range(n):
data = np.full(chunk, i, dtype=np.uint8)
sess.run(stage, feed_dict={x: data, pi: i})
queue.put(0)
t = threading.Thread(target=thread_run)
t.daemon = True
t.start()
# Get tokens from the queue until a timeout occurs
try:
for i in range(n):
queue.get(timeout=TIMEOUT)
except Queue.Empty:
pass
# Should've timed out on the iteration 'capacity'
if not i == capacity:
self.fail("Expected to timeout on iteration '{}' "
"but instead timed out on iteration '{}' "
"Staging Area size is '{}' and configured "
"capacity is '{}'.".format(capacity, i,
sess.run(size),
capacity))
# Should have capacity elements in the staging area
self.assertTrue(sess.run(size) == capacity)
# Clear the staging area completely
for i in range(n):
sess.run(get)
self.assertTrue(sess.run(size) == 0)
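
  # testOrdering checks that with ordered=True the staging area returns
  # entries in ascending key order even though they were put in reverse.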
  def testOrdering(self):
    import six
    import random

    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.int32, name='x')
        pi = array_ops.placeholder(dtypes.int64, name='pi')
        gi = array_ops.placeholder(dtypes.int64, name='gi')

      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.MapStagingArea(
            [dtypes.int32], shapes=[[]], ordered=True)
        stage = stager.put(pi, [x], [0])
        get = stager.get()
        size = stager.size()

      G.finalize()

    n = 10

    with self.test_session(use_gpu=True, graph=G) as sess:
      # Keys n-1..0
      keys = list(reversed(six.moves.range(n)))

      for i in keys:
        sess.run(stage, feed_dict={pi: i, x: i})

      self.assertTrue(sess.run(size) == n)

      # Check that key, values come out in ascending order
      for i, k in enumerate(reversed(keys)):
        get_key, values = sess.run(get)
        self.assertTrue(i == k == get_key == values)

      self.assertTrue(sess.run(size) == 0)
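
  # The partial-insert tests below stage only a subset of a tuple first; the
  # entry is counted by incomplete_size() until every component has been put.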
  def testPartialDictInsert(self):
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32)
        f = array_ops.placeholder(dtypes.float32)
        v = array_ops.placeholder(dtypes.float32)
        pi = array_ops.placeholder(dtypes.int64)
        gi = array_ops.placeholder(dtypes.int64)

      with ops.device(test.gpu_device_name()):
        # Test barrier with dictionary
        stager = data_flow_ops.MapStagingArea(
            [dtypes.float32, dtypes.float32, dtypes.float32],
            names=['x', 'v', 'f'])
        stage_xf = stager.put(pi, {'x': x, 'f': f})
        stage_v = stager.put(pi, {'v': v})
        key, ret = stager.get(gi)
        size = stager.size()
        isize = stager.incomplete_size()

      G.finalize()

    with self.test_session(use_gpu=True, graph=G) as sess:
      # 0 complete and 0 incomplete entries
      self.assertTrue(sess.run([size, isize]) == [0, 0])
      # Stage key 0, x and f tuple entries
      sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2})
      self.assertTrue(sess.run([size, isize]) == [0, 1])
      # Stage key 1, x and f tuple entries
      sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2})
      self.assertTrue(sess.run([size, isize]) == [0, 2])

      # Now complete key 0 with tuple entry v
      sess.run(stage_v, feed_dict={pi: 0, v: 1})
      # 1 complete and 1 incomplete entry
      self.assertTrue(sess.run([size, isize]) == [1, 1])
      # We can now obtain the tuple associated with key 0
      self.assertTrue(
          sess.run([key, ret], feed_dict={gi: 0}) ==
          [0, {'x': 1, 'f': 2, 'v': 1}])

      # 0 complete and 1 incomplete entry
      self.assertTrue(sess.run([size, isize]) == [0, 1])
      # Now complete key 1 with tuple entry v
      sess.run(stage_v, feed_dict={pi: 1, v: 3})
      # We can now obtain the tuple associated with key 1
      self.assertTrue(
          sess.run([key, ret], feed_dict={gi: 1}) ==
          [1, {'x': 1, 'f': 2, 'v': 3}])

  def testPartialIndexInsert(self):
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32)
        f = array_ops.placeholder(dtypes.float32)
        v = array_ops.placeholder(dtypes.float32)
        pi = array_ops.placeholder(dtypes.int64)
        gi = array_ops.placeholder(dtypes.int64)

      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.MapStagingArea(
            [dtypes.float32, dtypes.float32, dtypes.float32])
        stage_xf = stager.put(pi, [x, f], [0, 2])
        stage_v = stager.put(pi, [v], [1])
        key, ret = stager.get(gi)
        size = stager.size()
        isize = stager.incomplete_size()

      G.finalize()

    with self.test_session(use_gpu=True, graph=G) as sess:
      # 0 complete and 0 incomplete entries
      self.assertTrue(sess.run([size, isize]) == [0, 0])
      # Stage key 0, x and f tuple entries
      sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2})
      self.assertTrue(sess.run([size, isize]) == [0, 1])
      # Stage key 1, x and f tuple entries
      sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2})
      self.assertTrue(sess.run([size, isize]) == [0, 2])

      # Now complete key 0 with tuple entry v
      sess.run(stage_v, feed_dict={pi: 0, v: 1})
      # 1 complete and 1 incomplete entry
      self.assertTrue(sess.run([size, isize]) == [1, 1])
      # We can now obtain the tuple associated with key 0
      self.assertTrue(
          sess.run([key, ret], feed_dict={gi: 0}) == [0, [1, 1, 2]])

      # 0 complete and 1 incomplete entry
      self.assertTrue(sess.run([size, isize]) == [0, 1])
      # Now complete key 1 with tuple entry v
      sess.run(stage_v, feed_dict={pi: 1, v: 3})
      # We can now obtain the tuple associated with key 1
      self.assertTrue(
          sess.run([key, ret], feed_dict={gi: 1}) == [1, [1, 3, 2]])
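
  # The partial get/peek tests retrieve only a subset of an entry's
  # components; peek leaves them in place while get removes them, and the
  # entry only disappears once all of its components have been taken.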
  def testPartialDictGetsAndPeeks(self):
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32)
        f = array_ops.placeholder(dtypes.float32)
        v = array_ops.placeholder(dtypes.float32)
        pi = array_ops.placeholder(dtypes.int64)
        pei = array_ops.placeholder(dtypes.int64)
        gi = array_ops.placeholder(dtypes.int64)

      with ops.device(test.gpu_device_name()):
        # Test barrier with dictionary
        stager = data_flow_ops.MapStagingArea(
            [dtypes.float32, dtypes.float32, dtypes.float32],
            names=['x', 'v', 'f'])
        stage_xf = stager.put(pi, {'x': x, 'f': f})
        stage_v = stager.put(pi, {'v': v})
        peek_xf = stager.peek(pei, ['x', 'f'])
        peek_v = stager.peek(pei, ['v'])
        key_xf, get_xf = stager.get(gi, ['x', 'f'])
        key_v, get_v = stager.get(gi, ['v'])
        pop_key_xf, pop_xf = stager.get(indices=['x', 'f'])
        pop_key_v, pop_v = stager.get(pi, ['v'])
        size = stager.size()
        isize = stager.incomplete_size()

      G.finalize()

    with self.test_session(use_gpu=True, graph=G) as sess:
      # 0 complete and 0 incomplete entries
      self.assertTrue(sess.run([size, isize]) == [0, 0])
      # Stage key 0, x and f tuple entries
      sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2})
      self.assertTrue(sess.run([size, isize]) == [0, 1])
      # Stage key 1, x and f tuple entries
      sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2})
      self.assertTrue(sess.run([size, isize]) == [0, 2])

      # Now complete key 0 with tuple entry v
      sess.run(stage_v, feed_dict={pi: 0, v: 1})
      # 1 complete and 1 incomplete entry
      self.assertTrue(sess.run([size, isize]) == [1, 1])

      # We can now peek at the 'x' and 'f' values associated with key 0
      self.assertTrue(
          sess.run(peek_xf, feed_dict={pei: 0}) == {'x': 1, 'f': 2})
      # Peek at the 'v' value associated with key 0
      self.assertTrue(sess.run(peek_v, feed_dict={pei: 0}) == {'v': 1})
      # Peeking leaves the entry intact: still 1 complete and 1 incomplete
      self.assertTrue(sess.run([size, isize]) == [1, 1])

      # We can now obtain the 'x' and 'f' values associated with key 0
      self.assertTrue(
          sess.run([key_xf, get_xf], feed_dict={gi: 0}) ==
          [0, {'x': 1, 'f': 2}])
      # Still have 1 complete and 1 incomplete entry
      self.assertTrue(sess.run([size, isize]) == [1, 1])

      # We can no longer get 'x' and 'f' from key 0
      with self.assertRaises(errors.InvalidArgumentError) as cm:
        sess.run([key_xf, get_xf], feed_dict={gi: 0})

      exc_str = ("Tensor at index '0' for key '0' "
                 "has already been removed.")
      self.assertTrue(exc_str in cm.exception.message)

      # Obtain the 'v' value associated with key 0
      self.assertTrue(
          sess.run([key_v, get_v], feed_dict={gi: 0}) == [0, {'v': 1}])
      # 0 complete and 1 incomplete entry
      self.assertTrue(sess.run([size, isize]) == [0, 1])

      # Now complete key 1 with tuple entry v
      sess.run(stage_v, feed_dict={pi: 1, v: 1})
      # 1 complete and 0 incomplete entries
      self.assertTrue(sess.run([size, isize]) == [1, 0])

      # Pop without a key to obtain the 'x' and 'f' values associated with key 1
      self.assertTrue(sess.run([pop_key_xf, pop_xf]) == [1, {'x': 1, 'f': 2}])
      # 'v' is still left for key 1, so still 1 complete and 0 incomplete
      self.assertTrue(sess.run([size, isize]) == [1, 0])
      # We can now obtain the 'v' value associated with key 1
      self.assertTrue(
          sess.run([pop_key_v, pop_v], feed_dict={pi: 1}) == [1, {'v': 1}])
      # Nothing is left
      self.assertTrue(sess.run([size, isize]) == [0, 0])

  def testPartialIndexGets(self):
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32)
        f = array_ops.placeholder(dtypes.float32)
        v = array_ops.placeholder(dtypes.float32)
        pi = array_ops.placeholder(dtypes.int64)
        pei = array_ops.placeholder(dtypes.int64)
        gi = array_ops.placeholder(dtypes.int64)

      with ops.device(test.gpu_device_name()):
        # Test again with partial index gets
        stager = data_flow_ops.MapStagingArea(
            [dtypes.float32, dtypes.float32, dtypes.float32])
        stage_xvf = stager.put(pi, [x, v, f], [0, 1, 2])
        key_xf, get_xf = stager.get(gi, [0, 2])
        key_v, get_v = stager.get(gi, [1])
        size = stager.size()
        isize = stager.incomplete_size()

      G.finalize()

    with self.test_session(use_gpu=True, graph=G) as sess:
      # Stage complete tuple
      sess.run(stage_xvf, feed_dict={pi: 0, x: 1, f: 2, v: 3})
      self.assertTrue(sess.run([size, isize]) == [1, 0])

      # Partial get using indices
      self.assertTrue(
          sess.run([key_xf, get_xf], feed_dict={gi: 0}) == [0, [1, 2]])
      # Still some of key 0 left
      self.assertTrue(sess.run([size, isize]) == [1, 0])

      # Partial get of remaining index
      self.assertTrue(
          sess.run([key_v, get_v], feed_dict={gi: 0}) == [0, [3]])
      # All gone
      self.assertTrue(sess.run([size, isize]) == [0, 0])

if __name__ == '__main__':
  test.main()